{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'OCR模型免费转Markdown' && linkText !== 'OCR模型免费转Markdown' ) { link.textContent = 'OCR模型免费转Markdown'; link.href = 'https://fast360.xyz'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== '模型下载攻略' ) { link.textContent = '模型下载攻略'; link.href = '/'; replacedLinks.add(link); } // 删除Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'OCR模型免费转Markdown'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \"\"\"\n\ndefault_tpl = default_header_tpl + '{{body}}' + default_footer_tpl\n\nhealthcheck_tpl = default_header_tpl + \"\"\"\n
✓ Proxy is running fine.
\"\"\" + default_footer_tpl\n\nerror_tpl = default_header_tpl + \"\"\"\n
✘ {{error}}
\"\"\" + default_footer_tpl\n\ninstall_success_tpl = default_header_tpl + \"\"\"\n
✓ Installation done.
\n

\n%if remote_page_call_status_code != 200:\n
✘ Error calling the gh-pages page (Status {{remote_page_call_status_code}}). Please check the env vars (obfuscator, repositoryOwner and repositoryName) and place a index.html inside the obfuscator dir.
\n%else:\n
✓ Success calling the gh-pages page.
\n%end\n\"\"\" + default_footer_tpl\n\n#\n# HELPERS\n#\ndef return_json(object, response):\n response.set_header('Content-Type', 'application/json')\n return json.dumps(object)\n\ndef create_jwt_token():\n return jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=4)}, jwt_secret, algorithm='HS256')\n\n\ndef valid_jwt_token(token):\n try:\n res = jwt.decode(token, jwt_secret, algorithms=['HS256'])\n print (res)\n return True\n except JWSError:\n return False\n\ndef check_pass(username, password):\n #\n # First check if already valid JWT Token in Cookie\n #\n auth_cookie = request.get_cookie(\"cs-proxy-auth\")\n if auth_cookie and valid_jwt_token(auth_cookie):\n print ('PROXY-AUTH: found valid JWT Token in cookie')\n return True\n\n #\n # GitHub Basic Auth - also working with username + personal_access_token\n #\n print ('PROXY-AUTH: doing github basic auth - authType: {0}, owner: {1}'.format(auth_type, owner))\n basic_auth = HTTPBasicAuth(username, password)\n auth_response = requests.get('https://api.github.com/user', auth=basic_auth)\n if auth_response.status_code == 200:\n if auth_type == 'onlyGitHubOrgUsers':\n print ('PROXY-AUTH: doing org membership request')\n org_membership_response = requests.get('https://api.github.com/user/orgs', auth=basic_auth)\n if org_membership_response.status_code == 200:\n for org in org_membership_response.json():\n if org['login'] == owner:\n response.set_cookie(\"cs-proxy-auth\", create_jwt_token())\n return True\n return False\n else:\n response.set_cookie(\"cs-proxy-auth\", create_jwt_token())\n return True\n return False\n\n\ndef normalize_proxy_url(url):\n print ('URL:')\n print (url)\n if url.endswith('/') or url == '':\n return '{0}index.html'.format(url)\n return url\n\ndef proxy_trough_helper(url):\n print ('PROXY-GET: {0}'.format(url))\n proxy_response = requests.get(url)\n if proxy_response.status_code == 200:\n if proxy_response.headers['Last-Modified']:\n response.set_header('Last-Modified', proxy_response.headers['Last-Modified'])\n if proxy_response.headers['Content-Type']:\n response.set_header('Content-Type', proxy_response.headers['Content-Type'])\n if proxy_response.headers['Expires']:\n response.set_header('Expires', proxy_response.headers['Expires'])\n return proxy_response\n else:\n return HTTPResponse(status=proxy_response.status_code,\n body=template(error_tpl,\n headline='Error {0}'.format(proxy_response.status_code),\n error='error during proxy call'))\n\n\n\n\n#\n# BOTTLE APP\n#\ndef run_proxy(args):\n\n #\n # ERROR HANDLERS\n #\n @error(401)\n def error404(error):\n return template(error_tpl, headline='Error '+error.status, error=error.body)\n\n @error(500)\n def error500(error):\n return template(error_tpl, headline='Error '+error.status, error=error.body)\n\n #\n # SPECIAL ENDPOINTS\n #\n @route('/health')\n def hello():\n return template(healthcheck_tpl, headline='Healthcheck')\n\n @route('/install-success')\n def hello():\n remote_page_call_status_code = proxy_trough_helper('https://{0}.github.io/{1}/{2}/{3}'.format(args.owner, args.repository, args.obfuscator, '/')).status_code\n return template(install_success_tpl, headline='Installation Success', remote_page_call_status_code=remote_page_call_status_code)\n\n #\n # make args available in auth callback\n #\n global owner, auth_type\n owner = args.owner\n auth_type = args.authType\n\n @route('/')\n @auth_basic(check_pass)\n def proxy_trough(url):\n return proxy_trough_helper('https://{0}.github.io/{1}/{2}/{3}'.format(args.owner, args.repository, args.obfuscator, normalize_proxy_url(url)))\n\n @route('/')\n @auth_basic(check_pass)\n def proxy_trough_root_page():\n return proxy_trough_helper('https://{0}.github.io/{1}/{2}/{3}'.format(args.owner, args.repository, args.obfuscator, '/index.html'))\n\n #\n # RUN BY ENVIRONMENT\n #\n if args.environment == 'wsgi':\n run(host='localhost', port=args.port, debug=True)\n if args.environment == 'heroku':\n run(host=\"0.0.0.0\", port=int(args.port))\n else:\n run(server='cgi')\n\n"},"size":{"kind":"number","value":8628,"string":"8,628"}}},{"rowIdx":128330,"cells":{"max_stars_repo_path":{"kind":"string","value":"maskrcnn_benchmark/modeling/roi_heads/relation_head/utils_co_attention.py"},"max_stars_repo_name":{"kind":"string","value":"dongxingning/SHA_GCL_for_SGG"},"max_stars_count":{"kind":"number","value":5,"string":"5"},"id":{"kind":"string","value":"2171263"},"content":{"kind":"string","value":"\"\"\"\nBased on the implementation of https://github.com/jadore801120/attention-is-all-you-need-pytorch\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom maskrcnn_benchmark.modeling.roi_heads.relation_head.model_transformer import ScaledDotProductAttention,\\\n MultiHeadAttention, PositionwiseFeedForward\n\nclass Single_Att_Layer(nn.Module):\n ''' Compose with two layers '''\n def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):\n super(Single_Att_Layer, self).__init__()\n self.slf_attn = MultiHeadAttention(\n n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, q_input, k_input, v_input, non_pad_mask=None, slf_attn_mask=None):\n enc_output, enc_slf_attn = self.slf_attn(\n q_input, k_input, v_input, mask=slf_attn_mask)\n enc_output *= non_pad_mask.float()\n\n enc_output = self.pos_ffn(enc_output)\n enc_output *= non_pad_mask.float()\n\n return enc_output, enc_slf_attn\n\nclass Self_Attention_Encoder(nn.Module):\n \"\"\"\n A encoder model with self attention mechanism.\n \"\"\"\n def __init__(self, n_head, d_k, d_v, d_model, d_inner, dropout=0.1):\n super().__init__()\n self.transformer_layer = Single_Att_Layer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n\n def forward(self, input_feats, num_objs):\n\n input_feats = input_feats.split(num_objs, dim=0)\n input_feats = nn.utils.rnn.pad_sequence(input_feats, batch_first=True)\n\n # -- Prepare masks\n bsz = len(num_objs)\n device = input_feats.device\n pad_len = max(num_objs)\n num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len)\n slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len)\n non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) # (bsz, pad_len, 1)\n\n # -- Forward\n enc_output, enc_slf_attn = self.transformer_layer(\n input_feats, input_feats, input_feats,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask)\n\n enc_output = enc_output[non_pad_mask.squeeze(-1)]\n return enc_output\n\nclass Cross_Attention_Encoder(nn.Module):\n \"\"\"\n A encoder model with self attention mechanism.\n \"\"\"\n def __init__(self, n_head, d_k, d_v, d_model, d_inner, dropout=0.1):\n super().__init__()\n self.transformer_layer = Single_Att_Layer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)\n\n def forward(self, visual_feats, textual_feats, num_objs):\n\n visual_feats = visual_feats.split(num_objs, dim=0)\n visual_feats = nn.utils.rnn.pad_sequence(visual_feats, batch_first=True)\n textual_feats = textual_feats.split(num_objs, dim=0)\n textual_feats = nn.utils.rnn.pad_sequence(textual_feats, batch_first=True)\n\n # -- Prepare masks\n bsz = len(num_objs)\n device = visual_feats.device\n pad_len = max(num_objs)\n num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len)\n slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len)\n non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) # (bsz, pad_len, 1)\n\n # -- Forward\n enc_output, enc_slf_attn = self.transformer_layer(\n visual_feats, textual_feats, textual_feats,\n non_pad_mask=non_pad_mask,\n slf_attn_mask=slf_attn_mask)\n\n enc_output = enc_output[non_pad_mask.squeeze(-1)]\n return enc_output\n\n"},"size":{"kind":"number","value":3885,"string":"3,885"}}},{"rowIdx":128331,"cells":{"max_stars_repo_path":{"kind":"string","value":"pace/encryption/encryption_exceptions.py"},"max_stars_repo_name":{"kind":"string","value":"LaudateCorpus1/PACE-python"},"max_stars_count":{"kind":"number","value":7,"string":"7"},"id":{"kind":"string","value":"2171883"},"content":{"kind":"string","value":"## **************\n## Copyright 2015 MIT Lincoln Laboratory\n## Project: PACE\n## Authors: ATLH\n## Description: Contains exceptions for encryption code\n## Modifications:\n## Date Name Modification\n## ---- ---- ------------\n## 10 Aug 2015 ATLH Original file \n## **************\n\nclass EncryptionException(Exception):\n \"\"\" Exception raised when unable to encrypt.\n \n Attributes:\n msg - error message for situation\n \"\"\"\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg\n \nclass DecryptionException(Exception):\n \"\"\" Exception raised when unable to decrypt.\n \n Attributes:\n msg - error message for situation\n \"\"\"\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return self.msg"},"size":{"kind":"number","value":848,"string":"848"}}},{"rowIdx":128332,"cells":{"max_stars_repo_path":{"kind":"string","value":"models/train_classifier.py"},"max_stars_repo_name":{"kind":"string","value":"rmkeeler/udacity-project-disaster-alerts"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172432"},"content":{"kind":"string","value":"# System packages\nimport sys\n\n# NLP packages\nimport nltk\nnltk.download(['punkt','stopwords','wordnet'])\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n# Analysis packages\nimport numpy as np\nimport pandas as pd\nimport re\nfrom sqlalchemy import create_engine\nimport pickle as pkl\n\n# Machine Learning packages\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.metrics import classification_report\n\n\ndef load_data(database_filepath):\n \"\"\"\n Get the database from the \"data\" folder in this project's structure.\n Relevant data must be in a table called \"messages\" in that database.\n\n Return X, y and category names.\n X is the single column containing message text. We'll extract features from it.\n y is an array of multiple categories, so this is a multioutput classifier problem.\n y categories take on 1 if category applies to a message, otherwise 0.\n Category names are the names of the categories appearing in y.\n \"\"\"\n engine = create_engine('sqlite:///' + database_filepath)\n df = pd.read_sql_table(table_name = 'messages', con = engine)\n\n feature_vars = ['message']\n non_vars = ['id','original','genre']\n\n target_vars = [x for x in df.columns if x not in feature_vars + non_vars]\n\n X = df.message.values\n y = df[target_vars].values\n cats = df[target_vars].columns.values\n\n return X, y, cats\n\n\ndef tokenize(text):\n \"\"\"\n Simple tokenizer we'll use in grid search to see if it's better than\n CountVectorizer's default tokenizer.\n\n 1. Normalize: Strip punctuation and convert to lower\n 2. Tokenize: Split message into individual words\n 3. Lemmatize: Reduce words to their root, using verb part of speech.\n \"\"\"\n punct = re.compile('[^A-Za-z0-9]')\n norm = punct.sub(' ', text.lower())\n\n tokens = [x for x in word_tokenize(norm) if x not in stopwords.words('english')]\n\n lemmatizer = WordNetLemmatizer()\n lemms = [lemmatizer.lemmatize(w, 'v') for w in tokens]\n\n return lemms\n\n\ndef build_model():\n \"\"\"\n Build a pipeline to extract features from messages and then run them\n through a multioutput classifier.\n\n GridSearchCV will test Random Forest against Multinomial Naive Bayes to\n see which performs best.\n \"\"\"\n pipeline = Pipeline([\n ('vect', CountVectorizer(tokenizer = tokenize)),\n ('tfidf', TfidfTransformer()),\n ('clf', MultiOutputClassifier(estimator = RandomForestClassifier()))\n ])\n\n params = {\n 'vect__max_features':[None, 5000],\n 'tfidf__use_idf':[True, False],\n 'clf__estimator':[RandomForestClassifier(), MultinomialNB()]\n }\n\n cv = GridSearchCV(pipeline, param_grid = params, cv = 2, verbose = 3)\n\n return cv\n\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n \"\"\"\n Eval model using classification_report().\n Above each eval table, print the name of the output var being evaluated.\n\n Does nothing but eval the model and print output to console.\n \"\"\"\n y_pred = model.predict(X_test)\n\n for i in range(y_pred.shape[1]):\n print('{}'.format(category_names[i]))\n print(classification_report(Y_test[:,i], y_pred[:,i]) + '\\n')\n\ndef save_model(model, model_filepath):\n \"\"\"\n Save the cv model to filepath specified in cmd prompt. As pickle.\n \"\"\"\n pkl.dump(model, open(model_filepath, 'wb'))\n\n\ndef main():\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n\n print('Building model...')\n model = build_model()\n\n print('Training model...')\n model.fit(X_train, Y_train)\n\n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":4739,"string":"4,739"}}},{"rowIdx":128333,"cells":{"max_stars_repo_path":{"kind":"string","value":"other/greater.py"},"max_stars_repo_name":{"kind":"string","value":"kirviz/algorithms"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172147"},"content":{"kind":"string","value":"def calculate(arr):\n result = []\n for i, num in enumerate(arr):\n greater = larger(num, arr[i+1:])\n result.append(greater if greater is not None else -1)\n\n print(' '.join(map(str, result)))\n\n\ndef larger(than, inArray):\n for n in inArray:\n if n > than:\n return n\n\nif __name__ == \"__main__\":\n calculate([1, 3, 2, 4])\n"},"size":{"kind":"number","value":361,"string":"361"}}},{"rowIdx":128334,"cells":{"max_stars_repo_path":{"kind":"string","value":"FIR_Filter2.py"},"max_stars_repo_name":{"kind":"string","value":"hassan-alhujhoj/ENEL420-FIR-IIR-Filters-for-ECG-Singals"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172385"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'notebook')\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\nimport numpy as np\nfrom scipy import signal\nfrom scipy.fftpack import fft\n\n\n# Data Import\n# Opens the group data in read only mode\n# Appends the data to a list\n\n# In[2]:\n\n\nfile = open(\"Signal_files/enel420_grp_1.txt\", \"r\")\ny_0 = []\nfor line in file:\n words = line.split(\" \")\n for word in words:\n if word != \"\":\n y_0.append(float(word))\nfs = 1024\nN = len(y_0)\nN_2 = int(N/2)\nt = [x/fs for x in list(range(0, N))]\nf = [x*fs/N for x in list(range(0, N_2))]\n\nFFT_0 = fft(y_0)\n\n\n# In[3]:\n\n\nplt.figure()\nplt.plot(t, y_0, linewidth=0.5)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Unfiltered ECG Signal\")\nplt.savefig(\"Graphs/Task_1_1.png\")\n\n\n# In[4]:\n\n\nplt.figure()\nplt.plot(t, y_0, linewidth=0.5)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Unfiltered ECG Signal\")\nplt.xlim([1, 2])\nplt.savefig(\"Graphs/Task_1_2.png\")\n\n\n# In[5]:\n\n\nplt.figure()\nplt.plot(f, abs(FFT_0)[:N_2])\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Unfiltered ECG Signal Frequency Spectrum\")\nplt.savefig(\"Graphs/Task_2_1.png\")\n\n\n# In[6]:\n\n\nplt.figure()\nplt.plot(f, 20*np.log10(abs(FFT_0[:N_2])))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Magnitude (dB)\")\nplt.title(\"Unfiltered ECG Signal Frequency Spectrum\")\nplt.savefig(\"Graphs/Task_2_2.png\")\n\n\n# In[7]:\n\n\nN_Coeff = 400 # Number of coefficients\nnoise_f = [31.45, 74.36] # Desired stop bands, Hz\n\n\n# In[8]:\n\n\n# Window Filtering\nwidth_WF = 8 # Width of stop band, Hz\nband_1 = [noise_f[0] -width_WF/2, noise_f[0]+width_WF/2] # Define band 1 bounds\nband_2 = [noise_f[1] -width_WF/2, noise_f[1]+width_WF/2] # Define band 2 bounds\n\nfilter1_WF = signal.firwin(N_Coeff+1, band_1, window='hann', pass_zero='bandstop', fs=fs) # Filter for noise frequency 1\nfilter2_WF = signal.firwin(N_Coeff+1, band_2, window='hann', pass_zero='bandstop', fs=fs) # Filter for noise frequency 2\nfilter_WF = signal.convolve(filter1_WF, filter2_WF) # Combined filter for noise frequencies\ny_WF = signal.lfilter(filter_WF, 1, y_0) # Apply noise filters to original data\nf_WF, h_WF = signal.freqz(filter_WF, 1, fs=fs) #\nFFT_WF = fft(y_WF)\n\n\n# In[9]:\n\n\n# Frequency Spectrum Window Filter Response\nplt.figure()\nplt.plot(f, abs(FFT_WF[:N_2]))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Window Filter Frequency Spectrum\")\nplt.savefig(\"Graphs/Task_5_1_1.png\")\n\n\n# In[10]:\n\n\n# Decibel Frequency Spectrum Window Filter Response\nplt.figure()\nplt.plot(f, 20*np.log10(abs(FFT_WF[:N_2])))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Magnitude (dB)\")\nplt.title(\"Window Filter Frequency Spectrum\")\nplt.savefig(\"Graphs/Task_5_1_2.png\")\n\n\n# In[11]:\n\n\n# Frequency Response Window Filter Response\nplt.figure()\nplt.plot(f_WF, 20* np.log10(abs(h_WF)))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Magnitude (dB)\")\nplt.title(\"Window Filter Frequency Response\")\nplt.savefig(\"Graphs/Task_5_1_3.png\")\n\n\n# In[12]:\n\n\nplt.figure()\nplt.plot(t, y_WF, linewidth=0.5)\nplt.xlabel(\"Time (secs)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Window Filter Data\")\nplt.savefig(\"Graphs/Task_5_1_4.png\")\n\n\n# In[13]:\n\n\nplt.figure()\nplt.plot(t, y_WF, linewidth=0.5)\nplt.xlabel(\"Time (secs)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Window Filter Data\")\nplt.xlim([1, 2])\nplt.savefig(\"Graphs/Task_5_1_5.png\")\n\n\n# In[14]:\n\n\ntrans_PM = 4 # Width of transition from pass band to stop band, Hz\nwidth_PM = 8# Width of transition from pass band, Hz\n\n# Filter Bands for filtering frequency 1 & 2\nband1_PM = [0, noise_f[0] -width_PM-trans_PM, noise_f[0] -width_WF/2, noise_f[0]+width_PM/2, noise_f[0]+width_PM/2+trans_PM, fs/2]\nband2_PM = [0, noise_f[1] -width_PM-trans_PM, noise_f[1] -width_WF/2, noise_f[1]+width_PM/2, noise_f[1]+width_PM/2+trans_PM, fs/2]\ngain_PM = [1, 0, 1]\n\n# Create filters for filtering frequency 1 & 2\nfilter1_PM = signal.remez(N_Coeff+1, band1_PM, gain_PM, fs=fs) # Filter frequency 1\nfilter2_PM = signal.remez(N_Coeff+1, band2_PM, gain_PM, fs=fs) # Filter frequency 2\nfilter_PM = signal.convolve(filter1_PM, filter2_PM) # Combined Filter\n\ny_PM = signal.lfilter(filter_PM, 1, y_0) # Filter original data in time domain\nf_PM, h_PM = signal.freqz(filter_PM, 1, fs=fs) # Return filter frequency response\nFFT_PM = fft(y_PM) # Filtered data frequency domain response\n\n\n# In[15]:\n\n\nplt.figure()\nplt.plot(f, abs(FFT_PM[:N_2]))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Parks-McClellan Filter Frequency Spectrum\")\nplt.savefig(\"Graphs/Task_5_2_1.png\")\n\n\n# In[16]:\n\n\nplt.figure()\nplt.plot(f_PM, 20*np.log10(abs(h_PM)))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Magnitude (dB)\")\nplt.title(\"Parks-McClellan Filter Frequency Response\")\nplt.savefig(\"Graphs/Task_5_2_2.png\")\n\n\n# In[17]:\n\n\nplt.figure()\nplt.plot(t, y_PM, linewidth=0.5)\nplt.xlabel(\"Time (secs)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Parks-McClellan Filter Data\")\nplt.savefig(\"Graphs/Task_5_2_3.png\")\n\n\n# In[18]:\n\n\nplt.figure()\nplt.plot(t, y_PM, linewidth=0.5)\nplt.xlabel(\"Time (secs)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Parks-McClellan Filter Data\")\nplt.xlim([1, 2])\nplt.savefig(\"Graphs/Task_5_2_4.png\")\n\n\n# In[19]:\n\n\ntrans_FS = 4 # Width of transition from pass band to stop band, Hz\nwidth_FS = 8 # Width of the stop band, Hz\nband1_FS = [0, noise_f[0] -width_FS/2-trans_FS, noise_f[0] -width_FS/2, noise_f[0]+width_FS/2, noise_f[0]+width_FS/2+trans_FS, fs/2]\nband2_FS = [0, noise_f[1] -width_FS/2-trans_FS, noise_f[1] -width_FS/2, noise_f[1]+width_FS/2, noise_f[1]+width_FS/2+trans_FS, fs/2]\ngain_FS = [1, 1, 0, 0, 1, 1] # Gain coefficients of bands\n\nfilter1_FS = signal.firwin2(N_Coeff+1, band1_FS, gain_FS, fs=fs) # Filter for noise frequency 1\nfilter2_FS = signal.firwin2(N_Coeff+1, band2_FS, gain_FS, fs=fs) # Filter for noise frequency 2\nfilter_FS = signal.convolve(filter1_FS, filter2_FS) # Filter for both noise frequencies\n\ny_FS = signal.lfilter(filter_FS, 1, y_0) # Apply filter to time domain data\nf_FS, h_FS = signal.freqz(filter_FS, 1, fs=fs) # Filter Response\nFFT_FS = fft(y_FS) # Filtered Frequency Domain Response\n\n\n# In[20]:\n\n\nplt.figure()\nplt.plot(f, abs(FFT_FS[:N_2]))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Frequency Sampling Filter Frequency Spectrum\")\nplt.savefig(\"Graphs/Task_5_3_1.png\")\n\n\n# In[21]:\n\n\nplt.figure()\nplt.plot(f_FS, 20*np.log10(abs(h_FS)))\nplt.xlabel(\"Frequency (Hz)\")\nplt.ylabel(\"Magnitude (dB)\")\nplt.title(\"Frequency Sampling Frequency Response\")\nplt.savefig(\"Graphs/Task_5_3_2.png\")\n\n\n# In[22]:\n\n\nplt.figure()\nplt.plot(t, y_FS, linewidth=0.5)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Frequency Sampling Data\")\nplt.savefig(\"Graphs/Task_5_3_3.png\")\n\n\n# In[23]:\n\n\nplt.figure()\nplt.plot(t, y_FS, linewidth=0.5)\nplt.xlabel(\"Time (s)\")\nplt.ylabel(\"Voltage (uV)\")\nplt.title(\"Frequency Sampling Data\")\nplt.xlim([1, 2])\nplt.savefig(\"Graphs/Task_5_3_4.png\")\n\n\n# In[29]:\n\n\nP_0 = np.var(y_0)\nP_WF = np.var(y_WF)\nP_PM = np.var(y_PM)\nP_FS = np.var(y_FS)\n\nN_WF = P_0 - P_WF\nN_FS = P_0 - P_FS\nN_PM = P_0 - P_PM\nprint(\"No Filter Power: {:.2f}\".format(np.var(y_0)))\nprint(\"Window Filter Power: {:.2f}\".format(np.var(y_WF)))\nprint(\"Parks-McLellan Filter Power: {:.2f}\".format(np.var(y_PM)))\nprint(\"Frequency Sampling Filter Power: {:.2f}\".format(np.var(y_FS)))\n\nprint(\"Window Filter Noise Power: {:.2f}\".format(N_WF))\nprint(\"Parks-McLellan Filter Noise Power: {:.2f}\".format(N_PM))\nprint(\"Frequency Sampling Noise Filter Power: {:.2f}\".format(N_FS))\n\n\n# In[ ]:\n\n\n\n\n"},"size":{"kind":"number","value":7598,"string":"7,598"}}},{"rowIdx":128335,"cells":{"max_stars_repo_path":{"kind":"string","value":"code/python/FactSetESG/v1/fds/sdk/FactSetESG/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"factset/enterprise-sdk"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2171782"},"content":{"kind":"string","value":"# flake8: noqa\n\n\"\"\"\n FactSet ESG API\n\n FactSet ESG (powered by FactSet Truvalue Labs) applies machine learning to uncover risks and opportunities from companies' Environmental, Social and Governance (ESG) behavior, which are aggregated and categorized into continuously updated, material ESG scores. The service focuses on company ESG behavior from external sources and includes both positive and negative events that go beyond traditional sources of ESG risk data.

FactSet ESG extracts, analyzes, and generates scores from millions of documents each month collected from more than 100,000 data sources in over 13 languages. Sources include news, trade journals, NGOs, watchdog groups, trade blogs, industry reports and social media. Products deliver investable insights by revealing value and risk factors from unstructured data at the speed of current events.

# noqa: E501\n\n The version of the OpenAPI document: 1.3.0\n Contact: \n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\n__version__ = \"0.20.0\"\n\n# import ApiClient\nfrom fds.sdk.FactSetESG.api_client import ApiClient\n\n# import Configuration\nfrom fds.sdk.FactSetESG.configuration import Configuration\n\n# import exceptions\nfrom fds.sdk.FactSetESG.exceptions import OpenApiException\nfrom fds.sdk.FactSetESG.exceptions import ApiAttributeError\nfrom fds.sdk.FactSetESG.exceptions import ApiTypeError\nfrom fds.sdk.FactSetESG.exceptions import ApiValueError\nfrom fds.sdk.FactSetESG.exceptions import ApiKeyError\nfrom fds.sdk.FactSetESG.exceptions import ApiException\n"},"size":{"kind":"number","value":1552,"string":"1,552"}}},{"rowIdx":128336,"cells":{"max_stars_repo_path":{"kind":"string","value":"hostman/__init__.py"},"max_stars_repo_name":{"kind":"string","value":"jonhadfield/hostman"},"max_stars_count":{"kind":"number","value":20,"string":"20"},"id":{"kind":"string","value":"2172025"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Hostman.\n\nUsage:\n hostman add [-fqbvq] [--force] [--path=PATH]\n ( [ENTRY ...] | [--input-file=FILE] | [--input-url=URL] )\n hostman remove [-qbvq] ([--address=
] [--names=]) [--path=PATH]\n [--input-file=FILE] [--input-url=URL]\n hostman --version\n\nOptions:\n -h --help show this help message and exit\n --version show version and exit\n -f --force replace matching entries\n --address=ADDRESS ipv6 or ipv4 address\n --names=NAMES host names\n -q --quiet report only failures\n -p --path=PATH location of hosts file (attempts to detect default)\n -i --input-file=FILE file containing hosts to import\n -u --input-url=URL url of file containing hosts to import\n -b --backup create a backup before writing any changes\n --exclude=VALUE comma separated list of names or addresses\n to exclude from operation [default: 127.0.0.1]\n -v --verbose print verbose output\n\"\"\"\n\nfrom __future__ import print_function\nfrom docopt import docopt\nfrom python_hosts import Hosts, HostsEntry\nfrom .utils import is_writeable, is_readable\nimport sys\nimport os\nimport datetime\nimport shutil\nfrom colorama import Fore, init\n\ninit(autoreset=True)\nname = \"hostman\"\n\n\ndef backup_hosts(source=None, extension=None):\n \"\"\"Backup a hosts file\n\n :param source: Path to the hosts file\n :param extension: The extension to add to the backup file\n :return: A dict containing the result and user message to output\n \"\"\"\n if not extension:\n now = datetime.datetime.now()\n ext = now.strftime('%Y%m%d%H%M%S')\n else:\n ext = extension\n dest_split = source.split('/')\n new_filename = \".{0}.{1}\".format(dest_split[-1], ext)\n dest_split[-1] = new_filename\n dest = \"/\".join(dest_split)\n try:\n shutil.copy(source, dest)\n return {'result': 'success', 'message': 'Backup written to: {0}'.format(dest)}\n except IOError:\n return {'result': 'failed', 'message': 'Cannot create backup file: {0}'.format(dest)}\n\n\ndef output_message(message=None, quiet=False):\n \"\"\"User friendly result of action\n\n :param message: A dict containing the result and a user notification message\n :return: Exit with 0 or 1, or True if this is not the final output\n \"\"\"\n res = message.get('result')\n if res == 'success':\n if not quiet:\n print(Fore.GREEN + message.get('message'))\n sys.exit(0)\n elif res == 'failed':\n print(Fore.RED + message.get('message'))\n sys.exit(1)\n elif res == 'continue':\n if not quiet:\n print(message.get('message'))\n return True\n\n\ndef add(entry_line=None, hosts_path=None, force_add=False):\n \"\"\"Add the specified entry\n\n :param entry_line: The entry to add\n :param hosts_path: The path of the hosts file\n :param force_add: Replace matching any matching entries with new entry\n :return: A dict containing the result and user message to output\n \"\"\"\n hosts_entry = HostsEntry.str_to_hostentry(entry_line)\n if not hosts_entry:\n output_message({'result': 'failed',\n 'message': '\"{0}\": is not a valid entry.'.format(entry_line)})\n\n duplicate_entry = False\n entry_to_add = False\n\n hosts = Hosts(hosts_path)\n add_result = hosts.add(entries=[hosts_entry], force=force_add)\n if add_result.get('replaced_count'):\n hosts.write()\n return {'result': 'success',\n 'message': 'Entry added. Matching entries replaced.'}\n if add_result.get('ipv4_count') or add_result.get('ipv6_count'):\n entry_to_add = True\n if add_result.get('duplicate_count'):\n duplicate_entry = True\n if entry_to_add and not duplicate_entry:\n hosts.write()\n return {'result': 'success',\n 'message': 'New entry added.'}\n if not force_add and duplicate_entry:\n return {'result': 'failed',\n 'message': 'New entry matches one or more existing.'\n '\\nUse -f to replace similar entries.'}\n\n\ndef import_from_file(hosts_path=None, file_path=None):\n \"\"\"Import entries from a text file\n\n :param hosts_path: Path to the hosts file to update\n :param file_path: Path to the file containing the hosts entries to import\n :return: A dict containing the result and user message to output\n \"\"\"\n if hosts_path and not os.path.exists(hosts_path):\n return {'result': 'failed', 'message': 'Cannot read hosts file: {0}'.format(hosts_path)}\n if not os.path.exists(file_path):\n return {'result': 'failed', 'message': 'Cannot read import file: {0}'.format(file_path)}\n else:\n hosts = Hosts(path=hosts_path)\n pre_count = len(hosts.entries)\n import_file_output = hosts.import_file(import_file_path=file_path)\n post_count = len(hosts.entries)\n write_result = import_file_output.get('write_result')\n message = 'New entries:\\t{0}\\nTotal entries:\\t{1}\\n'.format(\n post_count - pre_count,\n write_result.get('total_written')\n )\n return {'result': import_file_output.get('result'),\n 'message': message}\n\n\ndef import_from_url(hosts_path=None, url=None):\n \"\"\"Import entries from a text file found on a specific URL\n\n :param hosts_path: Path to the hosts file to update\n :param url: URL of the text file containing the hosts entries to import\n :return: A dict containing the result and user message to output\n \"\"\"\n hosts = Hosts(path=hosts_path)\n pre_count = len(hosts.entries)\n import_url_output = hosts.import_url(url=url)\n post_count = len(hosts.entries)\n write_result = import_url_output.get('write_result')\n message = 'New entries:\\t{0}\\nTotal entries:\\t{1}\\n'.format(\n post_count - pre_count,\n write_result.get('total_written')\n )\n return {'result': import_url_output.get('result'),\n 'message': message}\n\n\ndef remove(address_to_remove=None, names_to_remove=None, remove_from_path=None):\n \"\"\"Remove entries from a hosts file\n\n :param address_to_remove: An ipv4 or ipv6 address to remove\n :param names_to_remove: A list of names to remove\n :param remove_from_path: The path of the hosts file to remove entries from\n :return: A dict containing the result and user message to output\n \"\"\"\n hosts = Hosts(path=remove_from_path)\n if address_to_remove or names_to_remove:\n num_before = hosts.count()\n hosts.remove_all_matching(address=address_to_remove, name=names_to_remove)\n hosts.write()\n difference = num_before - hosts.count()\n if difference:\n if difference > 1:\n str_entry = 'entries'\n else:\n str_entry = 'entry'\n return {'result': 'success',\n 'message': 'Removed {0} {1}'.format(difference, str_entry)}\n else:\n return {'result': 'failed',\n 'message': 'No matching entries found'}\n\n\ndef strip_entry_value(entry_value):\n \"\"\"Strip white space from a string or list of strings\n\n :param entry_value: value to strip spaces from\n :return: value minus the leading and trailing spaces\n \"\"\"\n if isinstance(entry_value, list):\n new_list = []\n for value in entry_value:\n new_list.append(value.strip())\n return ' '.join(new_list)\n if isinstance(entry_value, str):\n return entry_value.strip()\n\n\ndef real_main():\n \"\"\" The function called from the script\n\n :return: None\n \"\"\"\n arguments = docopt(__doc__, version='0.1.3')\n entry = arguments.get('ENTRY')\n quiet = arguments.get('--quiet')\n path = arguments.get('--path')\n force = arguments.get('--force')\n backup = arguments.get('--backup')\n address = arguments.get('--address')\n names = arguments.get('--names')\n input_file = arguments.get('--input-file')\n input_url = arguments.get('--input-url')\n result = None\n\n if not path:\n if sys.platform.startswith('win'):\n path = r'c:\\windows\\system32\\drivers\\etc\\hosts'\n else:\n path = '/etc/hosts'\n\n if not is_readable(path):\n output_message({'result': 'failed',\n 'message': 'Unable to read path: {0}.'.format(path)})\n\n new_entry = None\n if entry:\n new_entry = strip_entry_value(entry)\n\n if backup:\n result = backup_hosts(source=path)\n if result.get('result') == 'success':\n result['result'] = 'continue'\n output_message(result, quiet=quiet)\n\n if arguments.get('add'):\n if not is_writeable(path):\n result = {'result': 'failed',\n 'message': 'Unable to write to: {0}'.format(path)}\n if new_entry:\n result = add(entry_line=new_entry, hosts_path=path, force_add=force)\n if input_file:\n result = import_from_file(hosts_path=path, file_path=input_file)\n if input_url:\n result = import_from_url(hosts_path=path, url=input_url)\n else:\n if arguments.get('remove'):\n result = remove(address_to_remove=address, names_to_remove=names, remove_from_path=path)\n if result:\n output_message(result, quiet=quiet)\n\n\nif __name__ == '__main__':\n real_main()\n"},"size":{"kind":"number","value":9467,"string":"9,467"}}},{"rowIdx":128337,"cells":{"max_stars_repo_path":{"kind":"string","value":"fdns/app.py"},"max_stars_repo_name":{"kind":"string","value":"zhsj/httpdns"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171088"},"content":{"kind":"string","value":"import asyncio\nimport dnslib\nimport logging\nfrom .provider.http.google import HTTPGoogleResolver\n\nlogging.basicConfig(level=logging.DEBUG)\n_LOG = logging.getLogger(__name__)\n\npseudo_edns_client = '192.168.127.12'\n\n\nclass DNSServerProtocol(asyncio.DatagramProtocol):\n def __init__(self):\n self.resolver = HTTPGoogleResolver()\n\n def connection_made(self, transport):\n self.transport = transport\n\n def datagram_received(self, data, addr):\n _LOG.info('Received from '+str(addr))\n asyncio.ensure_future(self.handle(data, addr))\n\n async def handle(self, data, addr):\n record = dnslib.DNSRecord.parse(data)\n question = record.questions[0]\n qname = str(question.qname)\n qtype = question.qtype\n ans = await self.resolver.resolve(qname, qtype, pseudo_edns_client)\n for rr in ans:\n zone_format = \"{rname} {ttl} IN {rtype_name} {rdata}\"\n _rr = {\n 'rname': rr[0],\n 'ttl': rr[1],\n 'rtype_name': dnslib.QTYPE.forward[rr[2]],\n 'rdata': rr[3]\n }\n zone = zone_format.format(**_rr)\n _LOG.debug(zone)\n record.add_answer(*dnslib.RR.fromZone(zone))\n _LOG.info('Send to '+str(addr))\n self.transport.sendto(record.pack(), addr)\n\n\nclass DNSServer:\n def __init__(self, loop):\n self.loop = loop\n\n async def start(self):\n _LOG.info(\"Starting UDP server\")\n self.transport, self.proto = await self.loop.create_datagram_endpoint(\n DNSServerProtocol, local_addr=('127.0.0.1', 9999))\n\n\ndef main():\n async def stop(self):\n self.transport.close()\n\n loop = asyncio.get_event_loop()\n server = DNSServer(loop)\n asyncio.ensure_future(server.start())\n\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n loop.run_until_complete(server.stop())\n loop.close()\n\nif __name__ == '__main__':\n main()\n"},"size":{"kind":"number","value":1960,"string":"1,960"}}},{"rowIdx":128338,"cells":{"max_stars_repo_path":{"kind":"string","value":"Analysis/Hugh Blakemore/.ipynb_checkpoints/project_functions-checkpoint.py"},"max_stars_repo_name":{"kind":"string","value":"data301-2020-winter2/course-project-group_1017"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2171781"},"content":{"kind":"string","value":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport os\nsns.set_style(\"ticks\")\nsns.set_theme(\"paper\")\ndef load_and_process(cwd):\n os.chdir('../..')\n os.chdir(\"Data/Raw\")\n df1 = (\n pd.read_csv('medical_expenses.csv')\n .rename({'children':'Dependents'},axis=1)\n .dropna(subset=['charges'])\n .drop(['region','Dependents'],axis=1)\n .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'})\n )\n df2=(df1\n .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No'))\n .assign(Over_BMI= np.where(df1['bmi'] > 24.9 ,'Yes','No'))\n .assign(Under_BMI= np.where(df1['bmi'] < 18.5 ,'Yes','No'))\n .assign(Healthy = np.where( ( (df1['bmi'] <= 24.9)& (df1['smoker'] == 'no') & (df1['bmi'] >= 18.5 ) ),'yes','no'))\n .round({\"charges\":2,\"bmi\":1})\n .sort_values('charges',ascending=True)\n .reset_index(drop=True) \n )\n os.chdir(cwd)\n \n return df2\n\n\ndef Health(cwd):\n os.chdir('../..')\n os.chdir(\"Data/Raw\")\n df1 = (\n pd.read_csv('medical_expenses.csv')\n .rename({'children':'Dependents'},axis=1)\n .dropna(subset=['charges'])\n .drop(['region','Dependents'],axis=1)\n .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'})\n )\n df2=(df1\n .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No'))\n .assign(Healthy = np.where( ( (df1['bmi'] <= 24.9)& (df1['smoker'] == 'no') & (df1['bmi'] >= 18.5 ) ),'yes','no'))\n .round({\"charges\":2,\"bmi\":1})\n .sort_values('charges',ascending=True)\n .reset_index(drop=True) \n )\n\n\n dfH=(df2[(df2['bmi'] >= 18.5) & (df2['bmi'] <=24.9 ) &(df2['smoker'] == 'no') ])\n dfH= dfH.reset_index(drop=True)\n os.chdir(cwd)\n return dfH\n\ndef unHealth(cwd):\n os.chdir('../..')\n os.chdir(\"Data/Raw\")\n df1 = (\n pd.read_csv('medical_expenses.csv')\n .rename({'children':'Dependents'},axis=1)\n .dropna(subset=['charges'])\n .drop(['region','Dependents'],axis=1)\n .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'})\n )\n df2=(df1\n .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No'))\n .assign(Healthy = np.where( ( (df1['bmi'] <= 24.9)& (df1['smoker'] == 'no') & (df1['bmi'] >= 18.5 ) ),'yes','no'))\n .round({\"charges\":2,\"bmi\":1})\n .sort_values('charges',ascending=True)\n .reset_index(drop=True) \n )\n\n\n \n dfuH = (df2[(df2['bmi'] < 18.5) | (df2['bmi'] >24.9 ) | (df2['smoker'] == 'yes') ])\n dfuH = dfuH.reset_index(drop=True)\n os.chdir(cwd)\n return dfuH\n\n\n\n\ndef plotAvC(df):\n g=sns.lmplot(x='age', y='charges',data=df, \n scatter_kws={'s': 100, 'linewidth': 0.5, 'edgecolor': 'w'})\n return g\ndef brpltEC(df):\n g = sns.countplot(x=\"Excess_charges\",data=df)\n return g\ndef BrPltECD(df):\n g=sns.histplot(\n df, x=\"Excess_charges\", element=\"bars\",\n stat=\"density\",multiple=\"dodge\"\n )\n return g\n\ndef BrPltECDh(df):\n g=sns.histplot(\n df, x=\"Excess_charges\", element=\"bars\",\n stat=\"density\",multiple=\"dodge\",hue=\"smoker\"\n )\n return g\ndef BrPltECDB(df):\n g=sns.histplot(\n df, x=\"Excess_charges\", element=\"bars\",\n stat=\"density\",multiple=\"dodge\",hue=\"bmi\"\n )\n return g\ndef smoker(cwd):\n os.chdir('../..')\n os.chdir(\"Data/Raw\")\n df1 = (\n pd.read_csv('medical_expenses.csv')\n .rename({'children':'Dependents'},axis=1)\n .dropna(subset=['charges'])\n .drop(['region','Dependents'],axis=1)\n .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'})\n )\n df2=(df1\n .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No'))\n .round({\"charges\":2,\"bmi\":1})\n .sort_values('charges',ascending=True)\n .reset_index(drop=True) \n )\n\n\n dfuH=(df2[(df2['bmi'] >= 18.5) & (df2['bmi'] <= 24.9 ) & (df2['smoker'] == 'yes') ])\n dfuH = dfuH.reset_index(drop=True)\n return dfuH\n\ndef underBmi(cwd):\n os.chdir('../..')\n os.chdir(\"Data/Raw\")\n df1 = (\n pd.read_csv('medical_expenses.csv')\n .rename({'children':'Dependents'},axis=1)\n .dropna(subset=['charges'])\n .drop(['region','Dependents'],axis=1)\n .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'})\n )\n df2=(df1\n .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No'))\n .round({\"charges\":2,\"bmi\":1})\n .sort_values('charges',ascending=True)\n .reset_index(drop=True) \n )\n\n\n dfuH=(df2[(df2['bmi'] < 18.5) & (df2['smoker'] == 'no') ])\n dfuH = dfuH.reset_index(drop=True)\n os.chdir(cwd)\n return dfuH\n\ndef overBmi(cwd):\n os.chdir('../..')\n os.chdir(\"Data/Raw\")\n df1 = (\n pd.read_csv('medical_expenses.csv')\n .rename({'children':'Dependents'},axis=1)\n .dropna(subset=['charges'])\n .drop(['region','Dependents'],axis=1)\n .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'})\n )\n df2=(df1\n .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No'))\n .round({\"charges\":2,\"bmi\":1})\n .sort_values('charges',ascending=True)\n .reset_index(drop=True) \n )\n\n\n dfuH=(df2[(df2['bmi'] > 24.9 ) & (df2['smoker']=='no')])\n dfuH = dfuH.reset_index(drop=True)\n os.chdir(cwd)\n return dfuH\n\ndef BoxPlt(df):\n g=sns.boxplot(x='Healthy',y='charges',data=df)\n return g\ndef BoxPlts(df):\n g=sns.boxplot(x='smoker',y='charges',data=df)\n return g\ndef BoxPltub(df):\n df1=(df[(df['bmi'] <=24.9 ) &(df['smoker'] == 'no') ])\n g=sns.boxplot(x='Under_BMI',y='charges',data=df1)\n return g\ndef BoxPltob(df):\n df1=(df[(df['bmi'] >=18.5 ) &(df['smoker'] == 'no') ])\n g=sns.boxplot(x='Over_BMI',y='charges',data=df1)\n return g\ndef mean(df):\n dfm=df['charges'].mean()\n dfmr = round(dfm,2)\n return dfmr\ndef allsmoker(cwd):\n os.chdir('../..')\n os.chdir(\"Data/Raw\")\n df1 = (\n pd.read_csv('medical_expenses.csv')\n .rename({'children':'Dependents'},axis=1)\n .dropna(subset=['charges'])\n .drop(['region','Dependents'],axis=1)\n .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'})\n )\n df2=(df1\n .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No'))\n .round({\"charges\":2,\"bmi\":1})\n .sort_values('charges',ascending=True)\n .reset_index(drop=True) \n )\n\n\n dfuH=(df2[(df2['smoker'] == 'yes')])\n dfuH = dfuH.reset_index(drop=True)\n os.chdir(cwd)\n return dfuH\n\ndef obese(cwd):\n os.chdir('../..')\n os.chdir(\"Data/Raw\")\n df1 = (\n pd.read_csv('medical_expenses.csv')\n .rename({'children':'Dependents'},axis=1)\n .dropna(subset=['charges'])\n .drop(['region','Dependents'],axis=1)\n .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'})\n )\n df2=(df1\n .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No'))\n .assign(Over_BMI= np.where(df1['bmi'] > 24.9 ,'Yes','No'))\n .round({\"charges\":2,\"bmi\":1})\n .sort_values('charges',ascending=True)\n .reset_index(drop=True) \n )\n\n\n dfuH=(df2[(df2['bmi'] > 40.0 ) & (df2['smoker']=='no')])\n dfuH = dfuH.reset_index(drop=True)\n os.chdir(cwd)\n return dfuH\n\ndef RawDir():\n cwd=os.getcwd()\n os.chdir('../..')\n cwdm=os.getcwd()\n dir = os.chdir(\"Data/Raw\")\n return dir\n \ndef returnDir(cwd):\n dir = os.chdir(cwd)\n return dir\n"},"size":{"kind":"number","value":7795,"string":"7,795"}}},{"rowIdx":128339,"cells":{"max_stars_repo_path":{"kind":"string","value":"module1-introduction-to-sql/buddymove_holidayiq.py"},"max_stars_repo_name":{"kind":"string","value":"nchibana/DS-Unit-3-Sprint-2-SQL-and-Databases"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172020"},"content":{"kind":"string","value":"import sqlite3\nimport pandas as pd\n\ndf = pd.read_csv('buddymove_holidayiq.csv')\nconn = sqlite3.connect('buddymove_holidayiq.sqlite3')\ndf.to_sql('review', con=conn, if_exists='replace')\n\ndef sql_fetch(conn):\n cursor = conn.cursor()\n\n \"\"\" Get total number of rows\"\"\"\n\n query1 = '''SELECT count(*)\n FROM review;'''\n cursor.execute(query1)\n rows = cursor.fetchall()\n for row in rows:\n print(f'Total number of rows: {row[0]}')\n\n\n \"\"\" How many users who reviewed at least 100 Nature in the category also reviewed at least 100 in the Shopping category?\"\"\"\n\n query2 = '''SELECT COUNT(\"User Id\")from review\n WHERE Nature >= 100 AND Shopping >=100;'''\n cursor.execute(query2)\n rows2 = cursor.fetchall()\n for row in rows2:\n print(f'Users who reviewed at least 100 Nature and Shopping: {row[0]}')\n\n\n \"\"\"What are the average number of reviews for each category?\"\"\"\n\n query3 = '''SELECT AVG(Sports), AVG(Religious), AVG(Nature),\n AVG(Theatre), AVG(Shopping), AVG(Picnic)\n FROM review'''\n\n cursor.execute(query3)\n rows3 = cursor.fetchall()\n rows_result = [item for t in rows3 for item in t]\n labels = ['Sports','Religious','Nature','Theatre','Shopping', 'Picnic']\n for label, row in zip(labels, rows_result):\n print(f'Avergage number of {label} reviews: {row:.2f}')\n\n cursor.close()\n conn.commit()\n\nsql_fetch(conn)\n\n"},"size":{"kind":"number","value":1426,"string":"1,426"}}},{"rowIdx":128340,"cells":{"max_stars_repo_path":{"kind":"string","value":"python/LAC/triedtree.py"},"max_stars_repo_name":{"kind":"string","value":"lemonsuan/lac"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172107"},"content":{"kind":"string","value":"# -*- coding: UTF-8 -*-\n################################################################################\n#\n# Copyright (c) 2020 Baidu, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#################################################################################\n\n\"\"\"\n该模块实现Tried树,用于进行词典的多模匹配\n\"\"\"\n\nclass Node(object):\n \"\"\"Trie树的结点.\n\n Attributes:\n next: dict类型,指向子结点\n length: int类型,判断节点是否为单词\n \"\"\"\n __slots__ = ['next', 'length']\n\n def __init__(self):\n \"\"\"初始化空节点.\"\"\"\n self.next = {}\n self.length = -1\n\n\nclass TriedTree(object):\n \"\"\"实现Tried树的类\n\n Attributes:\n __root: Node类型,Tried树根节点\n \"\"\"\n\n def __init__(self):\n \"\"\"初始化TriedTree的根节点__root\"\"\"\n self.__root = Node()\n\n def add_word(self, word):\n \"\"\"添加单词word到Trie树中\"\"\"\n current = self.__root\n for char in word:\n current = current.next.setdefault(char, Node())\n current.length = len(word)\n\n def make(self):\n \"\"\"nothing to do\"\"\"\n pass\n\n def search(self, content):\n \"\"\"前向最大匹配.\n\n 对content的文本进行多模匹配,返回后向最大匹配的结果.\n\n Args:\n content: string类型, 用于多模匹配的字符串\n\n Returns:\n list类型, 最大匹配单词列表,每个元素为匹配的模式串在句中的起止位置,比如:\n [(0, 2), [4, 7]]\n\n \"\"\"\n result = []\n\n length = len(content)\n current_position = 0\n end_position = 0\n while current_position < length:\n p = self.__root\n matches = []\n for key in content[current_position:]:\n p = p.next.get(key, None)\n if not p:\n break\n if p.length > 0:\n end_position = current_position + p.length\n matches.append((current_position, end_position))\n if len(matches) > 0:\n result.append((matches[-1][0], matches[-1][1]))\n current_position = max(current_position + 1, end_position)\n\n return result\n\n def search_all(self, content):\n \"\"\"多模匹配的完全匹配.\n\n 对content的文本进行多模匹配,返回所有匹配结果\n\n Args:\n content: string类型, 用于多模匹配的字符串\n\n Returns:\n list类型, 所有匹配单词列表,每个元素为匹配的模式串在句中的起止位置,比如:\n [(0, 2), [4, 7]]\n\n \"\"\"\n result = []\n\n length = len(content)\n for current_position in range(length):\n p = self.__root\n for key in content[current_position:]:\n p = p.next.get(key, None)\n if not p:\n break\n if p.length > 0:\n result.append(\n (current_position, current_position + p.length))\n\n return result\n\n\nif __name__ == \"__main__\":\n words = [\"百度\", \"家\", \"家家\", \"高科技\", \"技公\", \"科技\", \"科技公司\"]\n string = '百度是家高科技公司'\n tree = TriedTree()\n for word in words:\n tree.add_word(word)\n\n for begin, end in tree.search(string):\n print(string[begin:end])\n\n\n"},"size":{"kind":"number","value":3468,"string":"3,468"}}},{"rowIdx":128341,"cells":{"max_stars_repo_path":{"kind":"string","value":"autoImport.py"},"max_stars_repo_name":{"kind":"string","value":"emirhanbilge/AutoLoginandSendMessageInstagram"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170379"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 27 15:55:00 2021\r\n\r\n@author: EBB\r\n\"\"\"\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nimport time \r\n\r\n\r\ndriver = webdriver.Chrome()\r\ndriver.get(\"https://www.instagram.com/accounts/login/?hl=tr\")\r\n\r\n\r\ndef clickFunctions(xpathURL):\r\n while(1): ## internet hızına göre elementin bulunması için geçecek zamanı bilmiyoruz onun için bu yapıyı kullanıyorum.\r\n try:\r\n driver.find_element(By.XPATH, xpathURL ).click() # bulunca tıklama ve break yapma\r\n break\r\n except:\r\n time.sleep(1)\r\n\r\ndef getElementFunction(xpathURL):\r\n e = 1 # elementi return ettirebilmemiz için objeyi eşitliyorum bunu tryda doğrudan return yaparsam none türüne düşebilir\r\n while(1):\r\n try:\r\n e = driver.find_element(By.XPATH, xpathURL)\r\n break\r\n except:\r\n time.sleep(1)\r\n return e\r\n\r\n\r\nusername = getElementFunction('//*[@id=\"loginForm\"]/div/div[1]/div/label/input') # Kullanıcı adı olan yeri alma\r\npassword = getElementFunction('//*[@id=\"loginForm\"]/div/div[2]/div/label/input') # Parola kısmını alma \r\n\r\n# Kullanıcı adını girme , parola girme ve butona basma\r\nusername.send_keys(\"Kanıcı gir\") # Kullanıcı adını yolla\r\npassword.send_keys(\"\") # Parolayı yolla \r\n\r\n\r\nclickFunctions('//*[@id=\"loginForm\"]/div/div[3]/button/div') # giriş butonuna tıklama\r\ntime.sleep(5) \r\n\r\n#anasayfa \r\ndriver.get(\"https://www.instagram.com/direct/new/\") # mesajlar kısmını açma\r\n\r\ntime.sleep(2) #sayfanın yüklenmesini bekleme\r\nclickFunctions('/html/body/div[6]/div/div/div/div[3]/button[2]') # bildirimleri kapata tıklama\r\n\r\nmessagePage = getElementFunction('/html/body/div[2]/div/div/div[2]/div[1]/div/div[2]/input') #kullanıcı adı girme yerini bulma\r\nmessagePage.send_keys(\"emreakins0\")# kullanıcı adını girme\r\n\r\nclickFunctions('/html/body/div[2]/div/div/div[2]/div[2]/div/div/div[3]/button') # kullanıcıyı seçme\r\n\r\nclickFunctions('/html/body/div[2]/div/div/div[1]/div/div[2]/div/button') #ilete tıklama\r\n\r\nmessageArea =getElementFunction('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea') #mesaj alanını bulma\r\nmessageArea.send_keys(\"Merhaba bu ebb'nin hazırladığı test kodudur\")# mesaj alanını doldurma\r\n\r\nclickFunctions('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button') #gönder butonuna tıklama\r\n"},"size":{"kind":"number","value":2465,"string":"2,465"}}},{"rowIdx":128342,"cells":{"max_stars_repo_path":{"kind":"string","value":"battlab_one.py"},"max_stars_repo_name":{"kind":"string","value":"rfc6919/battlab"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171626"},"content":{"kind":"string","value":"#!/usr/local/bin/python3\n\nimport serial\nimport struct\nimport collections\n\nTransaction = collections.namedtuple(\n 'Transaction', 'cmd,response_size,postprocess', defaults=[0, bytes.hex])\n\ntransactions = {\n 'set_voltage_1v2': Transaction(cmd=b'a'),\n 'set_voltage_1v5': Transaction(cmd=b'b'),\n 'set_voltage_2v4': Transaction(cmd=b'c'),\n 'set_voltage_3v0': Transaction(cmd=b'd'),\n 'set_voltage_3v2': Transaction(cmd=b'o'),\n 'set_voltage_3v6': Transaction(cmd=b'n'),\n 'set_voltage_3v7': Transaction(cmd=b'e'),\n 'set_voltage_4v2': Transaction(cmd=b'f'),\n 'set_voltage_4v5': Transaction(cmd=b'g'),\n\n 'set_psu_on': Transaction(cmd=b'h'),\n 'set_psu_off': Transaction(cmd=b'i'),\n\n 'get_calibration': Transaction(cmd=b'j', response_size=34, postprocess=lambda b: b),\n 'get_config': Transaction(cmd=b'm', response_size=4),\n 'get_version': Transaction(cmd=b'p', response_size=2, postprocess=lambda b: int.from_bytes(b, 'big')/1000),\n\n 'set_current_low': Transaction(cmd=b'k'),\n 'set_current_high': Transaction(cmd=b'l'),\n\n 'set_averages_1': Transaction(cmd=b's'), # only in version > 1001\n 'set_averages_4': Transaction(cmd=b't'),\n 'set_averages_16': Transaction(cmd=b'u'),\n 'set_averages_64': Transaction(cmd=b'v'),\n\n 'reset': Transaction(cmd=b'w', postprocess=lambda b: time.sleep(1)), # only in version > 1002\n\n 'set_sample_trig': Transaction(cmd=b'x'),\n 'set_sample_off': Transaction(cmd=b'y'),\n 'set_sample_on': Transaction(cmd=b'z'),\n}\n\n# indexes into the returned calibration data for sense resistor scaling values\ncal_indexes = {\n 'set_voltage_1v2': 0,\n 'set_voltage_1v5': 1,\n 'set_voltage_2v4': 2,\n 'set_voltage_3v0': 3,\n 'set_voltage_3v2': 3,\n 'set_voltage_3v6': 4,\n 'set_voltage_3v7': 5,\n 'set_voltage_4v2': 6,\n 'set_voltage_4v5': 7,\n}\n\n# indexes into the returned calibration data for sleep current offset values\noffset_indexes = {\n 'set_voltage_1v2': 8,\n 'set_voltage_1v5': 9,\n 'set_voltage_2v4': 10,\n 'set_voltage_3v0': 11,\n 'set_voltage_3v2': 12,\n 'set_voltage_3v6': 13,\n 'set_voltage_3v7': 14,\n 'set_voltage_4v2': 15,\n 'set_voltage_4v5': 16,\n}\n\nclass BattLabOne:\n\n def __init__(self, device=None):\n self.sp = None\n self.calibration_data = None\n\n self.cal_adj = None\n self.offset = None\n self.low_current = None\n\n if device:\n self.connect(device)\n\n def connect(self, device):\n self.sp = serial.Serial(\n device, baudrate=115200, parity='N', bytesize=8, stopbits=1)\n self.sp.reset_input_buffer()\n self.sp.reset_output_buffer()\n self.calibrate()\n return self\n\n def calibrate(self):\n calibration_data_raw = self._do_transaction('get_calibration')\n self.calibration_data = struct.unpack('>17H', calibration_data_raw)\n\n def _do_transaction(self, command):\n transaction = transactions[command]\n self.sp.write(transaction.cmd)\n response = self.sp.read(transaction.response_size)\n\n # give the firmware time to do whatever, since we can't know when it's completed\n if transaction.response_size == 0:\n time.sleep(0.01)\n\n # update calibration and offset if we've just set the supply voltage\n if command.startswith('set_voltage_'):\n self.cal_adj = self.calibration_data[cal_indexes[command]]/1000\n self.offset = self.calibration_data[offset_indexes[command]]\n\n # remember if we've got the low-current sense resistor enabled\n if command.startswith('set_current_'):\n self.low_current = command == 'set_current_low'\n\n return transaction.postprocess(response)\n\n def get_sample(self):\n raw_sample = self.sp.read(2)\n sample = int.from_bytes(raw_sample, 'big')\n sense_resistor_scale = 99 if self.low_current else self.cal_adj\n lsb = 0.0025 # magic value?\n current_mA = sample * lsb / sense_resistor_scale #- self.offset\n return current_mA\n\n\nif __name__ == '__main__':\n import sys\n import time\n import serial.tools.list_ports\n\n all_ports = serial.tools.list_ports.comports()\n battlab_one_ports = [p for p in all_ports if p.vid == 0x0403 and p.pid == 0x6001 and p.serial_number[:2] == \"BB\"]\n if len(battlab_one_ports) == 0:\n print('EE: no BattLab One found', file=sys.stderr)\n raise RuntimeError('no device found')\n elif len(battlab_one_ports) > 1:\n print('EE: multiple BattLab Ones (BattLabs One?) found', file=sys.stderr)\n raise RuntimeError('too many devices found')\n device = battlab_one_ports[0].device\n print(f'II: found BattLab One at {device}')\n b = BattLabOne(device)\n print('II: resetting')\n b._do_transaction('reset')\n print('II: firmware version {}'.format(b._do_transaction('get_version')))\n\n cmds = 'set_voltage_1v2 set_current_high set_averages_64 set_psu_on'.split(' ')\n for cmd in cmds:\n print(f'II: sending command {cmd}')\n b._do_transaction(cmd)\n time.sleep(10)\n print(f'II: starting sampling')\n b._do_transaction('set_sample_on')\n\n sample_count = 10000\n sample_sum = 0\n sample_min = sys.float_info.max\n sample_max = 0\n start_time = time.time()\n for n in range(sample_count):\n current_mA = b.get_sample()\n print(current_mA, file=f)\n sample_sum += current_mA\n sample_min = min(sample_min, current_mA)\n sample_max = max(sample_max, current_mA)\n end_time = time.time()\n b._do_transaction('set_sample_off')\n b.sp.reset_input_buffer()\n\n print(f'II: got {sample_count} samples in {end_time-start_time}s')\n print(f'II: cal_adj:{b.cal_adj}')\n print(f'II: min: {sample_min} max: {sample_max} avg: {sample_sum/sample_count}')\n"},"size":{"kind":"number","value":5853,"string":"5,853"}}},{"rowIdx":128343,"cells":{"max_stars_repo_path":{"kind":"string","value":"Leetcode/Competition/2.py"},"max_stars_repo_name":{"kind":"string","value":"ZR-Huang/AlgorithmPractices"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170479"},"content":{"kind":"string","value":"from typing import List\nclass Solution:\n def numTimesAllBlue(self, light: List[int]) -> int:\n # brute force\n # time limitation exceeded\n ans = 0\n length = len(light)\n states = ['0'] * len(light)\n visited = dict()\n for t, i in enumerate(light):\n all_light = True\n key = ''.join(states[:i])\n if key in visited:\n all_light = visited[key]\n else:\n for j in range(i-1):\n if states[j] == '0':\n all_light = False\n break\n visited[''.join(states[:i])] = all_light\n states[i-1] = '2' if all_light else '1'\n\n j = i\n while j < length and states[j] != '0':\n states[j] = '2' if states[j-1] == '2' else states[j]\n j += 1\n \n all_blue = True\n if states[j-1]=='2':\n while j < length:\n if states[j] == '1':\n all_blue = False\n break\n j += 1\n ans += 1 if all_blue else 0\n return ans\n\n def numTimesAllBlue(self, light: List[int]) -> int:\n # time limitation exceeded\n length = len(light)\n states = [0]*length\n light_not_blue = set()\n ans = 0\n for t, i in enumerate(light):\n if i-1>0:\n if states[i-2]==2:\n states[i-1] = 2\n else:\n states[i-1] = 1\n light_not_blue.add(i-1)\n else:\n states[i-1] = 2\n\n j = i\n while j < length and states[j] > 0:\n if states[j] == 1 and states[j-1]==2:\n states[j] = 2\n light_not_blue.remove(j)\n j+=1\n if not light_not_blue:\n ans += 1\n return ans\n\n def numTimesAllBlue(self, light: List[int]) -> int:\n currMax, ans = 0, 0\n for t, num in enumerate(light):\n currMax = max([currMax, num])\n if currMax == t + 1:\n ans += 1\n return ans\n\nprint(Solution().numTimesAllBlue([2,1,3,5,4]))\nprint(Solution().numTimesAllBlue([3,2,4,1,5]))\nprint(Solution().numTimesAllBlue([4,1,2,3]))\nprint(Solution().numTimesAllBlue([2,1,4,3,6,5]))\nprint(Solution().numTimesAllBlue([1,2,3,4,5,6]))"},"size":{"kind":"number","value":2444,"string":"2,444"}}},{"rowIdx":128344,"cells":{"max_stars_repo_path":{"kind":"string","value":"cmds/abos.space.py"},"max_stars_repo_name":{"kind":"string","value":"abos5/pythontutor"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171051"},"content":{"kind":"string","value":"#! /usr/bin/python\nimport sys\nimport getopt\n\n_debug = 0\n\n\ndef usage(err=False):\n print '''\n ssh:\n abos.space -option=value\n '''\n\n if err:\n print 'invalid argv!'\n\n\ndef main(argv):\n # retrive options\n try:\n opts, args = getopt.getopt(argv, \"hg:d:\", [\"hey\", ])\n except getopt.GetoptError:\n usage(True)\n sys.exit(2)\n\n # working on options\n for opt, arg in opts:\n if opt in (\"-h\", \"-help\"):\n usage()\n sys.exit()\n\n elif opt == '-d':\n global _debug\n _debug = 1\n elif opt in (\"-g\", \"-grammar\"):\n print arg\n\n\nif __name__ == '__main__':\n argv = sys.argv\n argv.pop(0)\n main(sys.argv)\n\n# sys.\n# eof\n"},"size":{"kind":"number","value":734,"string":"734"}}},{"rowIdx":128345,"cells":{"max_stars_repo_path":{"kind":"string","value":"attic/2019/contributions-2019/open/mudaliar-yptu/PWAF/testcases/hovers_test.py"},"max_stars_repo_name":{"kind":"string","value":"Agriad/devops-course"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171854"},"content":{"kind":"string","value":"\"\"\" \n@author: \n@email: \n@date: 20-May-18\n\"\"\"\nfrom pages.hovers_page import HoversPage\nfrom pages.welcome_page import WelcomePage\nfrom utility.drivermanager import DriverManagerFirefox, DriverManagerChrome\nfrom nose.plugins.attrib import attr\n\n@attr(group=['kth'])\nclass HoversTestFirefox(DriverManagerFirefox):\n def test_hover_functionality(self):\n welcome_page = WelcomePage(self.driver)\n welcome_page.verify_welcome_page().click_on_link(\"Hovers\")\n\n hovers_page = HoversPage(self.driver)\n hovers_page.verify_hovers_functionality()\n\n\n@attr(group=['kth'])\nclass HoversTestChrome(DriverManagerChrome):\n def test_hover_functionality(self):\n welcome_page = WelcomePage(self.driver)\n welcome_page.verify_welcome_page().click_on_link(\"Hovers\")\n\n hovers_page = HoversPage(self.driver)\n hovers_page.verify_hovers_functionality()\n"},"size":{"kind":"number","value":896,"string":"896"}}},{"rowIdx":128346,"cells":{"max_stars_repo_path":{"kind":"string","value":"samples/web/content/apprtc/util_test.py"},"max_stars_repo_name":{"kind":"string","value":"jsmithersunique/thegetvip_RTCsamples"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170841"},"content":{"kind":"string","value":"# Copyright 2014 Google Inc. All Rights Reserved.\n\nimport unittest\n\nimport util\n\nclass UtilTest(unittest.TestCase):\n def testGetMessageFromJson(self):\n self.assertEqual(None, util.get_message_from_json(\"\"))\n self.assertEqual({}, util.get_message_from_json(\"{}\"))\n self.assertEqual(\n {\"a\": \"b\",\"c\": False, \"d\": 1, \"e\" : [1,2,\"3\"]}, \n util.get_message_from_json('{\"a\":\"b\",\"c\":false,\"d\":1,\"e\":[1,2,\"3\"]}'))\n \n def testHasMsgField(self):\n testObject = {\n \"a\": False,\n \"b\": \"str\",\n \"c\": None,\n \"d\": {},\n \"e\": [1, 2, \"3\"],\n \"f\": [],\n \"g\": {'A': 1}\n }\n self.assertEqual(\n True,\n util.has_msg_field(testObject, \"a\", bool))\n self.assertEqual(\n False,\n util.has_msg_field(testObject, \"a\", basestring))\n self.assertEqual(\n False,\n util.has_msg_field(testObject, \"c\", bool))\n self.assertEqual(\n False,\n util.has_msg_field(testObject, \"d\", dict))\n self.assertEqual(\n True,\n util.has_msg_field(testObject, \"e\", list))\n self.assertEqual(\n False,\n util.has_msg_field(testObject, \"f\", list))\n self.assertEqual(\n True,\n util.has_msg_field(testObject, \"g\", dict))\n self.assertEqual(\n False,\n util.has_msg_field(testObject, \"h\", dict))\n self.assertEqual(\n False,\n util.has_msg_field(None, \"a\", dict))\n\n def testHasMsgFields(self):\n testObject = {\n \"a\": False,\n \"b\": \"str\",\n \"c\": None,\n \"d\": {},\n \"e\": [1, 2, \"3\"],\n \"f\": [],\n \"g\": {'A': 1}\n }\n self.assertEqual(\n True,\n util.has_msg_fields(\n testObject, \n ((\"a\", bool), (\"b\", basestring), (\"e\", list))))\n self.assertEqual(\n False,\n util.has_msg_fields(\n testObject, \n ((\"a\", bool), (\"b\", bool), (\"e\", list))))\n self.assertEqual(\n False,\n util.has_msg_fields(\n testObject, \n ((\"a\", bool), (\"h\", basestring), (\"e\", list))))\n\n def testGenerateRandomGeneratesStringOfRightLength(self):\n self.assertEqual(17, len(util.generate_random(17)))\n self.assertEqual(23, len(util.generate_random(23)))\n\nif __name__ == '__main__':\n unittest.main()\n"},"size":{"kind":"number","value":2251,"string":"2,251"}}},{"rowIdx":128347,"cells":{"max_stars_repo_path":{"kind":"string","value":"openfda/res/tests/scrape_historic_test.py"},"max_stars_repo_name":{"kind":"string","value":"hobochili/openfda"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170959"},"content":{"kind":"string","value":"#!/usr/bin/python\n\nimport unittest\nfrom openfda.res import scrape_historic\nimport os\nimport simplejson as json\n\nclass ScrapeHistoricUnitTest(unittest.TestCase):\n 'Scrape Historic Unit Test'\n\n def set_up(self):\n pass\n\n def test_scrape_june__13__2012(self):\n mydir = os.path.dirname(__file__)\n html = open(mydir + '/data/ucm308307.htm')\n expected_json = open(mydir + '/data/ucm308307.json').read()\n scraped_list = scrape_historic.scrape_report(html.read())\n actual_json = '\\n'.join([json.dumps(s) for s in scraped_list])\n self.assertEqual(expected_json, actual_json, mydir + '/data/ucm308307.json')\n\n def test_scrape_one_recall(self):\n mydir = os.path.dirname(__file__)\n recall = open(mydir + '/data/one-recall.txt').read().strip()\n expected_recall_json = open(mydir + '/data/one-recall.json').read().strip()\n actual_recall_json = json.dumps(scrape_historic.scrape_one_recall(recall))\n self.assertEqual(expected_recall_json, actual_recall_json, mydir + '/data/one-recall.json')\n\nif __name__ == '__main__':\n unittest.main()\n"},"size":{"kind":"number","value":1062,"string":"1,062"}}},{"rowIdx":128348,"cells":{"max_stars_repo_path":{"kind":"string","value":"Labs/ValueFunctionIteration/VFI_Solutions.py"},"max_stars_repo_name":{"kind":"string","value":"jessicaleete/numerical_computing"},"max_stars_count":{"kind":"number","value":10,"string":"10"},"id":{"kind":"string","value":"2172268"},"content":{"kind":"string","value":"#================================================\n#Solutions To Value Function Iteration Lab\n#================================================\n#\"Problem 1\"\n#import scipy as sp\n#from matplotlib import pyplot as plt\n#from matplotlib import cm\n#from mpl_toolkits . mplot3d import Axes3D\n#\n#\n#beta = 0.9;\n#T = 10;\n#N = 100;\n#u = lambda c: sp.sqrt(c);\n#W = sp.linspace(0,1,N);\n#X, Y = sp.meshgrid(W,W);\n#Wdiff = Y-X\n#index = Wdiff <0;\n#Wdiff[index] = 0;\n#util_grid = u(Wdiff);\n#util_grid[index] = -10**10;\n#V = sp.zeros((N,T+2));\n#psi = sp.zeros((N,T+1));\n#\n#\n#for k in xrange(T,-1,-1):\n# val = util_grid + beta*sp.tile(sp.transpose(V[:,k+1]),(N,1));\n# vt = sp.amax(val, axis = 1);\n# psi_ind = sp.argmax(val,axis = 1)\n# V[:,k] = vt;\n# psi[:,k] = W[psi_ind];\n#\n#\n#\n#x=sp.arange(0,N)\n#y=sp.arange(0,T+2)\n#X,Y=sp.meshgrid(x,y)\n#fig1 = plt.figure()\n#ax1= Axes3D(fig1)\n#ax1.plot_surface(W[X],Y,sp.transpose(V), cmap=cm.coolwarm)\n#plt.show ()\n#\n#fig2 = plt.figure() \n#ax2 = Axes3D(fig2)\n#y = sp.arange(0,T+1)\n#X,Y=sp.meshgrid(x,y)\n#ax2.plot_surface(W[X],Y,sp.transpose(psi), cmap = cm.coolwarm)\n#plt.show()\n\n\n#================================================\n\"Problem 2\"\n#import scipy as sp\n#from matplotlib import pyplot as plt\n#\n#beta = 0.9;\n#T = 1000;\n#N = 100;\n#u = lambda c: sp.sqrt(c);\n#W = sp.linspace(0,1,N);\n#X, Y = sp.meshgrid(W,W);\n#Wdiff = sp.transpose(X-Y);\n#index = Wdiff <0;\n#Wdiff[index] = 0;\n#util_grid = u(Wdiff);\n#util_grid[index] = -10**10;\n#V = sp.zeros((N,T+2));\n#psi = sp.zeros((N,T+1));\n#\n#\n#for k in xrange(T,-1,-1):\n# val = util_grid + beta*sp.tile(sp.transpose(V[:,k+1]),(N,1));\n# vt = sp.amax(val, axis = 1);\n# psi_ind = sp.argmax(val,axis = 1)\n# V[:,k] = vt;\n# psi[:,k] = W[psi_ind];\n#\n#\n#\n#plt.plot(psi[99,:])\n\n\n#================================================\n#\"Problem 3\"\n#import scipy as sp\n#from matplotlib import pyplot as plt\n#\n#beta = 0.99\n#N = 1000\n#u = lambda c: sp.sqrt(c)\n#W = sp.linspace(0,1,N)\n#X, Y = sp.meshgrid(W,W)\n#Wdiff = sp.transpose(X-Y)\n#index = Wdiff <0\n#Wdiff[index] = 0\n#util_grid = u(Wdiff)\n#util_grid[index] = -10**10\n#\n#Vprime = sp.zeros((N,1))\n#psi = sp.zeros((N,1))\n#delta = 1.0\n#tol = 10**-9\n#it = 0\n#max_iter = 500\n#\n#while (delta >= tol) and (it < max_iter):\n# V = Vprime\n# it += 1;\n# print(it)\n# val = util_grid + beta*sp.transpose(V)\n# Vprime = sp.amax(val, axis = 1)\n# Vprime = Vprime.reshape((N,1))\n# psi_ind = sp.argmax(val,axis = 1)\n# psi = W[psi_ind]\n# delta = sp.dot(sp.transpose(Vprime - V),Vprime-V)\n\n\n#plt.plot(W,psi) \n#plt.show()"},"size":{"kind":"number","value":2569,"string":"2,569"}}},{"rowIdx":128349,"cells":{"max_stars_repo_path":{"kind":"string","value":"Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/contentstore/management/commands/restore_asset_from_trashcan.py"},"max_stars_repo_name":{"kind":"string","value":"osoco/better-ways-of-thinking-about-software"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2171243"},"content":{"kind":"string","value":"\"\"\"Management command to restore assets from trash\"\"\"\n\n\nfrom django.core.management.base import BaseCommand\n\nfrom xmodule.contentstore.utils import restore_asset_from_trashcan\n\n\nclass Command(BaseCommand):\n \"\"\"Command class to handle asset restore\"\"\"\n help = '''Restore a deleted asset from the trashcan back to it's original course'''\n\n def add_arguments(self, parser):\n parser.add_argument('location')\n\n def handle(self, *args, **options):\n restore_asset_from_trashcan(options['location'])\n"},"size":{"kind":"number","value":518,"string":"518"}}},{"rowIdx":128350,"cells":{"max_stars_repo_path":{"kind":"string","value":"app/main.py"},"max_stars_repo_name":{"kind":"string","value":"HaeckelK/journal-api"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172424"},"content":{"kind":"string","value":"from typing import List\nfrom datetime import datetime\n\nfrom fastapi import Depends, FastAPI, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom . import crud, models, schemas\nfrom .database import SessionLocal, engine\n\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI()\n\n\n# Dependency\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n\n@app.post(\"/journals/\", response_model=schemas.Journal)\ndef create_journal(journal: schemas.JournalCreate, db: Session = Depends(get_db)):\n if journal.date != -1 and len(str(journal.date)) != 8:\n raise HTTPException(status_code=400, detail=\"date must be in yyyymmdd format\")\n\n if journal.date == -1:\n journal.date = datetime.today().strftime('%Y%m%d')\n\n db_journal = crud.get_journal(db, journal_id=journal.date)\n if db_journal:\n raise HTTPException(status_code=400, detail=f\"Journal with date {journal.date} already registered\")\n\n return crud.create_journal(db=db, journal=journal)\n\n\n@app.get(\"/journals/\", response_model=List[schemas.Journal])\ndef read_journals(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n journals = crud.get_journals(db, skip=skip, limit=limit)\n return journals\n\n\n@app.get(\"/journals/{journal_id}\", response_model=schemas.Journal)\ndef read_journal(journal_id: int, db: Session = Depends(get_db)):\n db_journal = crud.get_journal(db, journal_id=journal_id)\n if db_journal is None:\n raise HTTPException(status_code=404, detail=f\"Journal with id {journal_id} not found\")\n return db_journal\n\n\n@app.delete(\"/journals/{journal_id}\", response_model=schemas.Journal)\ndef delete_journal(journal_id: int, db: Session = Depends(get_db)):\n db_journal = crud.delete_journal(db, journal_id=journal_id)\n if db_journal is None:\n raise HTTPException(status_code=404, detail=f\"Journal with id {journal_id} not found\")\n return db_journal\n\n\n@app.post(\"/journals/{journal_id}/items/\", response_model=schemas.Item)\ndef create_item_for_journal(\n journal_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db)\n):\n db_journal = crud.get_journal(db, journal_id=journal_id)\n if not db_journal:\n raise HTTPException(status_code=400, detail=f\"Journal with id {journal_id} not registered\")\n return crud.create_journal_item(db=db, item=item, journal_id=journal_id)\n\n\n@app.get(\"/items/\", response_model=List[schemas.Item])\ndef read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):\n items = crud.get_items(db, skip=skip, limit=limit)\n return items\n\n\n@app.delete(\"/items/{item_id}\", response_model=schemas.Item)\ndef delete_item(item_id: int, db: Session = Depends(get_db)):\n db_item = crud.delete_item(db, item_id=item_id)\n if db_item is None:\n raise HTTPException(status_code=404, detail=f\"Item with id {item_id} not found\")\n return db_item\n"},"size":{"kind":"number","value":2894,"string":"2,894"}}},{"rowIdx":128351,"cells":{"max_stars_repo_path":{"kind":"string","value":"mysql-dummy-data/main.py"},"max_stars_repo_name":{"kind":"string","value":"panticne/Teaching-HEIGVD-AMT-2019-Project-One"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2170473"},"content":{"kind":"string","value":"import collections, re\r\nimport time\r\nimport copy\r\nimport config\r\nfrom constants import *\r\nfrom models import *\r\n\r\ntables = dict()\r\ndummy_rows = dict()\r\n\r\ndef initial_read_phpmyadmin():\r\n with open(config.input_file, \"r\") as f:\r\n line = f.readline()\r\n current_table_name = \"\"\r\n while line:\r\n create_table = re.match(CREATE_TABLE_REGEX, line)\r\n alter_table = re.match(ALTER_TABLE_REGEX, line)\r\n primary_key = re.match(PRIMARY_KEY_REGEX_PHPMYADMIN, line)\r\n row_entry = re.match(TABLE_COLUMN_REGEX, line)\r\n row_entry_date = re.match(TABLE_COLUMN_REGEX_DATE, line)\r\n foreign_key = re.match(FOREIGN_KEY_REGEX_PHPMYADMIN, line)\r\n auto_increment = re.match(AUTO_INCREMENT_REGEX, line)\r\n if create_table:\r\n current_table_name = create_table.group('table_name')\r\n tables[current_table_name] = Table(current_table_name)\r\n elif row_entry:\r\n tables[current_table_name].columns.append(Column(row_entry.group('column_name'), row_entry.group('type'), length=int(row_entry.group('length'))))\r\n elif row_entry_date:\r\n tables[current_table_name].columns.append(Column(row_entry_date.group('column_name'), row_entry_date.group('type')))\r\n elif alter_table:\r\n current_table_name = alter_table.group('table_name')\r\n elif foreign_key:\r\n other_table_name = foreign_key.group('other_table')\r\n other_column_name = foreign_key.group('other_column')\r\n this_column_name = foreign_key.group('foreign_key')\r\n tables[current_table_name].add_foreign_key(this_column_name, tables[other_table_name], other_column_name)\r\n elif primary_key:\r\n column = Table.find_column(tables[current_table_name], primary_key.group('primary_key'))\r\n column.pk = True\r\n elif auto_increment:\r\n column = Table.find_column(tables[current_table_name], auto_increment.group('column_name'))\r\n column.auto_increment = True\r\n line = f.readline()\r\n #Original tables variable will be used for topological pruning of the graph,\r\n #the copy will be used as \r\n return copy.copy(tables)\r\n\r\ndef initial_read_mysqldump():\r\n foreign_keys = []\r\n with open(config.input_file, \"r\") as f:\r\n line = f.readline()\r\n current_table_name = \"\"\r\n while line:\r\n create_table = re.match(CREATE_TABLE_REGEX, line)\r\n primary_key = re.match(PRIMARY_KEY_REGEX_MYSQLDUMP, line)\r\n row_entry = re.match(TABLE_COLUMN_REGEX, line)\r\n row_entry_date = re.match(TABLE_COLUMN_REGEX_DATE, line)\r\n foreign_key = re.match(FOREIGN_KEY_REGEX_MYSQLDUMP, line)\r\n if create_table:\r\n current_table_name = create_table.group('table_name')\r\n tables[current_table_name] = Table(current_table_name)\r\n elif row_entry:\r\n column = Column(row_entry.group('column_name'), row_entry.group('type'), length=int(row_entry.group('length')))\r\n tables[current_table_name].columns.append(column)\r\n if 'AUTO_INCREMENT' in line:\r\n column.auto_increment = True\r\n elif row_entry_date:\r\n tables[current_table_name].columns.append(Column(row_entry_date.group('column_name'), row_entry_date.group('type')))\r\n elif foreign_key:\r\n other_table_name = foreign_key.group('other_table')\r\n other_column_name = foreign_key.group('other_column')\r\n this_column_name = foreign_key.group('foreign_key')\r\n foreign_keys.append((tables[current_table_name], this_column_name, other_table_name, other_column_name))\r\n elif primary_key:\r\n column = Table.find_column(tables[current_table_name], primary_key.group('primary_key'))\r\n column.pk = True\r\n line = f.readline()\r\n #Process foreign keys after structure has been set, otherwise there will be an error because\r\n #foreign keys might reference non-existing tables\r\n for table, this_column_name, other_table_name, other_column_name in foreign_keys:\r\n table.add_foreign_key(this_column_name, tables[other_table_name], other_column_name)\r\n return copy.copy(tables)\r\n\r\ndef write_to_sql():\r\n with open(config.input_file, 'r') as input, open(config.output_file, 'w') as output:\r\n line = input.readline()\r\n current_table_name = None\r\n while line:\r\n create_table = re.match(CREATE_TABLE_REGEX, line)\r\n if create_table:\r\n current_table_name = create_table.group('table_name')\r\n elif line.startswith(\")\"):\r\n output.write(line)\r\n line = input.readline()\r\n output.write(line)\r\n line = input.readline()\r\n for row in generate_insert_clause(current_table_name):\r\n output.write(row)\r\n continue\r\n output.write(line)\r\n line = input.readline()\r\n \r\ndef generate_insert_clause(table_name):\r\n table = tables_copy[table_name]\r\n columns = list(map(lambda column: \"`{}`\".format(column.name), table.columns))\r\n lines = [\"INSERT INTO `{0}` ({1}) VALUES\\n\".format(table_name, ', '.join(columns))]\r\n lines.extend(table.dummy_rows)\r\n return lines\r\n\r\nif __name__ == \"__main__\":\r\n import argparse\r\n\r\n parser = argparse.ArgumentParser(description=\"Generates dummy data given a MySQL dump file.\")\r\n parser.add_argument('input_file')\r\n parser.add_argument('--rows', help=\"Enter the desired number of rows.\", type=int, default=10)\r\n parser.add_argument('--phpmyadmin', help=\"Use this flag if the dump was exported using phpmyadmin\", action='store_true')\r\n parser.add_argument('--output', help=\"Set the output file name. Default is output.sql.\", type=str, default=\"output.sql\")\r\n args = parser.parse_args()\r\n config.row_count = args.rows\r\n config.input_file = args.input_file\r\n config.output_file = args.output\r\n tables_copy = initial_read_mysqldump() if not args.phpmyadmin else initial_read_phpmyadmin()\r\n while tables:\r\n queue = collections.deque()\r\n #Process tables that have no dependencies\r\n for table_name in list(tables):\r\n if tables[table_name].outdegree == 0:\r\n queue.append(tables[table_name])\r\n del tables[table_name]\r\n while queue:\r\n table = queue.popleft()\r\n for column in table.columns:\r\n column.generate_data()\r\n for child_table in table.child_tables:\r\n #Decrement parent table's outdegree by 1, now that this child table\r\n #has been processed\r\n child_table.outdegree -= 1\r\n table.insert_rows()\r\n dummy_rows[table.name] = table.dummy_rows\r\n write_to_sql()\r\n "},"size":{"kind":"number","value":7057,"string":"7,057"}}},{"rowIdx":128352,"cells":{"max_stars_repo_path":{"kind":"string","value":"data_serializers/chinese.py"},"max_stars_repo_name":{"kind":"string","value":"wjshan/qrcode"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171650"},"content":{"kind":"string","value":"from .base import SerializerAble\nimport codecs\nfrom ..version_serializers import Version\n\n\nclass ChineseSerializer(SerializerAble):\n flag = '1101'\n\n @staticmethod\n def counting_indicator_map(version_num: int) -> int:\n if version_num <= 9:\n return 8\n elif version_num <= 26:\n return 10\n else:\n return 12\n\n def encode(self, **kwargs) -> str:\n \"\"\"\n 1. 对于第一字节值在0xA1到0xAA范围,第二字节值在0xA1到0xFE范围的字符\n 1)第一字节值减去0xA1\n 2)将1)的结果乘以0x60\n 3)第二字节值减去0xA1\n 4)将2)的结果加上3)的结果\n 5)将结果转换为13位二进制串\n 2. 对于第一字节值在0xBO到0xFA范围,第二字节值在0xA1到0xFE范围的字符\n 1)第一字节值减去0xA6\n 2)将1)的结果乘以0x60\n 3)第二字节值减去0xA1\n 4)将2)的结果加上3)的结果\n 5)将结果转换为13位二进制串\n \"\"\"\n character = \"\"\n for _c in self.raw_data:\n c1, c2 = _c.encode('gb2312')\n if 0xA1 <= c1 <= 0xAA:\n c1 -= 0xA1\n else:\n c1 -= 0xA6\n c1 *= 0x60\n c2 -= 0xA1\n character += self.bin(c1 + c2, 13)\n return character\n\n @classmethod\n def get_len(cls, version: Version, raw_data: str) -> int:\n counting = cls.counting_indicator_map(version.version_num)\n return 4 + 4 + counting + 13 * len(raw_data)\n"},"size":{"kind":"number","value":1326,"string":"1,326"}}},{"rowIdx":128353,"cells":{"max_stars_repo_path":{"kind":"string","value":"generateURDF.py"},"max_stars_repo_name":{"kind":"string","value":"giaco5988/BrickRegistration"},"max_stars_count":{"kind":"number","value":121,"string":"121"},"id":{"kind":"string","value":"2172322"},"content":{"kind":"string","value":"import os\nimport sys\nimport threading\nfrom object2urdf import ObjectUrdfBuilder\nimport shutil\n\n# Build single URDFs\nobject_folder = \"lego\"\n\n#An ugly copy paste of build_library to catch exception\ndef safe_build_library(urdfbuilder, **kwargs):\n print(\"\\nFOLDER: %s\"%(urdfbuilder.object_folder))\n\n # Get all OBJ files\n obj_files = urdfbuilder._get_files_recursively(urdfbuilder.object_folder, filter_extension='.obj', exclude_suffix=urdfbuilder.suffix)\n stl_files = urdfbuilder._get_files_recursively(urdfbuilder.object_folder, filter_extension='.stl', exclude_suffix=urdfbuilder.suffix) \n\n obj_folders=[]\n for root, _, full_file in obj_files:\n obj_folders.append(root)\n try:\n urdfbuilder.build_urdf(full_file,**kwargs)\n except:\n print(\"An exception occured during \" + full_file )\n common = os.path.commonprefix([urdfbuilder.object_folder,full_file])\n rel = os.path.join(full_file.replace(common,''))\n print('\\tBuilding: %s'%(rel) )\n \n for root, _, full_file in stl_files:\n if root not in obj_folders:\n try:\n urdfbuilder.build_urdf(full_file,**kwargs)\n except:\n print(\"An exception occured during \" + full_file)\n \n common = os.path.commonprefix([urdfbuilder.object_folder,full_file])\n rel = os.path.join(full_file.replace(common,''))\n print('Building: %s'%(rel) )\n\ndef thread_function(tindex,nthreads):\n shutil.copy( \"_prototype.urdf\",object_folder+\"-\"+str(tindex),follow_symlinks=True)\n builder = ObjectUrdfBuilder(object_folder+\"-\"+str(tindex))\n #we use center = \"geometry\" instead of \"mass\" because it fails on some objects and make the program crash\n #we use depth=1 as an extra parameter for vhacd so that it sacrifice collision geometry quality so that \n #it goes faster during simulation\n #oclAcceleration=0\n safe_build_library(builder,force_overwrite=True, decompose_concave=True, force_decompose=False, center = 'geometry',depth=1)\n \n \nnthreads = 8\nfor i in range(nthreads):\n x = threading.Thread(target=thread_function, args=(i,nthreads))\n x.start()"},"size":{"kind":"number","value":2259,"string":"2,259"}}},{"rowIdx":128354,"cells":{"max_stars_repo_path":{"kind":"string","value":"api/Folder.py"},"max_stars_repo_name":{"kind":"string","value":"BowangLan/uw-tools"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171652"},"content":{"kind":"string","value":"import typing\nfrom __future__ import annotations\nfrom dataclasses import dataclass\nfrom .util import with_client\nimport httpx\nfrom .Site import Site\nfrom .File import File\n\n@dataclass\nclass Folder:\n id: str\n name: str\n full_name: str = None\n parent: Folder = None\n\n full_files_url: str = None\n full_folders_url: str = None\n files: typing.List[File] = None\n folders: typing.List[Folder] = None\n\n created_at: str = None\n updated_at: str = None\n\n site: Site\n\n def make_get_items_params(self):\n params = {\n \"include[]\": [\n \"user\", \n \"usage_rights\", \n \"enhanced_preview_url\",\n \"context_asset_string\",\n ],\n \"per_page\": \"20\",\n \"sort\": \"\",\n \"order\": \"\",\n }\n return params\n\n @with_client\n def get_files(self, client: httpx.Client = None):\n params = self.make_get_items_params()\n res = client.get(self.full_files_url, params=params)\n data = res.json()\n self.files = [\n File(\n **data,\n site=self.site,\n parent=self\n ) \n for i in data]\n return self.files\n\n @with_client\n def get_folders(self, client: httpx.Client = None, with_params=True):\n if with_params:\n params = self.make_get_items_params()\n res = client.get(self.full_folders_url, params=params)\n else:\n res = client.get(self.full_folders_url)\n data = res.json()\n self.folders = [\n Folder(\n id=str(i['id']), \n name=i['name'], \n site=self.site,\n parent=self\n ) \n for i in data]\n \n @with_client\n def get_items(self, client: httpx.Client = None):\n self.get_folders(client=client)\n self.get_files(client=client)\n"},"size":{"kind":"number","value":1919,"string":"1,919"}}},{"rowIdx":128355,"cells":{"max_stars_repo_path":{"kind":"string","value":"PaddleFSL/paddlefsl/utils/eval.py"},"max_stars_repo_name":{"kind":"string","value":"Chaoqun-Guo/FSL-Mate"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171168"},"content":{"kind":"string","value":"# Copyright 2021 PaddleFSL Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport paddle\n\n\n__all__ = ['classification_acc']\n\n\ndef classification_acc(predict, label):\n \"\"\"\n Calculate classification accuracy: correct_result / sample_number\n\n Args:\n predict(paddle.Tensor): predictions, shape (sample_number, class_number), in the form of one-hot coding.\n label(paddle.Tensor): labels, shape (sample_number), in the form of continuous coding.\n\n Returns:\n accuracy(float): classification accuracy.\n\n Examples:\n ..code-block:: python\n\n import paddlefsl.utils as utils\n predictions = paddle.to_tensor([[0.1, 0.9], [0.8, 0.2]], dtype='float32')\n labels = paddle.to_tensor([0, 0], dtype='int64')\n accuracy = utils.classification_acc(predictions, labels) # accuracy: 0.5\n\n \"\"\"\n correct = 0\n for i in range(predict.shape[0]):\n if paddle.argmax(predict[i]) == int(label[i]):\n correct += 1\n return float(correct) / predict.shape[0]"},"size":{"kind":"number","value":1543,"string":"1,543"}}},{"rowIdx":128356,"cells":{"max_stars_repo_path":{"kind":"string","value":"code_week29_119_1115/freedom_trail.py"},"max_stars_repo_name":{"kind":"string","value":"dylanlee101/leetcode"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171804"},"content":{"kind":"string","value":"'''\n电子游戏“辐射4”中,任务“通向自由”要求玩家到达名为“Freedom Trail Ring”的金属表盘,并使用表盘拼写特定关键词才能开门。\n\n给定一个字符串 ring,表示刻在外环上的编码;给定另一个字符串 key,表示需要拼写的关键词。您需要算出能够拼写关键词中所有字符的最少步数。\n\n最初,ring 的第一个字符与12:00方向对齐。您需要顺时针或逆时针旋转 ring 以使 key 的一个字符在 12:00 方向对齐,然后按下中心按钮,以此逐个拼写完 key 中的所有字符。\n\n旋转 ring 拼出 key 字符 key[i] 的阶段中:\n\n您可以将 ring 顺时针或逆时针旋转一个位置,计为1步。旋转的最终目的是将字符串 ring 的一个字符与 12:00 方向对齐,并且这个字符必须等于字符 key[i] 。\n如果字符 key[i] 已经对齐到12:00方向,您需要按下中心按钮进行拼写,这也将算作 1 步。按完之后,您可以开始拼写 key 的下一个字符(下一阶段), 直至完成所有拼写。\n示例:\n\n \n\n\n \n输入: ring = \"godding\", key = \"gd\"\n输出: 4\n解释:\n 对于 key 的第一个字符 'g',已经在正确的位置, 我们只需要1步来拼写这个字符。\n 对于 key 的第二个字符 'd',我们需要逆时针旋转 ring \"godding\" 2步使它变成 \"ddinggo\"。\n 当然, 我们还需要1步进行拼写。\n 因此最终的输出是 4。\n提示:\n\nring 和 key 的字符串长度取值范围均为 1 至 100;\n两个字符串中都只有小写字符,并且均可能存在重复字符;\n字符串 key 一定可以由字符串 ring 旋转拼出。\n\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/freedom-trail\n'''\n\n\nclass Solution:\n def findRotateSteps(self, ring: str, key: str) -> int:\n MAX = float('inf')\n ringDict = collections.defaultdict(list)\n for i, r in enumerate(ring):\n ringDict[r].append(i)\n\n def minDist(target, preState):\n res = MAX\n for i, d in preState:\n curDist = d + min(abs(target - i), abs(target - i + n), abs(target - i - n))\n res = min(res, curDist)\n return res\n\n n = len(ring)\n dp = [(0, 0)]\n for k in key:\n dp = [(index, minDist(index, dp)) for index in ringDict[k]]\n\n return min(dist for _, dist in dp) + len(key)\n\n"},"size":{"kind":"number","value":1484,"string":"1,484"}}},{"rowIdx":128357,"cells":{"max_stars_repo_path":{"kind":"string","value":"generators.py"},"max_stars_repo_name":{"kind":"string","value":"Cesar17188/python_avanzado"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170370"},"content":{"kind":"string","value":"import time\n\ndef fibo_gen(max: int):\n n1 = 0\n n2 = 1\n counter = 0\n while True:\n if counter == 0:\n counter += 1\n yield n1\n elif counter == 1:\n counter += 1\n yield n2\n else:\n if counter < max:\n aux = n1 + n2\n n1, n2 = n2, aux\n counter += 1\n yield aux\n else:\n break\n\ndef call_fibo(func):\n def wrapper(*args, **kwargs):\n print('La seríe Fibonnaci es: ')\n func(*args, **kwargs)\n print('Gracias por usar secuencia Fibonacci, Hasta pronto!!')\n return wrapper\n\n@call_fibo\ndef iter_element(max):\n fibonacci = fibo_gen(max)\n for i, element in enumerate(fibonacci):\n print(f'element {i+1} --> {element}')\n time.sleep(1)\n\nif __name__ == '__main__':\n max = int(input('Ingrese la cantidad de número de la seríe Fibonacci que desea: '))\n iter_element(max)\n \n\n "},"size":{"kind":"number","value":978,"string":"978"}}},{"rowIdx":128358,"cells":{"max_stars_repo_path":{"kind":"string","value":"applications/talos/contrib/ldap.py"},"max_stars_repo_name":{"kind":"string","value":"triflesoft/django-application-talos"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2172152"},"content":{"kind":"string","value":"from ldap3 import Server, Connection, ALL\nfrom ldap3.core.exceptions import * # TODO too generic import\nfrom talos.models import BasicIdentityDirectoryOption\n\nbasic_identity_directory_option = BasicIdentityDirectoryOption.objects.filter(\n directory__code='ldap')\n\n\nclass LdapConnection:\n\n def __init__(self):\n needful_items = ['host', 'username', 'password', 'port', 'user_search_base', 'cn_search_base']\n values = {}\n\n for item in needful_items:\n try:\n queryset = basic_identity_directory_option.get(name=item)\n values[item] = queryset.value\n except BasicIdentityDirectoryOption.DoesNotExist:\n raise BasicIdentityDirectoryOption.DoesNotExist(\n 'Please specify ldap {item} in BasicIdentityDirectoryOption'.format(item=item))\n\n self.host = values['host']\n self.port = int(values['port'])\n self.username = values['username']\n self.password = values['password']\n self.user_search_base = values['user_search_base']\n self.cn_search_base = values['cn_search_base']\n\n self.server = self.server_connect()\n self.connection = self.create_connection()\n\n def server_connect(self):\n server = Server(self.host, port=self.port, get_info=ALL)\n if not server.check_availability():\n raise Exception('LDAP Server is not reachable')\n return server\n\n def create_connection(self):\n if not self.server:\n raise Exception(\"Please run connect()\")\n connection = Connection(self.server, user=self.username, password=self.password, raise_exceptions=True)\n connection.open()\n\n try:\n connection.bind()\n\n except LDAPInvalidCredentialsResult:\n\n raise LDAPAttributeError(\"Invalid LDAP Credentials\")\n\n return connection\n\n def check_credentials(self, username, password):\n # If user principal name is entered ()\n if '@' in username:\n search_filter = \"userPrincipalName\"\n search_value = username\n\n # If user NetBios\\sAMAccountName is entered\n elif \"\\\\\" in username:\n net_bios_name = username.split('\\\\')[0]\n username = username.split('\\\\')[1]\n\n self.connection.search(\n search_base=self.cn_search_base,\n search_filter='(netbiosname=*)',\n attributes=['*'])\n net_bios_name_entries = self.connection.entries\n\n if len(net_bios_name_entries) == 0:\n raise LDAPAttributeError(\"NetBos name not found\")\n\n # If user input netbios name match netbios name searched in LDAP\n elif net_bios_name != self.connection.entries[0]['nETBIOSName']:\n raise LDAPInvalidCredentialsResult(\"Invalid NetBios name\")\n\n # If dc=server, dc=com is matched to read domain controller\n elif self.user_search_base != self.connection.entries[0]['nCName']:\n raise LDAPInvalidCredentialsResult(\"Invalid NetBios name\")\n\n search_value = username\n search_filter = \"sAMAccountName\"\n\n else:\n search_value = username\n search_filter = \"sAMAccountName\"\n\n self.connection.search(\n search_base=self.user_search_base,\n search_filter='({search_filter}={search_value})'.format(\n search_filter=search_filter,\n search_value=search_value),\n attributes='userPrincipalName')\n # If no user found\n if len(self.connection.entries) != 1:\n raise LDAPInvalidCredentialsResult('Username not found in LDAP')\n\n user_principal_name = str(self.connection.entries[0]['userPrincipalName'])\n\n self.connection = Connection(\n self.server,\n user=user_principal_name,\n password=password,\n check_names=True,\n lazy=False,\n raise_exceptions=True,\n auto_bind=True)\n self.connection.open()\n\n try:\n self.connection.bind()\n except LDAPInvalidCredentialsResult:\n raise LDAPInvalidCredentialsResult(\"Invalid credentials\")\n\n return True\n"},"size":{"kind":"number","value":4249,"string":"4,249"}}},{"rowIdx":128359,"cells":{"max_stars_repo_path":{"kind":"string","value":"soc/rtl/debug/debug_mem.py"},"max_stars_repo_name":{"kind":"string","value":"mfkiwl/pifive-cpu"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2172346"},"content":{"kind":"string","value":"from migen import *\nimport math\nfrom third_party import wishbone as wb\n\nclass DebugMemory(Module):\n def __init__(self, bus=None, debug_bus=None, enable_code=0xABAB12):\n if bus is None:\n self.bus = wb.Interface(data_width=32, adr_width=32)\n else:\n self.bus = bus\n\n if debug_bus is None:\n self.debug_bus = wb.Interface(data_width=32, adr_width=32)\n else:\n self.debug_bus = debug_bus\n\n wb_rd_req = self.bus.cyc & self.bus.stb & ~self.bus.ack & ~self.bus.we\n wb_wr_req = self.bus.cyc & self.bus.stb & ~self.bus.ack & self.bus.we\n\n wb_rd_data = Signal(32)\n\n self.comb += self.bus.dat_r.eq(wb_rd_data)\n\n enable_entry = Signal(32, reset=0)\n enabled = enable_entry == Constant(enable_code)\n\n # 00 = Cfg/Status {22'b0, err, ack, sel[3:0], 2'b0, rd_req, wr_req}\n # 04 = Addr\n # 08 = Write Data\n # 0C = Read Data\n # 10 = Enable\n\n self.sync += [\n self.bus.ack.eq(0),\n self.bus.err.eq(0),\n\n self.debug_bus.ack.eq(0),\n self.debug_bus.err.eq(0),\n self.debug_bus.dat_r.eq(0),\n\n If(self.debug_bus.stb & self.debug_bus.cyc & ~self.debug_bus.ack,\n self.debug_bus.ack.eq(1),\n\n If((self.debug_bus.adr >> 2) == 0,\n If(enabled & self.debug_bus.we & self.debug_bus.sel[1],\n If(self.debug_bus.dat_w[8], self.bus.ack.eq(1)).\n Elif(self.debug_bus.dat_w[9], self.bus.err.eq(1))),\n\n self.debug_bus.dat_r.eq(Cat(wb_wr_req, wb_rd_req, Constant(0, bits_sign=2), self.bus.sel))),\n\n If((self.debug_bus.adr >> 2) == 1,\n self.debug_bus.dat_r.eq(self.bus.adr)),\n\n If((self.debug_bus.adr >> 2) == 2,\n self.debug_bus.dat_r.eq(self.bus.dat_w)),\n\n If((self.debug_bus.adr >> 2) == 3,\n If(self.debug_bus.we & self.debug_bus.sel[0], wb_rd_data[0:8].eq(self.debug_bus.dat_w[0:8])),\n If(self.debug_bus.we & self.debug_bus.sel[1], wb_rd_data[8:16].eq(self.debug_bus.dat_w[8:16])),\n If(self.debug_bus.we & self.debug_bus.sel[2], wb_rd_data[16:24].eq(self.debug_bus.dat_w[16:24])),\n If(self.debug_bus.we & self.debug_bus.sel[3], wb_rd_data[24:32].eq(self.debug_bus.dat_w[24:32])),\n self.debug_bus.dat_r.eq(wb_rd_data)),\n\n If((self.debug_bus.adr >> 2) == 4,\n If(self.debug_bus.we & self.debug_bus.sel[0], enable_entry[0:8].eq(self.debug_bus.dat_w[0:8])),\n If(self.debug_bus.we & self.debug_bus.sel[1], enable_entry[8:16].eq(self.debug_bus.dat_w[8:16])),\n If(self.debug_bus.we & self.debug_bus.sel[2], enable_entry[16:24].eq(self.debug_bus.dat_w[16:24])),\n If(self.debug_bus.we & self.debug_bus.sel[3], enable_entry[24:32].eq(self.debug_bus.dat_w[24:32])),\n self.debug_bus.dat_r.eq(enable_entry)),\n )\n ]\n\n"},"size":{"kind":"number","value":3061,"string":"3,061"}}},{"rowIdx":128360,"cells":{"max_stars_repo_path":{"kind":"string","value":"apps/users/urls.py"},"max_stars_repo_name":{"kind":"string","value":"shao-169/SLTP"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170343"},"content":{"kind":"string","value":"# _*_ encoding:utf-8 _*_\nfrom django.conf.urls import url\nfrom users.views import LoginView,LogoutView,RegisterView,ActiveUserView,IndexView\\\n ,ForgetPWView,FindPWCordView,UserInfoView,NickNameView,WorkNameView,UploadImageView,MyCourseView,\\\n DeleteMyCourseView,MyFavCouresView,MyFavTeacherView,MyPhoneView\n__author__ = 'YZF'\n__date__ = '2018/3/14,16:26'\nurlpatterns = [\n url(r'^login/', LoginView.as_view(), name='login'),\n url(r'^register/',RegisterView.as_view(),name='register'),\n # url(r'forget/(?P.*)*/',ForgetPWView.as_view(),name='forget'),\n url(r'^forget/', ForgetPWView.as_view(), name='forget'),\n url(r'^logout/',LogoutView.as_view(),name='logout'),\n url(r'^forget_cord/',FindPWCordView.as_view(),name='forgetcord'),\n # 激活用户url\n url('^active/(?P.*)/', ActiveUserView.as_view(), name= \"user_active\"),\n url('^info/', UserInfoView.as_view(), name=\"user_info\"),\n url('^nickname/', NickNameView.as_view(), name=\"info_nickname\"),\n url('^phone/', MyPhoneView.as_view(), name=\"info_phone\"),\n url('^workname/', WorkNameView.as_view(), name=\"info_workname\"),\n url(r'^mycourses/', MyCourseView.as_view(), name='info_courses'),\n url(r'^favteacher/', MyFavTeacherView.as_view(), name='fav_teacher'),\n url(r'^myfav/', MyFavCouresView.as_view(), name='info_myfav'),\n url('^delete/(?P.*)/', DeleteMyCourseView.as_view(), name= \"course_delete\"),\n url(r'^uploadoimg/', UploadImageView.as_view(), name='image_upload'),\n\n\n]"},"size":{"kind":"number","value":1502,"string":"1,502"}}},{"rowIdx":128361,"cells":{"max_stars_repo_path":{"kind":"string","value":"jp.atcoder/abc049/arc065_a/11874696.py"},"max_stars_repo_name":{"kind":"string","value":"kagemeka/atcoder-submissions"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2171497"},"content":{"kind":"string","value":"import sys\r\n\r\nt = set(\"dream, dreamer, erase, eraser\".split(\", \"))\r\n\r\n\r\ndef obtainable(s):\r\n while True:\r\n if s[-5:] in t:\r\n s = s[:-5]\r\n elif s[-6:] in t:\r\n s = s[:-6]\r\n else:\r\n return False\r\n if not s:\r\n return True\r\n\r\n\r\ns = sys.stdin.readline().rstrip()\r\n\r\n\r\ndef main():\r\n print(\"YES\" if obtainable(s) else \"NO\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n"},"size":{"kind":"number","value":439,"string":"439"}}},{"rowIdx":128362,"cells":{"max_stars_repo_path":{"kind":"string","value":"codes_auto/1678.number-of-ways-to-split-a-string.py"},"max_stars_repo_name":{"kind":"string","value":"smartmark-pro/leetcode_record"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2169468"},"content":{"kind":"string","value":"#\n# @lc app=leetcode.cn id=1678 lang=python3\n#\n# [1678] number-of-ways-to-split-a-string\n#\nNone\n# @lc code=end"},"size":{"kind":"number","value":110,"string":"110"}}},{"rowIdx":128363,"cells":{"max_stars_repo_path":{"kind":"string","value":"pyqt_foldable_toolbar/foldableToolBar.py"},"max_stars_repo_name":{"kind":"string","value":"yjg30737/pyqt-foldable-toolbar"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171894"},"content":{"kind":"string","value":"from PyQt5.QtWidgets import QToolBar, QWidget, QHBoxLayout, QSizePolicy, QAction, QWidgetAction\nfrom PyQt5.QtCore import Qt, QPropertyAnimation, QAbstractAnimation\nfrom pyqt_svg_button import SvgButton\n\n\nclass FoldableToolBar(QToolBar):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.__initUi()\n\n def __initUi(self):\n self.setMovable(False)\n\n self.__foldBtn = SvgButton()\n self.__foldBtn.setIcon('ico/fold.svg')\n self.__foldBtn.setCheckable(True)\n self.__foldBtn.toggled.connect(self.__fold)\n self.__foldBtn.setMaximumWidth(12)\n\n cornerWidget = QWidget()\n lay = QHBoxLayout()\n lay.addWidget(self.__foldBtn)\n lay.setAlignment(Qt.AlignRight | Qt.AlignBottom)\n lay.setContentsMargins(0, 0, 0, 0)\n cornerWidget.setLayout(lay)\n cornerWidget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)\n\n self.__foldAction = QWidgetAction(self)\n self.__foldAction.setDefaultWidget(cornerWidget)\n\n self.addAction(self.__foldAction)\n\n self.__menuAnimation = QPropertyAnimation(self, b\"height\")\n self.__menuAnimation.valueChanged.connect(self.setFixedHeight)\n\n self.__menuAnimation.setStartValue(self.sizeHint().height())\n self.__menuAnimation.setDuration(200) # default duration\n self.__menuAnimation.setEndValue(self.__foldBtn.sizeHint().height()) # default end value\n\n def __fold(self, f):\n if f:\n self.__menuAnimation.setDirection(QAbstractAnimation.Forward)\n self.__menuAnimation.start()\n self.__foldBtn.setIcon('ico/unfold.svg')\n self.setFixedHeight(self.__foldBtn.sizeHint().height())\n else:\n self.__menuAnimation.setDirection(QAbstractAnimation.Backward)\n self.__menuAnimation.start()\n self.__foldBtn.setIcon('ico/fold.svg')\n self.setFixedHeight(self.sizeHint().height())\n\n def addWidget(self, widget: QWidget) -> QAction:\n self.insertWidget(self.__foldAction, widget)\n self.__menuAnimation.setStartValue(self.sizeHint().height())"},"size":{"kind":"number","value":2151,"string":"2,151"}}},{"rowIdx":128364,"cells":{"max_stars_repo_path":{"kind":"string","value":"leasing/tests/api/test_create_invoice.py"},"max_stars_repo_name":{"kind":"string","value":"hkotkanen/mvj"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171507"},"content":{"kind":"string","value":"import datetime\nimport json\nfrom decimal import Decimal\n\nimport pytest\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.urls import reverse\nfrom django.utils import timezone\n\nfrom leasing.enums import ContactType, TenantContactType\nfrom leasing.models import Invoice\n\n\n@pytest.mark.django_db\ndef test_create_invoice(django_db_setup, admin_client, lease_factory, tenant_factory,\n tenant_rent_share_factory, contact_factory, tenant_contact_factory):\n lease = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1,\n start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True)\n\n tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=1)\n tenant_rent_share_factory(tenant=tenant1, intended_use_id=1, share_numerator=1, share_denominator=1)\n contact1 = contact_factory(first_name=\"\", last_name=\"\", type=ContactType.PERSON)\n tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant1, contact=contact1,\n start_date=datetime.date(year=2000, month=1, day=1))\n\n data = {\n 'lease': lease.id,\n 'recipient': contact1.id,\n 'due_date': '2019-01-01',\n 'rows': [\n {\n 'amount': Decimal(10),\n 'receivable_type': 1,\n }\n ],\n }\n\n url = reverse('invoice-list')\n response = admin_client.post(url, data=json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n\n assert response.status_code == 201, '%s %s' % (response.status_code, response.data)\n\n invoice = Invoice.objects.get(pk=response.data['id'])\n\n assert invoice.invoicing_date == timezone.now().date()\n assert invoice.outstanding_amount == Decimal(10)\n\n\n@pytest.mark.django_db\ndef test_create_invoice_for_tenant(django_db_setup, admin_client, lease_factory, tenant_factory,\n tenant_rent_share_factory, contact_factory, tenant_contact_factory):\n lease = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1,\n start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True)\n\n tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=1)\n tenant_rent_share_factory(tenant=tenant1, intended_use_id=1, share_numerator=1, share_denominator=1)\n contact1 = contact_factory(first_name=\"\", last_name=\"\", type=ContactType.PERSON)\n tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant1, contact=contact1,\n start_date=datetime.date(year=2000, month=1, day=1))\n\n data = {\n 'lease': lease.id,\n 'tenant': tenant1.id,\n 'due_date': '2019-01-01',\n 'rows': [\n {\n 'amount': Decimal(10),\n 'receivable_type': 1,\n }\n ],\n }\n\n url = reverse('invoice-list')\n response = admin_client.post(url, data=json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n\n assert response.status_code == 201, '%s %s' % (response.status_code, response.data)\n\n invoice = Invoice.objects.get(pk=response.data['id'])\n\n assert invoice.invoicing_date == timezone.now().date()\n assert invoice.outstanding_amount == Decimal(10)\n assert invoice.recipient == contact1\n assert invoice.rows.first().tenant == tenant1\n\n\n@pytest.mark.django_db\ndef test_create_invoice_for_tenant_with_billing_contact(django_db_setup, admin_client, lease_factory, tenant_factory,\n tenant_rent_share_factory, contact_factory,\n tenant_contact_factory):\n lease = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1,\n start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True)\n\n tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=1)\n tenant_rent_share_factory(tenant=tenant1, intended_use_id=1, share_numerator=1, share_denominator=1)\n contact1 = contact_factory(first_name=\"\", last_name=\"\", type=ContactType.PERSON)\n tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant1, contact=contact1,\n start_date=datetime.date(year=2000, month=1, day=1))\n contact2 = contact_factory(first_name=\"\", last_name=\"\", type=ContactType.PERSON)\n tenant_contact_factory(type=TenantContactType.BILLING, tenant=tenant1, contact=contact2,\n start_date=datetime.date(year=2000, month=1, day=1))\n\n data = {\n 'lease': lease.id,\n 'tenant': tenant1.id,\n 'due_date': '2019-01-01',\n 'rows': [\n {\n 'amount': Decimal(10),\n 'receivable_type': 1,\n }\n ],\n }\n\n url = reverse('invoice-list')\n response = admin_client.post(url, data=json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n\n assert response.status_code == 201, '%s %s' % (response.status_code, response.data)\n\n invoice = Invoice.objects.get(pk=response.data['id'])\n\n assert invoice.invoicing_date == timezone.now().date()\n assert invoice.outstanding_amount == Decimal(10)\n assert invoice.recipient == contact2\n assert invoice.rows.first().tenant == tenant1\n\n\n@pytest.mark.django_db\ndef test_create_invoice_tenant_not_in_lease(django_db_setup, admin_client, lease_factory, tenant_factory,\n tenant_rent_share_factory, contact_factory, tenant_contact_factory):\n lease = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1,\n start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True)\n tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=1)\n tenant_rent_share_factory(tenant=tenant1, intended_use_id=1, share_numerator=1, share_denominator=1)\n contact1 = contact_factory(first_name=\"\", last_name=\"\", type=ContactType.PERSON)\n tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant1, contact=contact1,\n start_date=datetime.date(year=2000, month=1, day=1))\n\n lease2 = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1,\n start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True)\n tenant2 = tenant_factory(lease=lease2, share_numerator=1, share_denominator=1)\n tenant_rent_share_factory(tenant=tenant2, intended_use_id=1, share_numerator=1, share_denominator=1)\n contact2 = contact_factory(first_name=\"\", last_name=\"\", type=ContactType.PERSON)\n tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant2, contact=contact2,\n start_date=datetime.date(year=2000, month=1, day=1))\n\n data = {\n 'lease': lease.id,\n 'tenant': tenant2.id,\n 'due_date': '2019-01-01',\n 'rows': [\n {\n 'amount': Decimal(10),\n 'receivable_type': 1,\n }\n ],\n }\n\n url = reverse('invoice-list')\n response = admin_client.post(url, data=json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json')\n\n assert response.status_code == 400, '%s %s' % (response.status_code, response.data)\n"},"size":{"kind":"number","value":7475,"string":"7,475"}}},{"rowIdx":128365,"cells":{"max_stars_repo_path":{"kind":"string","value":"scripts/reactor/banbanNormal.py"},"max_stars_repo_name":{"kind":"string","value":"G00dBye/YYMS"},"max_stars_count":{"kind":"number","value":54,"string":"54"},"id":{"kind":"string","value":"2171866"},"content":{"kind":"string","value":"hitCount = 0\n\n# global hitCount\n# hitCount += 1\n# sm.chat(str(hitCount))\n# if hitCount >= 1:\nsm.spawnMob(9303154, -135, 455, False)\nsm.removeReactor()\nsm.dispose()\n"},"size":{"kind":"number","value":164,"string":"164"}}},{"rowIdx":128366,"cells":{"max_stars_repo_path":{"kind":"string","value":"1-HRF-xgb/repre/graphconv.py"},"max_stars_repo_name":{"kind":"string","value":"iamlockelightning/HIF-KAT"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2172477"},"content":{"kind":"string","value":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch_geometric.nn import GCNConv, GATConv\n\nclass GraphConvLayer(nn.Module):\n def __init__(\n self,\n adj_index,\n adj_weight, \n input_dim = 32,\n output_dim = 16,\n dropout = 0.4,\n activation = F.leaky_relu,\n device = \"cuda\",\n text_field = None,\n share = True,\n bias = False,\n residual = False\n ):\n assert text_field is not None\n super(GraphConvLayer, self).__init__()\n self.share = share\n self.residual = residual\n self.fields = text_field[:]\n self.activation = activation\n self.dropout = nn.Dropout(dropout)\n self.adj_index = adj_index\n self.adj_weight = adj_weight\n\n self.gcns = nn.ModuleDict()\n\n if residual:\n self.reslin = nn.ModuleDict()\n\n if share:\n gcn = GCNConv(input_dim, output_dim, cached = False, bias = bias, normalize = True, improved = True)\n if residual:\n lin = nn.Linear(input_dim, output_dim, bias = bias)\n for field in self.fields:\n self.gcns[field] = gcn\n if residual:\n self.reslin[field] = lin\n else:\n for field in self.fields:\n self.gcns[field] = GCNConv(\n input_dim, \n output_dim, \n cached = False, \n bias = bias, \n normalize = True, \n improved = True\n )\n if residual:\n self.reslin[field] = nn.Linear(input_dim, output_dim, bias = bias)\n\n if \"cuda\" in device:\n self.cuda()\n\n def forward(self, batch):\n ret = dict()\n for field in self.fields:\n rep = batch[field]\n ### Add Dropout 2020-03-25 ### \n # rep = self.dropout(rep)\n ###\n\n rep = self.gcns[field](rep, self.adj_index, self.adj_weight)\n if self.residual:\n rep += self.reslin[field](batch[field])\n rep = self.activation(rep)\n ret[field] = rep\n\n return ret\n # return {field: self.activation(self.gcns[field](batch[field], self.adj_index, self.adj_weight)) for field in self.fields}\n\n\n\n\n\n\n\n\n\nclass MLP(nn.Module):\n def __init__(\n self,\n input_dim = 32,\n output_dim = 16,\n dropout = 0.4,\n activation = F.leaky_relu,\n device = \"cuda\",\n text_field = None,\n bias = False\n ):\n assert text_field is not None\n super(MLP, self).__init__()\n self.fields = text_field[:]\n self.activation = activation\n self.dropout = nn.Dropout(dropout)\n\n self.mlps = nn.ModuleDict()\n\n\n\n for field in self.fields:\n self.mlps[field] = torch.nn.Linear(\n in_features = input_dim,\n out_features = output_dim,\n bias = bias\n )\n\n if \"cuda\" in device:\n self.cuda()\n\n def forward(self, batch):\n ret = dict()\n for field in self.fields:\n rep = batch[field]\n ### Add Dropout 2020-03-25 ### \n # rep = self.dropout(rep)\n ###\n\n rep = self.mlps[field](rep)\n rep = self.activation(rep)\n ret[field] = rep\n\n return ret\n\n\n\n\nclass GraphAtteLayer(nn.Module):\n def __init__(\n self,\n adj_index,\n adj_weight, \n input_dim = 32,\n output_dim = 16,\n head_number = 8, ### New\n dropout = 0.4,\n activation = F.leaky_relu,\n device = \"cuda\",\n text_field = None,\n share = True,\n bias = False,\n residual = False,\n concat = False\n ):\n assert text_field is not None\n super(GraphAtteLayer, self).__init__()\n self.share = share\n self.residual = residual\n self.fields = text_field[:]\n self.activation = activation\n self.dropout = nn.Dropout(dropout)\n self.adj_index = adj_index\n self.adj_weight = adj_weight\n\n self.gats = nn.ModuleDict()\n\n if share:\n pass\n\n else:\n for field in self.fields:\n self.gats[field] = GATConv(\n input_dim,\n output_dim,\n head_number,\n concat = concat,\n dropout = dropout\n )\n \n if \"cuda\" in device:\n self.cuda()\n\n def forward(self, batch):\n ret = dict()\n for field in self.fields:\n rep = batch[field]\n rep = self.gats[field](rep, self.adj_index)\n rep = self.activation(rep)\n ret[field] = rep\n\n return ret\n\n\n\n\n\n\n\n\nclass HighWay(torch.nn.Module):\n def __init__(self, f_in, f_out, bias=True):\n super(HighWay, self).__init__()\n self.w = Parameter(torch.Tensor(f_in, f_out))\n nn.init.xavier_uniform_(self.w)\n if bias:\n self.bias = Parameter(torch.Tensor(f_out))\n nn.init.constant_(self.bias, 0)\n else:\n self.register_parameter('bias', None)\n \n def forward(self, ori_input, in_1, in_2):\n t = torch.mm(ori_input, self.w)\n if self.bias is not None:\n t = t + self.bias\n gate = torch.sigmoid(t)\n return gate * in_2 + (1.0 - gate) * in_1\n\n\n\nclass GraphConvHighWayLayer(nn.Module):\n def __init__(\n self,\n adj_index,\n adj_weight, \n input_dim = 32,\n output_dim = 16,\n dropout = 0.4,\n activation = F.leaky_relu,\n device = \"cuda\",\n text_field = None,\n share = True,\n bias = False,\n residual = False\n ):\n assert text_field is not None\n super(GraphConvHighWayLayer, self).__init__()\n self.share = share\n self.residual = residual\n self.fields = text_field[:]\n self.activation = activation\n self.dropout = nn.Dropout(dropout)\n self.adj_index = adj_index\n self.adj_weight = adj_weight\n\n self.gcns = nn.ModuleDict()\n self.reslin = nn.ModuleDict()\n self.highway_net = nn.ModuleDict()\n\n\n for field in self.fields:\n self.gcns[field] = GCNConv(\n input_dim, \n output_dim, \n cached = True, \n bias = bias, \n normalize = True, \n improved = True\n )\n self.reslin[field] = nn.Linear(input_dim, output_dim, bias = bias)\n self.highway_net[field] = HighWay(f_in = input_dim, f_out = output_dim, bias = bias)\n\n\n if \"cuda\" in device:\n self.cuda()\n\n def forward(self, batch):\n ret = dict()\n for field in self.fields:\n ori_rep = batch[field]\n ### Add Dropout 2020-03-25 ### \n # rep = self.dropout(rep)\n ###\n gcn_rep = self.gcns[field](rep, self.adj_index, self.adj_weight)\n res_rep += self.reslin[field](ori_rep)\n \n rep = self.highway_net[field](ori_rep, res_rep, gcn_rep)\n rep = self.activation(rep)\n ret[field] = rep\n\n return ret"},"size":{"kind":"number","value":7347,"string":"7,347"}}},{"rowIdx":128367,"cells":{"max_stars_repo_path":{"kind":"string","value":"epicteller/core/controller/member.py"},"max_stars_repo_name":{"kind":"string","value":"KawashiroNitori/epicteller"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172041"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom typing import Optional, Dict, Union, Iterable\n\nimport bcrypt\n\nfrom epicteller.core.dao.member import MemberDAO, MemberExternalDAO\nfrom epicteller.core.model.member import Member\nfrom epicteller.core.util import validator\nfrom epicteller.core.util.enum import ExternalType\n\n\ndef _gen_passhash(password: str) -> str:\n passhash = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt(rounds=10)).decode('utf8')\n return passhash\n\n\nasync def get_member(member_id: Optional[int]=None, *,\n url_token: Optional[str]=None,\n email: Optional[str]=None) -> Optional[Member]:\n if member_id:\n return (await MemberDAO.batch_get_member_by_id([member_id])).get(member_id)\n elif url_token:\n return (await MemberDAO.batch_get_member_by_url_token([url_token])).get(url_token)\n elif email:\n return await MemberDAO.get_member_by_email(email)\n return None\n\n\nasync def batch_get_member(member_ids: Iterable[int]=None, *,\n url_tokens: Iterable[str]=None) -> Dict[Union[int, str], Member]:\n if member_ids:\n return await MemberDAO.batch_get_member_by_id(member_ids)\n elif url_tokens:\n return await MemberDAO.batch_get_member_by_url_token(url_tokens)\n return {}\n\n\nasync def check_member_email_password(email: str, password: str) -> Optional[Member]:\n email = email.lower()\n member = await get_member(email=email)\n if not member:\n return\n matched = bcrypt.checkpw(password.encode('utf8'), member.passhash.encode('utf8'))\n if not matched:\n return\n return member\n\n\nasync def create_member(name: str, email: str, password: str) -> Member:\n passhash = _gen_passhash(password)\n email = email.lower()\n member = await MemberDAO.create_member(name, email, passhash)\n external_id = validator.parse_external_id_from_qq_email(email)\n if external_id:\n await bind_member_external_id(member.id, ExternalType.QQ, external_id)\n return member\n\n\nasync def change_member_password(member_id: int, password: str):\n passhash = _gen_passhash(password)\n await MemberDAO.update_member(member_id, passhash=passhash)\n\n\nasync def bind_member_external_id(member_id: int, external_type: ExternalType, external_id: str) -> None:\n await MemberExternalDAO.bind_member_external_id(member_id, external_type, external_id)\n\n\nasync def unbind_member_external_id(member_id: int, external_type: ExternalType) -> None:\n await MemberExternalDAO.unbind_member_external_id(member_id, external_type)\n\n\nasync def get_member_externals(member_id: int) -> Dict[ExternalType, str]:\n return await MemberExternalDAO.get_external_ids_by_member(member_id)\n\n\nasync def get_member_by_external(external_type: ExternalType, external_id: str) -> Optional[Member]:\n member_id = await MemberExternalDAO.get_member_id_by_external(external_type, external_id)\n if not member_id:\n return None\n return await get_member(member_id)\n\n\nasync def update_member(member_id: int, **kwargs):\n await MemberDAO.update_member(member_id, **kwargs)\n"},"size":{"kind":"number","value":3110,"string":"3,110"}}},{"rowIdx":128368,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_lipid_tilt.py"},"max_stars_repo_name":{"kind":"string","value":"blakeaw/ORBILT"},"max_stars_count":{"kind":"number","value":11,"string":"11"},"id":{"kind":"string","value":"2172110"},"content":{"kind":"string","value":"from __future__ import print_function\nimport pybilt.bilayer_analyzer.bilayer_analyzer as ba\ndef test_lipid_tilt():\n analyzer = ba.BilayerAnalyzer(structure='../pybilt/sample_bilayer/sample_bilayer.psf',\n trajectory='../pybilt/sample_bilayer/sample_bilayer_10frames.dcd',\n selection=\"resname POPC DOPE TLCL2\")\n\n #remove the default msd analysis\n analyzer.remove_analysis('msd_1')\n analyzer.add_analysis(\"lipid_tilt lt leaflet lower resname POPC style order ref_axis z\")\n analyzer.adjust_rep_setting('vector_frame', 'ref_atoms', {'DOPE':{'start':\n ['C218','C318'], 'end':'P'},\n 'POPC':{'start':['C218', 'C316'],\n 'end':'P'}, 'TLCL2':\n {'start':['CA18','CB18','CC18',\n 'CD18'], 'end':['P1', 'P3']}})\n analyzer.run_analysis()\n lt_dat = analyzer.get_analysis_data('lt')\n print('Lipid Tilts (vs time):')\n print(lt_dat)\n\nif __name__ == '__main__':\n test_lipid_tilt()\n"},"size":{"kind":"number","value":1192,"string":"1,192"}}},{"rowIdx":128369,"cells":{"max_stars_repo_path":{"kind":"string","value":"forNSF/soupextract.py"},"max_stars_repo_name":{"kind":"string","value":"ctames/conference-host"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2172486"},"content":{"kind":"string","value":"from bs4 import BeautifulSoup, SoupStrainer\nimport urllib2\nfrom urlparse import urljoin\n\ndef extractPdf(url, levels):\n outurls = [] #pdfs found within n levels\n thiscycle = [url] #List of urls to look through on current loop iteration\n linkstrainer = SoupStrainer(\"a\")\n hdr = {'User-Agent': 'Mozilla/5.0'}\n for i in range (0, levels):\n nextcycle = [] #Urls to look at in next loop iteration\n for currurl in thiscycle:\n print currurl\n request = urllib2.Request(currurl, headers=hdr)\n try:\n page = urllib2.urlopen(request)\n except:\n print 'fuck'\n continue\n soup = BeautifulSoup(page, parse_only=linkstrainer)\n for link in soup.find_all('a'):\n linkurl = link.get('href')\n if not linkurl:\n continue\n elif linkurl[-4:] == '.pdf':\n print linkurl\n if linkurl[:4] == 'http':\n outurls.append(linkurl)\n else:\n finalurl = urljoin(currurl, linkurl)\n outurls.append(finalurl)\n elif i != levels-1:\n if linkurl[:4] == 'http':\n nextcycle.append(linkurl)\n else:\n finalurl = urljoin(currurl, linkurl)\n nextcycle.append(finalurl)\n if not nextcycle:\n return outurls\n thiscycle = nextcycle[:]\n return outurls\n"},"size":{"kind":"number","value":1568,"string":"1,568"}}},{"rowIdx":128370,"cells":{"max_stars_repo_path":{"kind":"string","value":".Python Challenges - 101Computing/Random Library Challenges/postcode.py"},"max_stars_repo_name":{"kind":"string","value":"Gustavo-daCosta/Projetos"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2172305"},"content":{"kind":"string","value":"#Your task is to write a Python script to generates a random UK postcode in the format: LetterLetterNumber_NumberLetterLetter.\nfrom random import randint\n\npostcode = ''\n\nfor position in range(0, 7):\n if position == 2 or position == 4:\n postcode += str(randint(0, 9))\n elif position == 3:\n postcode += ' '\n else:\n postcode += str(chr(randint(65, 90)))\n\nprint(f'Postcode: {postcode}')"},"size":{"kind":"number","value":412,"string":"412"}}},{"rowIdx":128371,"cells":{"max_stars_repo_path":{"kind":"string","value":"authentication/urls.py"},"max_stars_repo_name":{"kind":"string","value":"funsojoba/SendMe_finance_api"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171739"},"content":{"kind":"string","value":"from django.urls import path\n\nfrom .views.register import RegisterView\nfrom .views.login import LoginView\n\nurlpatterns = [\n path('register/', RegisterView.as_view()),\n path('login/', LoginView.as_view())\n]\n"},"size":{"kind":"number","value":212,"string":"212"}}},{"rowIdx":128372,"cells":{"max_stars_repo_path":{"kind":"string","value":"Models/zodesolve.py"},"max_stars_repo_name":{"kind":"string","value":"lefthandedroo/Cosmodels"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172304"},"content":{"kind":"string","value":"7#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 15 13:38:48 2018\n\n@author: BallBlueMeercat\n\"\"\"\nfrom scipy.integrate import odeint\nfrom scipy.interpolate import interp1d\nimport firstderivs_cython as f\nimport numpy as np\n\nfirstderivs_functions = {\n 'rainbow':f.rainbow,\n 'niagara':f.niagara,\n 'kanangra':f.kanangra,\n 'waterfall':f.waterfall,\n 'stepfall':f.stepfall,\n 'exotic':f.exotic,\n 'late_intxde':f.late_intxde,\n 'heaviside_late_int':f.heaviside_late_int,\n 'heaviside_sudden':f.heaviside_sudden,\n 'late_int':f.late_int,\n 'expgamma':f.expgamma,\n 'txgamma':f.txgamma,\n 'zxgamma':f.zxgamma,\n 'gamma_over_z':f.gamma_over_z,\n 'zxxgamma':f.zxxgamma,\n 'gammaxxz':f.gammaxxz,\n 'rdecay_m':f.rdecay_m,\n 'rdecay_de':f.rdecay_de,\n 'rdecay_mxde':f.rdecay_mxde,\n 'rdecay':f.rdecay,\n 'interacting':f.interacting,\n 'LCDM':f.LCDM,\n 'rLCDM':f.rLCDM\n }\ndef zodesolve(names, values, zpicks, model, plot_key, interpolate=False):\n \"\"\"\n Takes in:\n names = list of strings, names of parameters to be fitted;\n values = np.array, values of parameters;\n zpicks = np.ndarray of redshifts ;\n model = string, name of model being tested.\n\n \"\"\"\n all_zpicks = zpicks\n\n if len(zpicks) > 1048: # larger than pantheon sample\n interpolate = True\n zpicks = np.linspace(zpicks[0], zpicks[-1], num=100, endpoint=True)\n\n # Inserting 0 at the front of redshifts to use initial conditions.\n zpicks = np.insert(zpicks, 0, 0.0)\n\n # Standard cosmological parameters.\n H0 = 1.0\n c = 1.0\n c_over_H0 = 4167 * 10**6 # c/H0 in parsecs\n\n # Initial conditions at z = 0 (now).\n t0 = 0.0 # time\n a0 = 1.0 # scale factor\n z0 = 0.0 # redshift\n dl0 = 0.0 # luminosity distance\n rho_c0 = H0**2 # critical density\n\n # Pack up the initial conditions and interaction terms.\n int_terms = []\n\n if model == 'rainbow':\n int_in = 12\n elif model == 'niagara':\n int_in = 10\n elif model == 'kanangra':\n int_in = 8\n elif model == 'waterfall':\n int_in = 6\n elif model == 'stepfall':\n int_in = 4\n elif model == 'exotic':\n int_in = 3\n elif model == 'LCDM' or model == 'rLCDM':\n int_in = len(values)\n else:\n int_in = 2\n\n int_terms = values[int_in:]\n fluids = values[1:int_in]\n ombar_de0 = rho_c0/rho_c0 - np.sum(fluids)\n\n t0a0 = np.array([t0, a0])\n de0z0dl0 = np.array([ombar_de0, z0, dl0])\n\n # Remember that you lost precision when concatenating arr over using a list.\n v0 = np.concatenate((t0a0, fluids, de0z0dl0))\n\n # Extracting the parsed mode of interaction.\n firstderivs_function = firstderivs_functions.get(model,0)\n assert firstderivs_function != 0, \"zodesolve doesn't have this firstderivs_key at the top\"\n # Call the ODE solver with all zpicks or cut_zpicks if len(zpicks) > 2000.\n vsol = odeint(firstderivs_function, v0, zpicks, args=(int_terms,H0), mxstep=5000000, atol=1.0e-8, rtol=1.0e-6)\n\n\n z = vsol[1:,-2]\n dl = vsol[1:,-1] * (1+z) # in units of dl*(H0/c)\n da = dl * (1.0+z)**(-2.0) # in units of dl*(H0/c)\n dlpc = dl * c_over_H0 # dl in parsecs (= vsol[dl] * c/H0)\n dapc = dlpc * (1.0+z)**(-2.0) # in units of pc\n dapc = dapc / 10**3 # in units of kpc\n\n# integrated_dlpc = dlpc\n\n plot_var = {}\n if plot_key:\n # Separate results into their own arrays:\n plot_var['t'] = vsol[1:,0]\n plot_var['a'] = vsol[1:,1]\n\n # Collecting fluids and their names for plotting:\n fluid_arr = np.zeros(((int_in), (len(zpicks)-1)))\n fluid_names = []\n for i in range((int_in-1)):\n fluid_names.append(names[i+1])\n fluid_arr[i] = vsol[1:,(i+2)]\n fluid_names.append('de_ombar')\n fluid_arr[-1] = vsol[1:,-3]\n plot_var['fluid_names'] = fluid_names\n plot_var['fluid_arr'] = fluid_arr\n\n plot_var['z'] = z\n plot_var['dl'] = dl # in units of dl*(H0/c)\n plot_var['int_terms'] = int_terms\n\n plot_var['da'] = da\n\n\n Hz = H0 * (np.sum(fluid_arr, axis=0))**(0.5)\n plot_var['Hz'] = Hz\n\n daMpc = dlpc/10**6 * (1.0+z)**(-2.0) # in units of dl in Mpc*(H0/c)\n dV = (daMpc**2 * c*z/Hz)**(1/3) # combines radial and transverse dilation\n plot_var['dV'] = dV\n\n if interpolate:\n # Interpolating results to give output for all zpicks:\n interp_dlpc = interp1d(zpicks[1:], dlpc)\n interp_da = interp1d(zpicks[1:], da)\n dlpc = interp_dlpc(all_zpicks)\n da = interp_da(all_zpicks)\n\n# return dlpc, da, z, integrated_dlpc, plot_var\n return dlpc, da, plot_var"},"size":{"kind":"number","value":4882,"string":"4,882"}}},{"rowIdx":128373,"cells":{"max_stars_repo_path":{"kind":"string","value":"app/models/cluster_model.py"},"max_stars_repo_name":{"kind":"string","value":"altmirai/piggycli"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172359"},"content":{"kind":"string","value":"import app.utilities.ssh as ssh\nimport json\n\n\nclass Cluster:\n\n def __init__(self, client, id):\n self.client = client\n self.id = id\n\n @classmethod\n def all(cls, client):\n resp = client.describe_clusters()\n return resp['Clusters']\n\n @property\n def hsms(self):\n return self.read()['Hsms']\n\n @property\n def azs(self):\n subnet_mapping = self.read()['SubnetMapping']\n azs = []\n for key, value in subnet_mapping.items():\n azs.append(key)\n return azs\n\n @property\n def csr(self):\n return self.read()['Certificates']['ClusterCsr']\n\n @property\n def state(self):\n return self.read()['State']\n\n def initialize(self, certs):\n assert self.state == 'UNINITIALIZED', 'Cluster state is not UNITIALIZED'\n assert certs.valid, 'Certificates not valid'\n self.client.initialize_cluster(\n ClusterId=self.id,\n SignedCert=certs.pem_hsm_cert.decode('UTF-8'),\n TrustAnchor=certs.pem_ca_cert.decode('UTF-8')\n )\n return\n\n def activate(\n self, instance, crypto_officer_username, crypto_officer_password, crypto_user_username, crypto_user_password, ssh_key):\n eni_ip = self.hsms[0]['EniIp']\n resp_json = ssh.activate_cluster(\n ip_address=instance.public_ip_address,\n ssh_key_file_path=ssh_key.ssh_key_file_path,\n eni_ip=eni_ip,\n crypto_officer_username=crypto_officer_username,\n crypto_officer_password=,\n crypto_user_username=crypto_user_username,\n crypto_user_password=\n )\n\n resp = json.loads(resp_json)\n\n assert resp.get(\n 'error') is None, f\"Activate cluster error: {resp['error']}\"\n assert resp['crypto_officer']['username'] == crypto_officer_username\n assert resp['crypto_officer']['password'] == officer_password\n\n return True\n\n def read(self):\n resp = self.client.describe_clusters(\n Filters={'clusterIds': [self.id]})\n return resp['Clusters'][0]\n\n def destroy(self):\n return False\n"},"size":{"kind":"number","value":2176,"string":"2,176"}}},{"rowIdx":128374,"cells":{"max_stars_repo_path":{"kind":"string","value":"stage2_cINN/AE/modules/LPIPS.py"},"max_stars_repo_name":{"kind":"string","value":"CJWBW/image2video-synthesis-using-cINNs"},"max_stars_count":{"kind":"number","value":85,"string":"85"},"id":{"kind":"string","value":"2172377"},"content":{"kind":"string","value":"\"\"\"Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models\"\"\"\n\nimport torch\nimport torch.nn as nn\n\nfrom stage2_cINN.AE.modules.vgg16 import vgg16, normalize_tensor, spatial_average\nfrom stage2_cINN.AE.modules.ckpt_util import get_ckpt_path\n\n\nclass LPIPS(nn.Module):\n # Learned perceptual metric\n def __init__(self, use_dropout=True):\n super().__init__()\n self.scaling_layer = ScalingLayer()\n self.chns = [64, 128, 256, 512, 512]\n self.net = vgg16(pretrained=True, requires_grad=False)\n self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)\n self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)\n self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)\n self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)\n self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)\n self.load_from_pretrained()\n for param in self.parameters():\n param.requires_grad = False\n\n def load_from_pretrained(self, name=\"vgg_lpips\"):\n ckpt = get_ckpt_path(name)\n self.load_state_dict(torch.load(ckpt, map_location=torch.device(\"cpu\")), strict=False)\n print(\"loaded pretrained LPIPS loss from {}\".format(ckpt))\n\n @classmethod\n def from_pretrained(cls, name=\"vgg_lpips\"):\n if name is not \"vgg_lpips\":\n raise NotImplementedError\n model = cls()\n ckpt = get_ckpt_path(name)\n model.load_state_dict(torch.load(ckpt, map_location=torch.device(\"cpu\")), strict=False)\n return model\n\n def forward(self, input, target):\n in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))\n outs0, outs1 = self.net(in0_input), self.net(in1_input)\n feats0, feats1, diffs = {}, {}, {}\n lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]\n for kk in range(len(self.chns)):\n feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])\n diffs[kk] = (feats0[kk] - feats1[kk]) ** 2\n\n res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]\n val = res[0]\n for l in range(1, len(self.chns)):\n val += res[l]\n return val\n\n\nclass ScalingLayer(nn.Module):\n def __init__(self):\n super(ScalingLayer, self).__init__()\n self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])\n self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None])\n\n def forward(self, inp):\n return (inp - self.shift) / self.scale\n\n\nclass NetLinLayer(nn.Module):\n \"\"\" A single linear layer which does a 1x1 conv \"\"\"\n def __init__(self, chn_in, chn_out=1, use_dropout=False):\n super(NetLinLayer, self).__init__()\n layers = [nn.Dropout(), ] if (use_dropout) else []\n layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]\n self.model = nn.Sequential(*layers)\n\n"},"size":{"kind":"number","value":3048,"string":"3,048"}}},{"rowIdx":128375,"cells":{"max_stars_repo_path":{"kind":"string","value":"hello-world/comments.py"},"max_stars_repo_name":{"kind":"string","value":"selvendiranj/python-tutorial"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172389"},"content":{"kind":"string","value":"\"\"\"\nunderstood various blocks even if they are without braces\n\"\"\"\n\n# First comment\nprint (\"Hello, Python!\" # second comment)\n\nname = \"Madisetti\" # This is again comment\n\n# This is a comment.\n# This is a comment, too.\n# This is a comment, too.\n# I said that already.\n"},"size":{"kind":"number","value":266,"string":"266"}}},{"rowIdx":128376,"cells":{"max_stars_repo_path":{"kind":"string","value":"benchmarks_sphere/paper_jrn_jfm_ppeixoto/benchmark_specific_settings.py"},"max_stars_repo_name":{"kind":"string","value":"valentinaschueller/sweet"},"max_stars_count":{"kind":"number","value":6,"string":"6"},"id":{"kind":"string","value":"2170357"},"content":{"kind":"string","value":"# ---------------------------------------------\n# Class to setup spherical modes initialization \n# author: <>\n# Oct 2021\n# ----------------------------------------\nimport numpy as np\nimport pickle\nfrom numpy.lib.function_base import append\nimport pandas as pd\nimport re\nimport os\nimport os.path\n\nimport matplotlib\n#matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nfrom matplotlib.lines import Line2D\nimport matplotlib.ticker as mtick\n\nfrom mule.postprocessing.JobData import *\n\n#-----------------------------------------------------------------\n# Test Cases for different settings of modes initialization\n# > Used for pre-processing, to creat jobs for sweet\n#-----------------------------------------------------------------\n\nclass modes_TC1: #Init with energy in full shells from n_ini to n_end\n def __init__(self, n_ini, n_end, m_ini, alpha_min, alpha_max, alpha_samples):\n \n self.alpha = np.linspace(alpha_min, alpha_max, alpha_samples, endpoint=False)\n\n # Select shells for initial energy\n # Remember n >= m, and m=n, ..., N, where N it the max wavenumber (space_res_spectral)\n # n defines the shell\n self.nmodes=[]\n self.mmodes=[]\n self.ampls=[]\n self.n_ini = n_ini\n self.n_end = n_end\n self.m_ini = m_ini\n\n count_modes = 0\n code=\"\"\n \n for n in range(n_ini, n_end+1):\n for m in range(m_ini, n+1):\n self.nmodes.append(n)\n self.mmodes.append(m)\n self.ampls.append(1.0)\n count_modes+=1\n \n\n self.count_modes = count_modes \n\n codes = []\n print()\n print(\"Mode init params:\")\n for a in self.alpha:\n print()\n print(\"alpha = \", a)\n print(\"i n m amp\")\n code = str(self.count_modes)\n for i in range(self.count_modes):\n code+=\"_\"+str(self.nmodes[i])+\"_\"+str(self.mmodes[i])+\"_\"+str(a*self.ampls[i])\n print(i, self.nmodes[i], self.mmodes[i], a*self.ampls[i])\n codes.append(code)\n \n self.codes = codes\n print(codes)\n\n def save_file(self, filename):\n\n\n with open(filename, 'wb') as f:\n # Pickle the 'data' dictionary using the highest protocol available.\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)\n\nclass modes_TC2: #list of initial modes\n def __init__(self, n_list, m_list, alpha_min, alpha_max, alpha_samples, back_n_min=0, back_n_max=0, back_ampl=0.1):\n \n self.alpha = np.linspace(alpha_min, alpha_max, alpha_samples, endpoint=False)\n\n # Select shells for initial energy\n # Remember n >= m, and m=n, ..., N, where N it the max wavenumber (space_res_spectral)\n # n defines the shell\n self.nmodes=n_list\n self.mmodes=m_list\n self.ampls=[]\n self.n_ini = min(n_list)\n self.n_end = max(n_list)\n self.m_ini = min(m_list)\n\n count_modes = 0\n code=\"\"\n \n for n in n_list:\n self.ampls.append(1.0)\n count_modes+=1\n \n self.count_modes = count_modes \n list_modes = count_modes\n\n #add energy on other modes (background energy)\n n_ini = back_n_min\n n_end = back_n_max\n m_ini = 0\n \n if n_ini != 0 and n_end != 0:\n for n in range(n_ini, n_end+1):\n for m in range(m_ini, n+1):\n if (n,m) in zip(n_list, m_list):\n continue\n else:\n self.nmodes.append(n)\n self.mmodes.append(m)\n self.ampls.append(back_ampl)\n count_modes+=1\n \n self.count_modes = count_modes \n\n codes = []\n print()\n print(\"Mode init params:\")\n for a in self.alpha:\n print()\n print(\"alpha = \", a)\n print(\"i n m amp\")\n code = str(self.count_modes)\n for i in range(self.count_modes):\n if i < list_modes:\n code+=\"_\"+str(self.nmodes[i])+\"_\"+str(self.mmodes[i])+\"_\"+str(a*self.ampls[i])\n print(i, self.nmodes[i], self.mmodes[i], a*self.ampls[i])\n else:\n code+=\"_\"+str(self.nmodes[i])+\"_\"+str(self.mmodes[i])+\"_\"+str(self.ampls[i])\n print(i, self.nmodes[i], self.mmodes[i], self.ampls[i])\n codes.append(code)\n \n self.codes = codes\n print(codes)\n \n\n def save_file(self, filename):\n\n with open(filename, 'wb') as f:\n # Pickle the 'data' dictionary using the highest protocol available.\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)\n\nclass modes_TC3: #list of initial modes and list of background modes\n def __init__(self, n_list, m_list, n_list_back, m_list_back, alpha_min, alpha_max, alpha_samples, back_n_min=0, back_n_max=0, back_ampl=0.1):\n \n self.alpha = np.linspace(alpha_min, alpha_max, alpha_samples, endpoint=False)\n\n # Select shells for initial energy\n # Remember n >= m, and m=n, ..., N, where N it the max wavenumber (space_res_spectral)\n # n defines the shell\n self.nmodes=n_list+n_list_back\n self.mmodes=m_list+m_list_back\n self.ampls=[]\n self.n_ini = min(n_list)\n self.n_end = max(n_list)\n self.m_ini = min(m_list)\n\n count_modes = 0\n code=\"\"\n \n for n in n_list:\n self.ampls.append(1.0)\n count_modes+=1\n \n self.count_modes = count_modes \n list_modes = count_modes\n\n for n in n_list_back:\n self.ampls.append(1.0)\n count_modes+=1\n\n self.count_modes = count_modes \n\n #add energy on other modes (background energy)\n n_ini = back_n_min\n n_end = back_n_max\n m_ini = 0\n \n if n_ini != 0 and n_end != 0:\n for n in range(n_ini, n_end+1):\n for m in range(m_ini, n+1):\n if (n,m) in zip(self.nmodes, self.mmodes):\n continue\n else:\n self.nmodes.append(n)\n self.mmodes.append(m)\n self.ampls.append(back_ampl)\n count_modes+=1\n \n self.count_modes = count_modes \n\n codes = []\n print()\n print(\"Mode init params:\")\n for a in self.alpha:\n print()\n print(\"alpha = \", a)\n print(\"i n m amp\")\n code = str(self.count_modes)\n for i in range(self.count_modes):\n if i < list_modes:\n code+=\"_\"+str(self.nmodes[i])+\"_\"+str(self.mmodes[i])+\"_\"+str(a*self.ampls[i])\n print(i, self.nmodes[i], self.mmodes[i], a*self.ampls[i])\n else:\n code+=\"_\"+str(self.nmodes[i])+\"_\"+str(self.mmodes[i])+\"_\"+str(self.ampls[i])\n print(i, self.nmodes[i], self.mmodes[i], self.ampls[i])\n codes.append(code)\n \n self.codes = codes\n print(codes)\n \n\n def save_file(self, filename):\n\n with open(filename, 'wb') as f:\n # Pickle the 'data' dictionary using the highest protocol available.\n pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)\n\n#Read a test case object for post-processing\ndef load_file(filename):\n f = open(filename, 'rb')\n obj = pickle.load(f)\n f.close() \n return obj\n"},"size":{"kind":"number","value":7693,"string":"7,693"}}},{"rowIdx":128377,"cells":{"max_stars_repo_path":{"kind":"string","value":"nbx/nbmanager/tagged_gist/tests/test_notebook_gist.py"},"max_stars_repo_name":{"kind":"string","value":"dalejung/nbx"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2171869"},"content":{"kind":"string","value":"from ..notebook_gisthub import NotebookGistHub\nfrom ..gisthub import GistHub\nfrom .test_gisthub import generate_gisthub\n\nfrom nbx.tools import assert_items_equal\n\nfrom nbx.nbmanager.tests.common import (\n hub,\n require_github,\n make_notebookgist,\n)\n\n\nclass TestNotebookGist:\n\n def test_notebookgist(self):\n nb = make_notebookgist()\n assert nb.suffix == \"[123].ipynb\"\n assert nb.key_name == \"Test Gist [123].ipynb\"\n # test pass through via __getattr__\n assert nb.id == 123\n assert_items_equal(nb.files.keys(),\n ['a.ipynb', 'b.ipynb', 'test.txt'])\n\n def test_strip_gist_id(self):\n nb = make_notebookgist()\n key_name = nb.key_name\n name = nb.strip_gist_id(key_name)\n assert nb.name == name\n\n def test_key_name(self):\n \" Test that key_name rebuilds when name is changed \"\n nb = make_notebookgist()\n nb.name = \"test\"\n assert nb.key_name == \"test [123].ipynb\"\n\n def test_notebook_content(self):\n nb = make_notebookgist()\n content = nb.notebook_content\n assert content == \"a.ipynb content\"\n\n nb.notebook_content = 'new nb content'\n assert nb.notebook_content == 'new nb content'\n\n def test_generate_payload(self):\n nb = make_notebookgist()\n payload = nb._generate_payload()\n assert_items_equal(payload['files'].keys(), ['a.ipynb'])\n\n nb.notebook_content = 'new nb content'\n assert nb.notebook_content == 'new nb content'\n\n def test_generate_description(self):\n \"\"\"\n NotebookGist._generate_description will generate a proper\n description string to reflect name, active, and tags\n \"\"\"\n nb = make_notebookgist()\n # make sure notebook isn't in tags\n assert '#notebook' not in nb.tags\n desc = nb._generate_description()\n # the description should insert the #notebook tag\n assert '#notebook' in desc\n\n # test that inactive gets added\n assert '#inactive' not in desc\n nb.active = False\n test = nb._generate_description()\n assert '#inactive' in test\n\n # change name\n nb.name = \"WOO\"\n test = nb._generate_description()\n assert test == \"WOO #notebook #inactive #pandas #woo\"\n\n # change tags\n nb.tags = [\"#newtag\"]\n test = nb._generate_description()\n assert test == \"WOO #notebook #inactive #newtag\"\n\n def test_get_revision_content(self):\n nb = make_notebookgist()\n revisions = nb.revisions\n # a.ipynb is only revision 0 and 1\n keys = map(lambda x: x['id'], revisions)\n assert list(keys) == [0, 1]\n assert nb.get_revision_content(0) == \"a.ipynb_0_revision_content\"\n assert nb.get_revision_content(1) == \"a.ipynb_1_revision_content\"\n\n def test_save(self):\n # test content/name change\n nb = make_notebookgist()\n gisthub = nb.gisthub\n nb.notebook_content = 'test'\n nb.name = \"BOB\"\n gisthub.save(nb)\n assert nb.gist.edit.call_count == 1\n args = nb.gist.edit.call_args[0]\n fo = args[1]['a.ipynb']\n assert fo._InputFileContent__content == 'test'\n assert args[0] == \"BOB #notebook #pandas #woo\"\n\n nb.active = False\n gisthub.save(nb)\n assert nb.gist.edit.call_count == 2\n args = nb.gist.edit.call_args[0]\n fo = args[1]['a.ipynb']\n assert fo._InputFileContent__content == 'test'\n assert args[0] == \"BOB #notebook #inactive #pandas #woo\"\n\n\ndef setup_notebookgisthub():\n names = [\n \"Test gist #frank #notebook\",\n \"Frank bob number 2 #frank #bob #notebook\",\n \"bob inactive #bob #inactive #notebook\",\n \"bob twin #bob #twin #notebook\",\n \"bob twin #bob #twin #notebook\",\n \"not a notebook #bob\",\n ]\n\n gh = generate_gisthub(names)\n ngh = NotebookGistHub(gh)\n return ngh\n\n\nclass TestNotebookGistHub:\n\n def test_query(self):\n ngh = setup_notebookgisthub()\n results = ngh.query('#bob')\n test = results['#bob']\n for key, gist in test.items():\n # make sure we are keying by keyname and not gist.id\n assert key == gist.key_name\n\n names = [gist.name for gist in test.values()]\n # test that we always check for #notebook via filter_tag\n assert 'not a notebook' not in names\n assert '#notebook' not in results.keys()\n\n @require_github\n def test_live_query(self):\n gisthub = GistHub(hub)\n nbhub = NotebookGistHub(gisthub)\n nbhub.query()\n"},"size":{"kind":"number","value":4620,"string":"4,620"}}},{"rowIdx":128378,"cells":{"max_stars_repo_path":{"kind":"string","value":"scripts/evaluate-script/run_evaluation.py"},"max_stars_repo_name":{"kind":"string","value":"scc-usc/covid19-forecast-bench"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2169523"},"content":{"kind":"string","value":"import os\nimport datetime\nimport shutil\nimport evaluate\nimport evaluate_eu\n\nmodels = []\n# Evaluate US\nwith open(\"models.txt\", \"w\") as f:\n for directory in os.listdir(\"../../formatted-forecasts/US-COVID/state-death/\"):\n if os.path.isdir(\"../../formatted-forecasts/US-COVID/state-death/\" + directory):\n models.append(directory)\n f.write(directory + '\\n')\n\nwith open(\"forecasts_filenames.txt\", \"w\") as f:\n for m in models:\n if os.path.isdir(\"../../formatted-forecasts/US-COVID/state-death/\" + m):\n for csv in os.listdir(\"../../formatted-forecasts/US-COVID/state-death/\" + m):\n date_num = (datetime.datetime.now() - datetime.datetime(2020, 1, 22)).days\n for i in range(32):\n date_num -= 1\n if \"_{}.csv\".format(date_num) in csv:\n f.write(csv + '\\n')\n\nevaluate.run()\nshutil.rmtree(\"../../evaluation/US-COVID/\")\nshutil.copytree(\"./output/\", \"../../evaluation/US-COVID/\")\nfor directory in os.listdir(\"./output/\"):\n if os.path.isdir(\"./output/{}\".format(directory)):\n shutil.rmtree(\"./output/{}\".format(directory))\n else:\n os.remove(\"./output/{}\".format(directory))\n\n# Clear txt files.\nopen(\"models.txt\", 'w').close()\nopen(\"forecasts_filenames.txt\", 'w').close()\n\n# Evaluate EU\nmodels.clear()\nwith open(\"models.txt\", \"w\") as f:\n for directory in os.listdir(\"../../formatted-forecasts/EU-COVID/eu-death/\"):\n if os.path.isdir(\"../../formatted-forecasts/EU-COVID/eu-death/\" + directory):\n models.append(directory)\n f.write(directory + '\\n')\n\nwith open(\"forecasts_filenames.txt\", \"w\") as f:\n for m in models:\n if os.path.isdir(\"../../formatted-forecasts/EU-COVID/eu-death/\" + m):\n for csv in os.listdir(\"../../formatted-forecasts/EU-COVID/eu-death/\" + m):\n date_num = (datetime.datetime.now() - datetime.datetime(2020, 1, 22)).days\n for i in range(32):\n date_num -= 1\n if \"_{}.csv\".format(date_num) in csv:\n f.write(csv + '\\n')\n\nevaluate_eu.run()\nshutil.rmtree(\"../../evaluation/EU-COVID/\")\nshutil.copytree(\"./output/\", \"../../evaluation/EU-COVID/\")\nfor directory in os.listdir(\"./output/\"):\n if os.path.isdir(\"./output/{}\".format(directory)):\n shutil.rmtree(\"./output/{}\".format(directory))\n else:\n os.remove(\"./output/{}\".format(directory))\n\n# Clear txt files.\nopen(\"models.txt\", 'w').close()\nopen(\"forecasts_filenames.txt\", 'w').close()\n"},"size":{"kind":"number","value":2555,"string":"2,555"}}},{"rowIdx":128379,"cells":{"max_stars_repo_path":{"kind":"string","value":"demo/myimages/imagefiles/apps.py"},"max_stars_repo_name":{"kind":"string","value":"ResonantGeoData/django-large-image"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2170705"},"content":{"kind":"string","value":"import logging\n\nfrom django.apps import AppConfig\nfrom django.conf import settings\n\n\nclass MyImagesConfig(AppConfig):\n name = 'myimages.imagefiles'\n verbose_name = 'My Image Files'\n\n def ready(self):\n if not getattr(settings, 'DEBUG', False):\n logging.getLogger('gdal').setLevel(logging.ERROR)\n logging.getLogger('large_image').setLevel(logging.ERROR)\n logging.getLogger('tifftools').setLevel(logging.ERROR)\n logging.getLogger('pyvips').setLevel(logging.ERROR)\n logging.getLogger('PIL').setLevel(logging.ERROR)\n"},"size":{"kind":"number","value":582,"string":"582"}}},{"rowIdx":128380,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/test_wrappers.py"},"max_stars_repo_name":{"kind":"string","value":"lsnty5190/torchmd-net"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172224"},"content":{"kind":"string","value":"import pytest\nfrom pytest import mark\nfrom torchmdnet import models\nfrom torchmdnet.models.model import create_model\nfrom torchmdnet.models.wrappers import AtomFilter\nfrom utils import load_example_args, create_example_batch\n\n\n@mark.parametrize(\"remove_threshold\", [-1, 2, 5])\n@mark.parametrize(\"model_name\", models.__all__)\ndef test_atom_filter(remove_threshold, model_name):\n # wrap a representation model using the AtomFilter wrapper\n model = create_model(load_example_args(model_name, remove_prior=True))\n model = model.representation_model\n model = AtomFilter(model, remove_threshold)\n\n z, pos, batch = create_example_batch(n_atoms=100)\n x, v, z, pos, batch = model(z, pos, batch, None, None)\n\n assert (z > remove_threshold).all(), (\n f\"Lowest updated atomic number is {z.min()} but \"\n f\"the atom filter is set to {remove_threshold}\"\n )\n assert len(z) == len(\n pos\n ), \"Number of z and pos values doesn't match after AtomFilter\"\n assert len(z) == len(\n batch\n ), \"Number of z and batch values doesn't match after AtomFilter\"\n"},"size":{"kind":"number","value":1094,"string":"1,094"}}},{"rowIdx":128381,"cells":{"max_stars_repo_path":{"kind":"string","value":"visuals/apps.py"},"max_stars_repo_name":{"kind":"string","value":"mujeebishaque/visual-analytics"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170185"},"content":{"kind":"string","value":"from django.apps import AppConfig\n\n\nclass VisualsConfig(AppConfig):\n default_auto_field = 'django.db.models.BigAutoField'\n name = 'visuals'\n"},"size":{"kind":"number","value":146,"string":"146"}}},{"rowIdx":128382,"cells":{"max_stars_repo_path":{"kind":"string","value":"index.py"},"max_stars_repo_name":{"kind":"string","value":"Max-C-G/review-app"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171539"},"content":{"kind":"string","value":"from flask import Flask, render_template\nfrom flask import request\nfrom sklearn.naive_bayes import MultinomialNB\n# from sklearn.metrics import mean_squared_error as mse\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\nimport math\nimport os\nimport pickle\n\n# load the model from disk\nmodel_file = 'nb_model_final.sav'\nvocab_file = 'vocabulary.p'\nnb_classifier = pickle.load(open(model_file, 'rb'))\nvocab = pickle.load(open(vocab_file, 'rb'))\napp = Flask(__name__)\n\n@app.route('/', methods=['GET'])\ndef get_index():\n # print('testing')\n return render_template('index.html')\n\n@app.route('/', methods=['POST'])\ndef predict_score():\n # print('review: ', request.form['review'])\n review = request.form['review']\n clf = nb_classifier\n # print(result)\n count_vect_test = CountVectorizer(vocabulary=vocab)\n tester_counts = count_vect_test.fit_transform([review])\n tfidf_transformer = TfidfTransformer()\n tester_tfidf = tfidf_transformer.fit_transform(tester_counts)\n prediction = clf.predict(tester_tfidf)\n print('prediction: ', prediction)\n return render_template('index.html', review = request.form['review'], rating = str(prediction[0]))"},"size":{"kind":"number","value":1348,"string":"1,348"}}},{"rowIdx":128383,"cells":{"max_stars_repo_path":{"kind":"string","value":"controllers/rvoavoidercontroller.py"},"max_stars_repo_name":{"kind":"string","value":"gavincangan/alvin"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171618"},"content":{"kind":"string","value":"from controller import Controller\nimport rvo2, pyglet\nfrom common import Twist, M_TO_PIXELS, MAX_LINEAR_SPEED, MAX_ANGULAR_SPEED\nfrom math import cos, sin, sqrt, pi, atan2\n\n\"\"\"\nA reactive collision avoidance strategy which makes use of the RVO2 library.\n\nImportant: All units in pixels!\n\"\"\"\n\nclass RVOAvoiderController(Controller):\n\n NUMBER_PREF_VELS = 11\n ANGLE_MIN = -pi/2.0\n ANGLE_MAX = pi/2.0\n SIM_STEPS = 1\n\n def __init__(self, sim_steps=1):\n\n self.SIM_STEPS = sim_steps\n\n # Angles of preferred velocities that will be tested each iteration.\n angles = []\n angle_delta = (self.ANGLE_MAX - self.ANGLE_MIN) / \\\n (self.NUMBER_PREF_VELS - 1)\n for i in range(self.NUMBER_PREF_VELS):\n angles.append(self.ANGLE_MIN + i * angle_delta)\n\n self.pref_vels = []\n for angle in angles:\n self.pref_vels.append((MAX_LINEAR_SPEED * cos(angle), \\\n MAX_LINEAR_SPEED * sin(angle)))\n\n self.last_index = angles.index(0)\n self.last_mag = float('inf')\n\n def draw_line_from_robot(self, robot, vx, vy, red, green, blue, thickness):\n x1 = (robot.body.position.x)\n y1 = (robot.body.position.y)\n world_angle = robot.body.angle + atan2(vy, vx)\n mag = sqrt(vx*vx + vy*vy)\n x2 = int(robot.body.position.x + mag * cos(world_angle))\n y2 = int(robot.body.position.y + mag * sin(world_angle))\n pyglet.gl.glLineWidth(thickness)\n pyglet.graphics.draw(2, pyglet.gl.GL_LINES,\n ('v2f', (x1, y1, x2, y2)),\n ('c3B', (red, green, blue, red, green, blue)))\n pyglet.gl.glLineWidth(1)\n\n def react(self, robot, sensor_suite, visualize=False):\n\n range_scan = sensor_suite.range_scan\n #puck_scan = sensor_suite.puck_scan\n\n # We seem to have to create a new simulator object each time because\n # otherwise it would contain the obstacles from the last time step.\n # If there was a 'removeObstacle' method it would be a bit nicer.\n sim = rvo2.PyRVOSimulator(1/60., # Time step\n 1.5, # neighborDist\n 5, # maxNeighbors\n 1.5, # timeHorizon (other agents)\n 1.5, #2 # timeHorizon (obstacles)\n robot.radius, # agent radius\n MAX_LINEAR_SPEED) # agent max speed\n agent = sim.addAgent((0, 0))\n\n # Add range scan points as obstacles for the RVO simulator\n n = len(range_scan.ranges)\n points = []\n for i in range(0, n):\n rho = range_scan.INNER_RADIUS + range_scan.ranges[i]\n #if not (rho == float('inf') or isnan(rho)):\n theta = range_scan.angles[i]\n points.append((rho*cos(theta), rho*sin(theta)))\n\n # Add pucks from the puck scan\n #for puck in puck_scan.pucks:\n # rho = puck.distance\n # theta = puck.angle\n # points.append((rho*cos(theta), rho*sin(theta)))\n\n # Add fake points behind the robot to make it think twice about going\n # backwards.\n #n_fake = 0\n #start_angle = range_scan.ANGLE_MAX\n #stop_angle = range_scan.ANGLE_MIN + 2*pi\n #angle_delta = (stop_angle - start_angle) / (n_fake - 1)\n #for i in range(n_fake):\n # theta = start_angle + i * angle_delta\n # rho = 2 * robot.radius\n # points.append((rho*cos(theta), rho*sin(theta)))\n # if visualize:\n # vx,vy = rho*cos(theta), rho*sin(theta)\n # self.draw_line_from_robot(robot, vx, vy, 0, 0, 255, 1)\n\n # The scan points will be treated together as a single \"negative\"\n # obstacle, with vertices specified in CW order. This requires the\n # following sort.\n points.sort(key = lambda p: -atan2(p[1], p[0]))\n sim.addObstacle(points)\n sim.processObstacles()\n\n # Get the velocity in the robot reference frame with the clockwise\n # rotation matrix\n cos_theta = cos(robot.body.angle)\n sin_theta = sin(robot.body.angle)\n cur_vx = robot.body.velocity.x * cos_theta + \\\n robot.body.velocity.y * sin_theta\n cur_vy = -robot.body.velocity.x * sin_theta + \\\n robot.body.velocity.y * cos_theta\n\n\n # To prevent oscillation we will generally just test the preferred\n # velocities in the immediate neighbourhood (within the pref_vels list)\n # of the preferred velocity chosen last time.\n if self.last_mag < 20:\n # Last time the magnitude of the chosen velocity was very low.\n # Do a full search over the preferred velocities.\n start_index = 0\n stop_index = self.NUMBER_PREF_VELS - 1\n elif self.last_index == 0:\n start_index = 0\n stop_index = 1\n elif self.last_index == len(self.pref_vels)-1:\n start_index = self.NUMBER_PREF_VELS - 2\n stop_index = self.NUMBER_PREF_VELS - 1\n else:\n # This is the general case.\n start_index = self.last_index - 1\n stop_index = self.last_index + 1\n\n highest_mag = 0\n chosen_vel = None\n chosen_index = None\n for i in range(start_index, stop_index+1):\n pref_vel = self.pref_vels[i]\n\n # Initializing from scratch each time\n sim.setAgentPosition(agent, (0, 0))\n sim.setAgentVelocity(agent, (cur_vx, cur_vy))\n sim.setAgentPrefVelocity(agent, pref_vel)\n \n for j in range(self.SIM_STEPS):\n sim.doStep()\n\n (vx, vy) = sim.getAgentVelocity(0)\n #print \"vel: {}, {}\".format(vx, vy)\n if visualize:\n self.draw_line_from_robot(robot, vx, vy, 255, 255, 255, 3)\n\n mag = sqrt(vx*vx + vy*vy)\n if mag > highest_mag:\n highest_mag = mag\n chosen_vel = (vx, vy)\n chosen_index = i\n\n self.last_index = chosen_index\n self.last_mag = highest_mag\n #print \"highest_mag: {}\".format(highest_mag)\n\n #chosen_vel = (avg_vx / len(self.pref_vels),\n # avg_vy / len(self.pref_vels))\n\n if visualize and chosen_vel != None:\n self.draw_line_from_robot(robot, chosen_vel[0], chosen_vel[1], 255, 0, 127, 5)\n\n #print \"MAX_LINEAR_SPEED: {}\".format(MAX_LINEAR_SPEED)\n #print \"current_vel: {}, {}\".format(cur_vx, cur_vy)\n #print \"MAG OF current_vel: {}\".format(sqrt(cur_vx**2+ cur_vy**2))\n #print \"chosen_vel: {}, {}\".format(chosen_vel[0], chosen_vel[1])\n #print \"MAG OF chosen_vel: {}\".format(sqrt(chosen_vel[0]**2+ chosen_vel[1]**2))\n\n # Now treat (vx, vy) as the goal and apply the simple control law\n twist = Twist()\n if chosen_vel != None:\n twist.linear = 0.1 * chosen_vel[0]\n twist.angular = 0.02 * chosen_vel[1]\n else:\n print \"NO AVAILABLE VELOCITY!\"\n #for r in range_scan.ranges:\n # print r\n return twist\n"},"size":{"kind":"number","value":7253,"string":"7,253"}}},{"rowIdx":128384,"cells":{"max_stars_repo_path":{"kind":"string","value":"management_api_app/api/dependencies/database.py"},"max_stars_repo_name":{"kind":"string","value":"LizaShak/AzureTRE"},"max_stars_count":{"kind":"number","value":2,"string":"2"},"id":{"kind":"string","value":"2172133"},"content":{"kind":"string","value":"import logging\nfrom typing import Callable, Type\n\nfrom azure.cosmos import CosmosClient\nfrom fastapi import Depends, FastAPI, HTTPException\nfrom starlette.requests import Request\nfrom starlette.status import HTTP_503_SERVICE_UNAVAILABLE\n\nfrom core import config\nfrom db.errors import UnableToAccessDatabase\nfrom db.repositories.base import BaseRepository\nfrom resources import strings\n\n\ndef connect_to_db() -> CosmosClient:\n logging.debug(f\"Connecting to {config.STATE_STORE_ENDPOINT}\")\n\n try:\n if config.DEBUG:\n # ignore TLS(setup is pain) when on dev container and connecting to cosmosdb on windows host.\n cosmos_client = CosmosClient(config.STATE_STORE_ENDPOINT, config.STATE_STORE_KEY,\n connection_verify=False)\n else:\n cosmos_client = CosmosClient(config.STATE_STORE_ENDPOINT, config.STATE_STORE_KEY)\n logging.debug(\"Connection established\")\n return cosmos_client\n except Exception as e:\n logging.debug(f\"Connection to state store could not be established: {e}\")\n\n\ndef get_db_client(app: FastAPI) -> CosmosClient:\n if not app.state.cosmos_client:\n app.state.cosmos_client = connect_to_db()\n return app.state.cosmos_client\n\n\ndef get_db_client_from_request(request: Request) -> CosmosClient:\n return get_db_client(request.app)\n\n\ndef get_repository(repo_type: Type[BaseRepository]) -> Callable[[CosmosClient], BaseRepository]:\n def _get_repo(client: CosmosClient = Depends(get_db_client_from_request)) -> BaseRepository:\n try:\n return repo_type(client)\n except UnableToAccessDatabase:\n raise HTTPException(status_code=HTTP_503_SERVICE_UNAVAILABLE, detail=strings.STATE_STORE_ENDPOINT_NOT_RESPONDING)\n\n return _get_repo\n"},"size":{"kind":"number","value":1796,"string":"1,796"}}},{"rowIdx":128385,"cells":{"max_stars_repo_path":{"kind":"string","value":"lib/sds/metrics/metricscollector.py"},"max_stars_repo_name":{"kind":"string","value":"GeorryHuang/galaxy-sdk-python"},"max_stars_count":{"kind":"number","value":17,"string":"17"},"id":{"kind":"string","value":"2172450"},"content":{"kind":"string","value":"import Queue\nimport time\nfrom sds.admin.ttypes import ClientMetrics\nimport threading\nfrom sds.metrics.Common import UPLOAD_INTERVAL\n\n\nclass MetricsCollector:\n def __init__(self, metric_admin_client):\n self.queue = Queue.Queue(0)\n self.metric_admin_client = metric_admin_client\n metric_upload_thread = MetricUploadThread(self.queue, self.metric_admin_client)\n metric_upload_thread.setDaemon(True)\n metric_upload_thread.start()\n\n def collect(self, request_metrics):\n client_metrics = request_metrics.to_client_metrics()\n for k in client_metrics.metricDataList:\n self.queue.put(k)\n\n\nclass MetricUploadThread(threading.Thread):\n def __init__(self, queue, metric_admin_client):\n super(MetricUploadThread, self).__init__()\n self.queue = queue\n self.name = \"sds-python-sdk-metrics-uploader\"\n self.metric_admin_client = metric_admin_client\n\n def run(self):\n while True:\n try:\n start_time = time.time() * 1000\n client_metrics = ClientMetrics()\n metrics_data_list = []\n while True:\n elapsed_time = time.time() * 1000 - start_time\n if elapsed_time > UPLOAD_INTERVAL:\n break\n else:\n try:\n metricData = self.queue.get(True, (UPLOAD_INTERVAL - elapsed_time) / 1000)\n except Queue.Empty as em:\n break\n metrics_data_list.append(metricData)\n client_metrics.metricDataList = metrics_data_list\n self.metric_admin_client.putClientMetrics(client_metrics)\n except Exception as e:\n pass"},"size":{"kind":"number","value":1564,"string":"1,564"}}},{"rowIdx":128386,"cells":{"max_stars_repo_path":{"kind":"string","value":"utils/loss.py"},"max_stars_repo_name":{"kind":"string","value":"HibikiJie/MONet"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172300"},"content":{"kind":"string","value":"from torch import nn\nimport torch\n\n\nclass FocalLoss(nn.Module):\n\n def __init__(self, gamma=2, alpha=0.25, r=1e-19):\n \"\"\"\n :param gamma: gamma>0减少易分类样本的损失。使得更关注于困难的、错分的样本。越大越关注于困难样本的学习\n :param alpha:调节正负样本比例\n :param r:数值稳定系数。\n \"\"\"\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.alpha = alpha\n self.bce_loss = nn.BCELoss()\n self.r = r\n\n def forward(self, p, target):\n target = target.float()\n p_min = p.min()\n p_max = p.max()\n if p_min < 0 or p_max > 1:\n raise ValueError('The range of predicted values should be [0, 1]')\n p = p.reshape(-1, 1)\n target = target.reshape(-1, 1)\n loss = -self.alpha * (1 - p) ** self.gamma * (target * torch.log(p + self.r)) - \\\n (1 - self.alpha) * p ** self.gamma * ((1 - target) * torch.log(1 - p + self.r))\n return loss.mean()\n\n\nclass FocalLossManyClassification(nn.Module):\n\n def __init__(self, num_class, alpha=None, gamma=2, smooth=None, epsilon=1e-19):\n \"\"\"\n FocalLoss,适用于多分类。输入带有softmax,无需再softmax。\n :param num_class: 类别数。\n :param alpha: 各类别权重系数,输入列表,长度需要与类别数相同。\n :param gamma: 困难样本学习力度\n :param smooth: 标签平滑系数\n :param epsilon: 数值稳定系数\n \"\"\"\n super(FocalLossManyClassification, self).__init__()\n self.num_class = num_class\n self.alpha = alpha\n self.gamma = gamma\n self.smooth = smooth\n\n if self.alpha is None:\n self.alpha = torch.ones(self.num_class, 1)\n elif isinstance(self.alpha, list):\n assert len(self.alpha) == self.num_class\n self.alpha = torch.FloatTensor(alpha).view(self.num_class, 1)\n self.alpha = self.alpha / self.alpha.sum()\n else:\n raise TypeError('Not support alpha type')\n\n if self.smooth is not None:\n if self.smooth < 0 or self.smooth > 1.0:\n raise ValueError('Smooth value should be in [0,1]')\n self.epsilon = epsilon\n\n def forward(self, input_, target):\n '''softmax激活'''\n logit = torch.softmax(input_, dim=1)\n\n if logit.dim() > 2:\n raise ValueError('The input dimension should be 2')\n target = target.reshape(-1, 1)\n\n alpha = self.alpha\n if alpha.device != input_.device:\n alpha = alpha.to(input_.device)\n\n idx = target.cpu().long()\n one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_()\n one_hot_key = one_hot_key.scatter_(1, idx, 1)\n if one_hot_key.device != logit.device:\n one_hot_key = one_hot_key.to(logit.device)\n\n if self.smooth:\n one_hot_key = torch.clamp(\n one_hot_key, self.smooth, 1.0 - self.smooth)\n pt = (one_hot_key * logit).sum(1) + self.epsilon\n log_pt = pt.log()\n\n alpha = alpha[idx]\n loss = -1 * alpha * ((1 - pt) ** self.gamma) * log_pt\n\n return loss.mean()\n\n\nif __name__ == '__main__':\n f = FocalLossManyClassification(10, alpha=[1, 2, 15, 4, 8, 6, 7, 7, 9, 4], smooth=0.1)\n predict = torch.randn(64, 10, requires_grad=True)\n targets = torch.randint(0, 9, (64,))\n loss = f(torch.sigmoid(predict), targets)\n print(loss)\n loss.backward()\n # print(targets)\n"},"size":{"kind":"number","value":3322,"string":"3,322"}}},{"rowIdx":128387,"cells":{"max_stars_repo_path":{"kind":"string","value":"src/default_documents/migrations/0048_auto_20160215_1502.py"},"max_stars_repo_name":{"kind":"string","value":"Talengi/phase"},"max_stars_count":{"kind":"number","value":8,"string":"8"},"id":{"kind":"string","value":"2172301"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('default_documents', '0047_auto_20160211_0835'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='contractordeliverablerevision',\n old_name='trs_comments',\n new_name='file_transmitted',\n ),\n ]\n"},"size":{"kind":"number","value":397,"string":"397"}}},{"rowIdx":128388,"cells":{"max_stars_repo_path":{"kind":"string","value":"scripts/generate-event-enum.py"},"max_stars_repo_name":{"kind":"string","value":"GaloisInc/myxine"},"max_stars_count":{"kind":"number","value":41,"string":"41"},"id":{"kind":"string","value":"2170919"},"content":{"kind":"string","value":"#! /usr/bin/env python3\n\nimport sys\nimport json\nfrom typing import *\n\ndef generate_rust(events):\n # Generate names of variants\n variants = []\n for name_words, properties in events:\n name = ''.join(word.title() for word in name_words)\n variants.append((name, properties))\n # Actually output the text\n lines = []\n lines.append('#[non_exhaustive]')\n lines.append('#[derive(Clone, Debug, Serialize, Deserialize)]')\n lines.append('#[serde(rename_all = \"lowercase\", tag = \"event\", content = \"properties\")]')\n lines.append('enum Event {')\n for name, properties in variants:\n lines.append(' #[non_exhaustive]')\n lines.append(' ' + name + ' { ')\n items = list(properties.items())\n items.sort()\n for field, type in items:\n lines.append(' ' + field + ': ' + type + ',')\n lines.append(' },')\n lines.append('}')\n return '\\n'.join(lines)\n\nlanguages = {\n 'rust': generate_rust\n}\n\ndef main():\n try: _, language, filename = sys.argv\n except: print(\"Wrong number of arguments: please specify output language and interface definition JSON file.\", file=sys.stderr)\n\n try: generate = languages[language]\n except: print(\"Invalid language: \" + language, file=sys.stderr)\n\n try:\n with open(filename) as x: spec = json.loads(x.read())\n except: print(\"Couldn't open file: \" + filename, file=sys.stderr)\n spec_events = spec['events']\n spec_interfaces = spec['interfaces']\n events = []\n for event, event_info in spec_events.items():\n interface = event_info['interface']\n name_words = event_info['nameWords']\n fields = accum_fields(interface, spec_interfaces)\n events.append((name_words, fields))\n print(generate(events))\n\n# Accumulate all the fields in all super-interfaces of the given interface\ndef accum_fields(interface, interfaces):\n properties = {}\n while True:\n for property, type in interfaces[interface]['properties'].items():\n if properties.get(property) is None:\n properties[property] = type\n if interfaces[interface]['inherits'] is None: break\n else: interface = interfaces[interface]['inherits']\n return properties\n\nif __name__ == '__main__': main()\n"},"size":{"kind":"number","value":2281,"string":"2,281"}}},{"rowIdx":128389,"cells":{"max_stars_repo_path":{"kind":"string","value":"python/calculator/main.py"},"max_stars_repo_name":{"kind":"string","value":"cccaaannn/PracticeProjects"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2171546"},"content":{"kind":"string","value":"def menu():\n print(\"---------- ---------- Welcome to calculator ---------- ----------\\n\")\n valid_operations = (0, 1, 2, 3, 4)\n\n while True:\n operation = input(\"---------- Please choose operation ----------\\n0-exit\\n1-sum\\n2-subtract\\n3-multiply\\n4-divide\\n: \")\n print()\n\n try:\n operation = int(operation)\n if(operation not in valid_operations):\n raise ValueError\n except:\n print(\"Invalid value entered\\n\")\n continue\n\n if(operation == 0):\n break\n else:\n try:\n num1 = float(input(\"Enter num1: \"))\n num2 = float(input(\"Enter num2: \"))\n except:\n print(\"Invalid value entered\\n\")\n continue\n\n result = \"unknown\"\n match operation:\n case 1:\n result = num1 + num2\n case 2:\n result = num1 - num2\n case 3:\n result = num1 * num2\n case 4:\n result = num1 / num2\n case _:\n continue\n\n print(f\"Result is: {result}\")\n print(\"---------- ---------- ---------- ---------- ----------\\n\")\n\nif __name__ == '__main__':\n menu()\n"},"size":{"kind":"number","value":1322,"string":"1,322"}}},{"rowIdx":128390,"cells":{"max_stars_repo_path":{"kind":"string","value":"sopa/src/models/odenet_mnist/layers.py"},"max_stars_repo_name":{"kind":"string","value":"SamplingAndEnsemblingSolvers/SamplingAndEnsemblingSolvers"},"max_stars_count":{"kind":"number","value":25,"string":"25"},"id":{"kind":"string","value":"2171509"},"content":{"kind":"string","value":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom functools import partial\n\n\nclass MetaODEBlock(nn.Module):\n def __init__(self, activation_type = 'relu'):\n super(MetaODEBlock, self).__init__()\n \n self.rhs_func = ODEfunc(64, activation_type)\n self.integration_time = torch.tensor([0, 1]).float()\n \n \n def forward(self, x, solvers, solver_options):\n nsolvers = len(solvers)\n \n if solver_options.solver_mode == 'standalone':\n y = solvers[0].integrate(self.rhs_func, x = x, t = self.integration_time)\n\n elif solver_options.solver_mode == 'switch':\n if solver_options.switch_probs is not None:\n switch_probs = solver_options.switch_probs\n else:\n switch_probs = [1./nsolvers for _ in range(nsolvers)]\n solver_id = np.random.choice(range(nsolvers), p = switch_probs)\n solver_options.switch_solver_id = solver_id\n\n y = solvers[solver_id].integrate(self.rhs_func, x = x, t = self.integration_time)\n\n elif solver_options.solver_mode == 'ensemble':\n coin_flip = torch.bernoulli(torch.tensor((1,)), solver_options.ensemble_prob)\n solver_options.ensemble_coin_flip = coin_flip\n\n if coin_flip :\n if solver_options.ensemble_weights is not None:\n ensemble_weights = solver_options.ensemble_weights\n else: \n ensemble_weights = [1./nsolvers for _ in range(nsolvers)]\n\n for i, (wi, solver) in enumerate(zip(ensemble_weights, solvers)):\n if i == 0:\n y = wi * solver.integrate(self.rhs_func, x = x, t = self.integration_time)\n else:\n y += wi * solver.integrate(self.rhs_func, x = x, t = self.integration_time)\n else:\n y = solvers[0].integrate(self.rhs_func, x = x, t = self.integration_time)\n \n return y[-1,:,:,:,:]\n \n \n def ss_loss(self, y, solvers, solver_options):\n z0 = y\n rhs_func_ss = partial(self.rhs_func, ss_loss = True)\n integration_time_ss = self.integration_time + 1\n \n nsolvers = len(solvers)\n \n if solver_options.solver_mode == 'standalone':\n z = solvers[0].integrate(rhs_func_ss.func, x = y, t = integration_time_ss)\n\n elif solver_options.solver_mode == 'switch':\n if solver_options.switch_probs is not None:\n switch_probs = solver_options.switch_probs\n else:\n switch_probs = [1./nsolvers for _ in range(nsolvers)]\n solver_id = solver_options.switch_solver_id\n\n z = solvers[solver_id].integrate(rhs_func_ss.func, x = y, t = integration_time_ss)\n\n elif solver_options.solver_mode == 'ensemble':\n coin_flip = solver_options.ensemble_coin_flip\n\n if coin_flip :\n if solver_options.ensemble_weights is not None:\n ensemble_weights = solver_options.ensemble_weights\n else: \n ensemble_weights = [1./nsolvers for _ in range(nsolvers)]\n\n for i, (wi, solver) in enumerate(zip(ensemble_weights, solvers)):\n if i == 0:\n z = wi * solver.integrate(rhs_func_ss.func, x = y, t = integration_time_ss)\n else:\n z += wi * solver.integrate(rhs_func_ss.func, x = y, t = integration_time_ss)\n else:\n z = solvers[0].integrate(rhs_func_ss.func, x = y, t = integration_time_ss)\n \n z = z[-1,:,:,:,:] - z0\n z = torch.norm(z.reshape((z.shape[0], -1)), dim = 1)\n z = torch.mean(z)\n \n return z\n\n\nclass MetaNODE(nn.Module):\n \n def __init__(self, downsampling_method = 'conv', is_odenet = True, activation_type = 'relu', in_channels = 1):\n super(MetaNODE, self).__init__()\n \n self.is_odenet = is_odenet\n \n self.downsampling_layers = nn.Sequential(*build_downsampling_layers(downsampling_method, in_channels))\n self.fc_layers = nn.Sequential(*build_fc_layers())\n \n if is_odenet:\n self.blocks = nn.ModuleList([MetaODEBlock(activation_type)])\n else:\n self.blocks = nn.ModuleList([ResBlock(64, 64) for _ in range(6)])\n \n \n def forward(self, x, solvers=None, solver_options=None, loss_options = None):\n self.ss_loss = 0\n \n x = self.downsampling_layers(x)\n \n for block in self.blocks:\n if self.is_odenet:\n x = block(x, solvers, solver_options)\n \n if (loss_options is not None) and loss_options.ss_loss:\n z = block.ss_loss(x, solvers, solver_options)\n self.ss_loss += z\n else:\n x = block(x)\n\n x = self.fc_layers(x)\n return x\n \n def get_ss_loss(self):\n return self.ss_loss \n \n\nclass ODEfunc(nn.Module):\n\n def __init__(self, dim, activation_type = 'relu'):\n super(ODEfunc, self).__init__()\n \n if activation_type == 'tanh':\n activation = nn.Tanh()\n elif activation_type == 'softplus':\n activation = nn.Softplus()\n elif activation_type == 'softsign':\n activation = nn.Softsign()\n elif activation_type == 'relu':\n activation = nn.ReLU()\n else:\n raise NotImplementedError('{} activation is not implemented'.format(activation_type))\n\n self.norm1 = norm(dim)\n self.relu = nn.ReLU(inplace=True)\n self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1)\n self.norm2 = norm(dim)\n self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1)\n self.norm3 = norm(dim)\n self.nfe = 0\n\n def forward(self, t, x, ss_loss = False):\n self.nfe += 1\n out = self.norm1(x)\n out = self.relu(out)\n out = self.conv1(t, out)\n out = self.norm2(out)\n out = self.relu(out)\n out = self.conv2(t, out)\n out = self.norm3(out)\n \n if ss_loss:\n out = torch.abs(out)\n\n return out\n \ndef build_downsampling_layers(downsampling_method = 'conv', in_channels = 1):\n if downsampling_method == 'conv':\n downsampling_layers = [\n nn.Conv2d(in_channels, 64, 3, 1),\n norm(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, 4, 2, 1),\n norm(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, 4, 2, 1),\n ]\n elif downsampling_method == 'res':\n downsampling_layers = [\n nn.Conv2d(in_channels, 64, 3, 1),\n ResBlock(64, 64, stride=2, downsample=conv1x1(64, 64, 2)),\n ResBlock(64, 64, stride=2, downsample=conv1x1(64, 64, 2)),\n ]\n return downsampling_layers\n\n\ndef build_fc_layers():\n fc_layers = [norm(64), nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d((1, 1)), Flatten(), nn.Linear(64, 10)]\n return fc_layers\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\ndef norm(dim):\n return nn.GroupNorm(min(32, dim), dim)\n\n\nclass ResBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(ResBlock, self).__init__()\n self.norm1 = norm(inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.norm2 = norm(planes)\n self.conv2 = conv3x3(planes, planes)\n\n def forward(self, x):\n shortcut = x\n\n out = self.relu(self.norm1(x))\n\n if self.downsample is not None:\n shortcut = self.downsample(out)\n\n out = self.conv1(out)\n out = self.norm2(out)\n out = self.relu(out)\n out = self.conv2(out)\n\n return out + shortcut\n\n\nclass ConcatConv2d(nn.Module):\n\n def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False):\n super(ConcatConv2d, self).__init__()\n module = nn.ConvTranspose2d if transpose else nn.Conv2d\n self._layer = module(\n dim_in + 1, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups,\n bias=bias\n )\n\n def forward(self, t, x):\n tt = torch.ones_like(x[:, :1, :, :]) * t\n ttx = torch.cat([tt, x], 1)\n return self._layer(ttx)\n\n\nclass Flatten(nn.Module):\n\n def __init__(self):\n super(Flatten, self).__init__()\n\n def forward(self, x):\n shape = torch.prod(torch.tensor(x.shape[1:])).item()\n return x.view(-1, shape)\n \n \n"},"size":{"kind":"number","value":9094,"string":"9,094"}}},{"rowIdx":128391,"cells":{"max_stars_repo_path":{"kind":"string","value":"python/ray/rllib/RL/BRL/policy_iter.py"},"max_stars_repo_name":{"kind":"string","value":"christopher-hsu/ray"},"max_stars_count":{"kind":"number","value":1,"string":"1"},"id":{"kind":"string","value":"2171937"},"content":{"kind":"string","value":"\"\"\" \n\n\nAuthor: ()\nAffiliation: University of Pennsylvania\n\"\"\"\nimport numpy as np\nimport seeding\n\ndef policy_iter(env, discount, threshold, T=5000):\n\n\tV = np.zeros(env.snum)\n\tpolicy = np.random.choice(env.anum, env.snum)\n\tnp_random, _ = seeding.np_random(None)\n\tp_stable = False\n\ttrans_dict = {}\n\trew_dict = {}\n\tslip_prob = env.slip\n\tif env.stochastic_reward:\n\t\tslip_prob_r = env.slip_r\n\tfor state in env.eff_states:\n\t\tfor action in range(env.anum):\n\t\t\ttransM = np.zeros(env.snum)\n\t\t\trewM = np.zeros(env.snum)\n\n\t\t\tif env.stochastic:\n\t\t\t\tenv.slip = 0.0\n\t\t\t\tif env.stochastic_reward:\n\t\t\t\t\tenv.slip_r = 0.0\n\t\t\t\t\tr0, s_n0, _ = env.observe(state,action,np_random)\n\t\t\t\t\ttransM[s_n0] = 1.0-slip_prob\n\t\t\t\t\trewM[s_n0] = (1.0-slip_prob_r)*r0\n\n\t\t\t\t\tenv.slip_r = 1.0\n\t\t\t\t\tr1, s_n1, _ = env.observe(state,action,np_random)\n\t\t\t\t\trewM[s_n1] += slip_prob_r*r1\n\t\t\t\t\tassert(s_n0 == s_n1)\n\t\t\t\telse:\n\t\t\t\t\tr0, s_n0, _ = env.observe(state,action,np_random)\n\t\t\t\t\ttransM[s_n0] = 1.0-slip_prob\n\t\t\t\t\trewM[s_n0] = r0\n\n\t\t\t\tenv.slip = 1.0\n\t\t\t\tif env.stochastic_reward:\n\t\t\t\t\tenv.slip_r = 0.0\n\t\t\t\t\tr0, s_n0, _ = env.observe(state,action,np_random)\n\t\t\t\t\ttransM[s_n0] = 1.0-slip_prob\n\t\t\t\t\trewM[s_n0] = (1.0-slip_prob_r)*r0\n\n\t\t\t\t\tenv.slip_r = 1.0\n\t\t\t\t\tr1, s_n1, _ = env.observe(state,action,np_random)\n\t\t\t\t\trewM[s_n1] += slip_prob_r*r1\n\t\t\t\telse:\n\t\t\t\t\tr1, s_n1, _ = env.observe(state,action,np_random)\n\t\t\t\t\ttransM[s_n1] = slip_prob\n\t\t\t\t\trewM[s_n1] = r1\n\t\t\telse:\n\t\t\t\tif env.stochastic_reward:\n\t\t\t\t\tenv.slip_r = 0.0\n\t\t\t\t\tr0, s_n0, _ = env.observe(state,action,np_random)\n\t\t\t\t\ttransM[s_n0] = 1.0\n\t\t\t\t\trewM[s_n0] = (1.0-slip_prob_r)*r0\n\n\t\t\t\t\tenv.slip_r = 1.0\n\t\t\t\t\tr1, s_n1, _ = env.observe(state,action,np_random)\n\t\t\t\t\tif s_n1 != s_n0:\n\t\t\t\t\t\tprint(\"Transition is stochastic!\")\n\t\t\t\t\trewM[s_n1] += slip_prob_r*r1\n\t\t\t\telse:\n\t\t\t\t\tr0, s_n0, _ = env.observe(state,action,np_random)\n\t\t\t\t\ttransM[s_n0] = 1.0\n\t\t\t\t\trewM[s_n0] = r0\n\n\t\t\ttrans_dict[(state,action)] = transM\n\t\t\trew_dict[(state,action)] = rewM\n\tit = 0\n\tenv.slip = slip_prob\n\tif env.stochastic_reward:\n\t\tenv.slip_r = slip_prob_r\n\twhile(not p_stable):\n\t\tdelta = 1.0\n\t\tt = 0\n\t\twhile(delta > threshold and t < T):\n\t\t\tdelta = 0\n\t\t\tfor s in env.eff_states:\n\t\t\t\tv_prev = V[s]\n\t\t\t\tV[s] = sum([ trans_dict[(s,policy[s])][s_next] * (rew_dict[(s,policy[s])][s_next] \\\n\t\t\t\t\t\t+ int((s_next=env.goal[1]))*discount*V[s_next]) \\\n\t\t\t\t\t\tfor s_next in range(env.snum)])\n\t\t\t\tdelta = max(delta, abs(v_prev-V[s]))\n\t\t\tt += 1\n\t\tp_stable = True\n\t\tfor s in env.eff_states:\n\t\t\tu_old = policy[s]\n\t\t\tq_val = [sum([ trans_dict[(s,u)][s_next] * (rew_dict[(s,u)][s_next] \\\n\t\t\t\t\t+ int((s_next=env.goal[1]))*discount*V[s_next]) \\\n\t\t\t\t\tfor s_next in range(env.snum)]) for u in range(env.anum)]\n\n\t\t\tif max(q_val) - min(q_val) < 0.001:\n\t\t\t\tpolicy[s] = 0\n\t\t\telse:\n\t\t\t\tpolicy[s] = np.argmax(q_val)\n\t\t\t\tif not(u_old == policy[s]):\n\t\t\t\t\tp_stable = False\n\t\tit+=1\n\tprint(\"after %d iterations\"%it)\n\tQ = np.zeros((env.snum,env.anum))\n\tfor s in env.eff_states:\n\t\tfor a in range(env.anum):\n\t\t\tQ[s][a] = sum([ trans_dict[(s,a)][s_next] * (rew_dict[(s,a)][s_next] \\\n\t\t\t\t\t\t+ int((s_next=env.goal[1]))*discount*V[s_next]) \\\n\t\t\t\t\t\tfor s_next in range(env.snum)])\n\n\treturn V, Q, policy\n\ndef plot_V_pi(Vs, pis, env):\n for (V, pi) in zip(Vs, pis):\n plt.figure(figsize=(3, 3))\n w = int(np.sqrt(V.shape[0]))\n plt.imshow(V.reshape(w, w), cmap='gray',\n interpolation='none', clim=(0, 1))\n ax = plt.gca()\n ax.set_xticks(np.arange(w) - .5)\n ax.set_yticks(np.arange(w) - .5)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n Y, X = np.mgrid[0:w, 0:w]\n a2uv = {0: (-1, 0), 1: (0, -1), 2: (1, 0), 3: (-1, 0)}\n Pi = pi.reshape(w, w)\n for y in range(w):\n for x in range(w):\n a = Pi[y, x]\n u, v = a2uv[a]\n plt.arrow(x, y, u * .3, -v * .3, color='m',\n head_width=0.1, head_length=0.1)\n plt.text(x, y, str(env.desc[y, x].item().decode()),\n color='g', size=12, verticalalignment='center',\n horizontalalignment='center', fontweight='bold')\n plt.grid(color='b', lw=2, ls='-')\n\n\n"},"size":{"kind":"number","value":4282,"string":"4,282"}}},{"rowIdx":128392,"cells":{"max_stars_repo_path":{"kind":"string","value":"setup.py"},"max_stars_repo_name":{"kind":"string","value":"unicef/unicef-rest-export"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170554"},"content":{"kind":"string","value":"#!/usr/bin/env python\nimport ast\nimport codecs\nimport os.path\nimport re\nimport subprocess\nimport sys\nfrom codecs import open\nfrom distutils import log\nfrom distutils.errors import DistutilsError\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.sdist import sdist as BaseSDistCommand\n\nROOT = os.path.realpath(os.path.dirname(__file__))\ninit = os.path.join(ROOT, 'src', 'unicef_rest_export', '__init__.py')\n_version_re = re.compile(r'__version__\\s+=\\s+(.*)')\n_name_re = re.compile(r'NAME\\s+=\\s+(.*)')\n\nsys.path.insert(0, os.path.join(ROOT, 'src'))\n\nwith open(init, 'rb') as f:\n content = f.read().decode('utf-8')\n VERSION = str(ast.literal_eval(_version_re.search(content).group(1)))\n NAME = str(ast.literal_eval(_name_re.search(content).group(1)))\n\n\nsetup(\n name=NAME,\n version=VERSION,\n url='https://github.com/unicef/unicef-rest-export',\n author='UNICEF',\n author_email='',\n license=\"Apache 2 License\",\n description='Django package that handles exporting of data',\n long_description=codecs.open('README.rst').read(),\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n install_requires=(\n 'django',\n 'djangorestframework-csv',\n 'djangorestframework',\n 'lxml',\n 'python-docx',\n 'pytz',\n 'pyyaml',\n 'reportlab',\n 'tablib[html,xlsx,xls]',\n 'xlrd',\n 'xlwt',\n ),\n extras_require={\n 'test': (\n 'coverage',\n 'factory-boy',\n 'faker',\n 'flake8',\n 'isort',\n 'pytest-cov',\n 'pytest-django',\n 'pytest-echo',\n 'pytest-pythonpath',\n 'pytest',\n 'psycopg2',\n ),\n },\n platforms=['any'],\n classifiers=[\n 'Environment :: Web Environment',\n 'Programming Language :: Python :: 3.9',\n 'Framework :: Django',\n 'Framework :: Django :: 3.2',\n 'Framework :: Django :: 4.0',\n 'Intended Audience :: Developers'],\n scripts=[],\n)\n"},"size":{"kind":"number","value":2133,"string":"2,133"}}},{"rowIdx":128393,"cells":{"max_stars_repo_path":{"kind":"string","value":"tests/unit/raptiformica/utils/test_ensure_directory.py"},"max_stars_repo_name":{"kind":"string","value":"vdloo/raptiformica"},"max_stars_count":{"kind":"number","value":21,"string":"21"},"id":{"kind":"string","value":"2171584"},"content":{"kind":"string","value":"from raptiformica.utils import ensure_directory\nfrom tests.testcase import TestCase\n\n\nclass TestEnsureDirectory(TestCase):\n def setUp(self):\n self.makedirs = self.set_up_patch('raptiformica.utils.makedirs')\n\n def test_ensure_directory_makes_dirs_if_path_does_not_exist(self):\n ensure_directory('/tmp/directory')\n\n self.makedirs.assert_called_once_with('/tmp/directory')\n\n def test_ensure_directory_does_not_raise_exception_if_dir_already_exists(self):\n self.makedirs.side_effect = FileExistsError\n\n ensure_directory('/tmp/directory')\n"},"size":{"kind":"number","value":578,"string":"578"}}},{"rowIdx":128394,"cells":{"max_stars_repo_path":{"kind":"string","value":"abackend-env/lib/python3.5/site-packages/django_extensions/compat.py"},"max_stars_repo_name":{"kind":"string","value":"mhotwagner/abackend"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172376"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nimport sys\n\nimport django\nfrom django.conf import settings\n\n# flake8: noqa\n\n#\n# Python compatibility\n#\nPY3 = sys.version_info[0] == 3\nOLD_PY2 = sys.version_info[:2] < (2, 7)\n\nif PY3: # pragma: no cover\n from io import StringIO\n import importlib\n\nelif OLD_PY2: # pragma: no cover\n from cStringIO import StringIO\n from django.utils import importlib\n\nelse: # pragma: no cover\n from cStringIO import StringIO\n import importlib\n\n#\n# Django compatibility\n#\ntry: # Django 1.5\n from django.contrib.auth import get_user_model\nexcept ImportError: # pragma: no cover\n assert django.VERSION < (1, 5)\n from django.contrib.auth.models import User\n User.USERNAME_FIELD = \"username\"\n User.get_username = lambda self: self.username\n\n def get_user_model():\n return User\n\n\ndef list_apps():\n try:\n # django >= 1.7, to support AppConfig\n from django.apps import apps\n return [app.name for app in apps.get_app_configs()]\n except ImportError:\n # old way\n return settings.INSTALLED_APPS\n\n\ndef get_apps():\n try:\n # django >= 1.7, to support AppConfig\n from django.apps import apps\n return [app.models_module for app in apps.get_app_configs() if app.models_module]\n except ImportError:\n from django.db import models\n return models.get_apps()\n\n\ndef get_app_models(app_labels=None):\n if app_labels is None:\n try:\n # django >= 1.7, to support AppConfig\n from django.apps import apps\n return apps.get_models(include_auto_created=True)\n except ImportError:\n from django.db import models\n return models.get_models(include_auto_created=True)\n\n if not isinstance(app_labels, (list, tuple, set)):\n app_labels = [app_labels]\n\n app_models = []\n try:\n # django >= 1.7, to support AppConfig\n from django.apps import apps\n\n for app_label in app_labels:\n app_config = apps.get_app_config(app_label)\n app_models.extend(app_config.get_models(include_auto_created=True))\n except ImportError:\n from django.db import models\n\n try:\n app_list = [models.get_app(app_label) for app_label in app_labels]\n except (models.ImproperlyConfigured, ImportError) as e:\n raise CommandError(\"%s. Are you sure your INSTALLED_APPS setting is correct?\" % e)\n\n for app in app_list:\n app_models.extend(models.get_models(app, include_auto_created=True))\n\n return app_models\n"},"size":{"kind":"number","value":2576,"string":"2,576"}}},{"rowIdx":128395,"cells":{"max_stars_repo_path":{"kind":"string","value":"ecg_arrythmia/ecg_classification.py"},"max_stars_repo_name":{"kind":"string","value":"zabir-nabil/ecg-arrythmia"},"max_stars_count":{"kind":"number","value":4,"string":"4"},"id":{"kind":"string","value":"2171478"},"content":{"kind":"string","value":"\n# coding: utf-8\n\n# In[1]:\n\nfrom scipy import io, signal\nimport matplotlib.pyplot as plt\nimport dtcwt\nimport numpy as np\nimport itertools\nimport pywt\n\n\n# In[35]:\n\ntest_path = 'MLII/reformatted_dataset/normal/100m (0)_nsr.mat'\nta = io.loadmat(test_path)\n\n\n# In[36]:\n\nprint(ta['val'])\n\n\n# In[37]:\n\nprint(ta)\n\n\n# In[38]:\n\nta = ta['val']\n\n\n# In[39]:\n\nprint(type(ta))\n\n\n# In[40]:\n\nta.shape\n\n\n# In[41]:\n\nprint(ta)\n\n\n# In[42]:\n\nimport numpy as np\nta = np.array(ta)\n\n\n# In[43]:\n\nta = np.reshape(ta, (3600,))\n\n\n# In[44]:\n\nimport matplotlib.pyplot as plt\nplt.plot(ta)\nplt.show()\n\n\n# In[53]:\n\ndef plot_ecg(path, tit):\n ta = io.loadmat(path)\n ta = ta['val']\n ta = np.array(ta)\n ta = np.reshape(ta, (ta.shape[1],))\n plt.plot(ta)\n plt.title(tit)\n plt.show()\n\n\n# In[79]:\n\ndef get_ecg(path):\n ta = io.loadmat(path)\n ta = ta['val']\n ta = np.array(ta)\n ta = np.reshape(ta, (ta.shape[1],))\n return ta\n\n\n# In[54]:\n\nplot_ecg('MLII/reformatted_dataset/normal/100m (0)_nsr.mat', 'Normal Sinus Rhythm')\n\n\n# In[55]:\n\nplot_ecg('MLII/reformatted_dataset/normal/107m (5)_pr.mat', 'Pacemaker Rhythm')\n\n\n# In[56]:\n\nplot_ecg('MLII/reformatted_dataset/arythmia/100m (0)_apb.mat', 'Atrial Premature Beats')\n\n\n# In[57]:\n\n# arythmia detection\n\n\n# In[80]:\n\nx = get_ecg('MLII/reformatted_dataset/arythmia/100m (0)_apb.mat')\n\n\n# In[81]:\n\nfrom pywt import wavedec\ncoeffs = wavedec(x, 'db4', level=2)\ncA2, cD2, cD1 = coeffs\n\n\n# In[82]:\n\nplt.plot(cA2)\nplt.show()\n\nplt.plot(cD2)\nplt.show()\n\nplt.plot(cD1)\nplt.show()\n\n\n# In[83]:\n\n# data process\nimport glob\n\nnx = []\nax = []\n\nfor f in glob.glob('MLII/reformatted_dataset/normal/*.mat'):\n nx.append(get_ecg(f))\n \n\nfor f in glob.glob('MLII/reformatted_dataset/arythmia/*mat'):\n ax.append(get_ecg(f))\n\n\n# In[85]:\n\nprint(len(nx))\nprint(len(ax))\n\n\n# In[77]:\n\nimport pandas as pd\nfrom numpy.linalg import LinAlgError\nfrom statsmodels.tsa.stattools import adfuller\n#1\ndef AE(x): # Absolute Energy\n x = np.asarray(x)\n return sum(x * x)\n\n#2\ndef SM2(y):\n #t1 = time.time()\n f, Pxx_den = signal.welch(y)\n sm2 = 0\n n = len(f)\n for i in range(0,n):\n sm2 += Pxx_den[i]*(f[i]**2)\n \n #t2 = time.time()\n #print('time: ', t2-t2)\n return sm2\n\n\n#3\ndef LOG(y):\n n = len(y)\n return np.exp(np.sum(np.log(np.abs(y)))/n)\n\n#4\ndef WL(x): # WL in primary manuscript\n return np.sum(abs(np.diff(x)))\n#6\ndef AC(x, lag=5): # autocorrelation\n\n \"\"\"\n [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation\n\n \"\"\"\n # This is important: If a series is passed, the product below is calculated\n # based on the index, which corresponds to squaring the series.\n if type(x) is pd.Series:\n x = x.values\n if len(x) < lag:\n return np.nan\n # Slice the relevant subseries based on the lag\n y1 = x[:(len(x)-lag)]\n y2 = x[lag:]\n # Subtract the mean of the whole series x\n x_mean = np.mean(x)\n # The result is sometimes referred to as \"covariation\"\n sum_product = np.sum((y1-x_mean)*(y2-x_mean))\n # Return the normalized unbiased covariance\n return sum_product / ((len(x) - lag) * np.var(x))\n\n#7\ndef BE(x, max_bins=30): # binned entropy\n hist, bin_edges = np.histogram(x, bins=max_bins)\n probs = hist / len(x)\n return - np.sum(p * np.math.log(p) for p in probs if p != 0)\n#15\ndef SE(x): # sample entropy\n \"\"\"\n [1] http://en.wikipedia.org/wiki/Sample_Entropy\n [2] https://www.ncbi.nlm.nih.gov/pubmed/10843903?dopt=Abstract\n \"\"\"\n x = np.array(x)\n\n sample_length = 1 # number of sequential points of the time series\n tolerance = 0.2 * np.std(x) # 0.2 is a common value for r - why?\n\n n = len(x)\n prev = np.zeros(n)\n curr = np.zeros(n)\n A = np.zeros((1, 1)) # number of matches for m = [1,...,template_length - 1]\n B = np.zeros((1, 1)) # number of matches for m = [1,...,template_length]\n\n for i in range(n - 1):\n nj = n - i - 1\n ts1 = x[i]\n for jj in range(nj):\n j = jj + i + 1\n if abs(x[j] - ts1) < tolerance: # distance between two vectors\n curr[jj] = prev[jj] + 1\n temp_ts_length = min(sample_length, curr[jj])\n for m in range(int(temp_ts_length)):\n A[m] += 1\n if j < n - 1:\n B[m] += 1\n else:\n curr[jj] = 0\n for j in range(nj):\n prev[j] = curr[j]\n\n N = n * (n - 1) / 2\n B = np.vstack(([N], B[0]))\n\n # sample entropy = -1 * (log (A/B))\n similarity_ratio = A / B\n se = -1 * np.log(similarity_ratio)\n se = np.reshape(se, -1)\n return se[0]\n\n#16\ndef TRAS(x, lag=5):\n # time reversal asymmetry statistic\n \"\"\"\n | [1] ., . (2014).\n | Highly comparative feature-based time-series classification.\n | Knowledge and Data Engineering, IEEE Transactions on 26, 3026–3037.\n \"\"\"\n n = len(x)\n x = np.asarray(x)\n if 2 * lag >= n:\n return 0\n else:\n return np.mean((np.roll(x, 2 * -lag) * np.roll(x, 2 * -lag) * np.roll(x, -lag) -\n np.roll(x, -lag) * x * x)[0:(n - 2 * lag)])\n \n \n#17 \ndef VAR(x): # variance \n return np.var(x)\n\n\n# In[89]:\n\nlen(nx[0])\n\n\n# In[90]:\n\ndef get_A(x):\n coeffs = wavedec(x, 'db4', level=2)\n cA2, cD2, cD1 = coeffs\n return cA2\n\n\n# In[91]:\n\nlen(get_A(nx[0]))\n\n\n# In[92]:\n\nprint(len(nx))\n\n\n# In[97]:\n\nimport time\nfx = []\ny = []\n\nt1 = time.time()\nfor i in range(len(nx)):\n cf = []\n cf.append(AE(get_A(nx[i])))\n cf.append(SM2(get_A(nx[i])))\n cf.append((LOG(get_A(nx[i]))))\n cf.append((WL(get_A(nx[i]))))\n cf.append((AC(get_A(nx[i]))))\n cf.append(((BE(get_A(nx[i])))))\n cf.append((SE(get_A(nx[i]))))\n cf.append((TRAS(get_A(nx[i]))))\n cf.append((VAR(get_A(nx[i]))))\n fx.append(cf)\n y.append(1)\n print('.', end = '')\n \nt2 = time.time()\nprint(t2-t1)\n\n\n# In[99]:\n\nprint(len(fx))\nprint(len(y))\n\n\n# In[100]:\n\nprint(len(fx[0]))\n\n\n# In[101]:\n\nt1 = time.time()\nfor i in range(len(ax)):\n cf = []\n cf.append(AE(get_A(ax[i])))\n cf.append(SM2(get_A(ax[i])))\n cf.append((LOG(get_A(ax[i]))))\n cf.append((WL(get_A(ax[i]))))\n cf.append((AC(get_A(ax[i]))))\n cf.append(((BE(get_A(ax[i])))))\n cf.append((SE(get_A(ax[i]))))\n cf.append((TRAS(get_A(ax[i]))))\n cf.append((VAR(get_A(ax[i]))))\n fx.append(cf)\n y.append(0)\n print('.', end = '')\n \nt2 = time.time()\nprint(t2-t1)\n\n\n# In[102]:\n\nprint(len(fx))\nprint(len(y))\n\n\n# In[103]:\n\nimport numpy as np\nfx = np.array(fx, dtype = 'float32')\n\n\n# In[104]:\n\nfx.shape\n\n\n# In[105]:\n\nfrom sklearn.preprocessing import StandardScaler\n\nscaler = StandardScaler()\nscaler.fit(fx)\nprint(scaler.mean_)\nX_all = scaler.transform(fx)\n\nprint(np.mean(X_all))\nprint(np.std(X_all))\n\n\n# In[109]:\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import svm\n\nclf = svm.SVC(kernel='linear', C=1)\nscores = cross_val_score(clf, X_all, y, cv=5)\nprint('Accuracy: ', scores.mean(), scores.std() * 2)\n\n\n# In[112]:\n\nfrom sklearn.neighbors import KNeighborsClassifier\nknn = KNeighborsClassifier(n_neighbors=100)\n\nscores = cross_val_score(knn, X_all, y, cv=5)\nprint('Accuracy: ', scores.mean(), scores.std() * 2)\n\n\n# In[114]:\n\nfrom sklearn import tree\nclf = tree.DecisionTreeClassifier()\n\nscores = cross_val_score(clf, X_all, y, cv=5)\nprint('Accuracy: ', scores.mean(), scores.std() * 2)\n\n\n# In[130]:\n\nfrom sklearn.ensemble import RandomForestClassifier\nclf = RandomForestClassifier(n_estimators=100, max_depth=None,\n min_samples_split=4, random_state=0)\n\nscores = cross_val_score(clf, X_all, y, cv=5)\nprint('Accuracy: ', scores.mean(), scores.std() * 2)\n\n\n# In[125]:\n\nfrom sklearn.ensemble import AdaBoostClassifier\nclf = AdaBoostClassifier(n_estimators=10)\n\nscores = cross_val_score(clf, X_all, y, cv=5)\nprint('Accuracy: ', scores.mean(), scores.std() * 2)\n\n\n# In[ ]:\n\n\n\n"},"size":{"kind":"number","value":7851,"string":"7,851"}}},{"rowIdx":128396,"cells":{"max_stars_repo_path":{"kind":"string","value":"app/motivator/motivator_bot/telegram_bot.py"},"max_stars_repo_name":{"kind":"string","value":"SabaunT/bot-motivator"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2170628"},"content":{"kind":"string","value":"import logging\n\nfrom telegram import Bot\nfrom telegram.ext import Updater, ConversationHandler\n\nfrom app.settings import AppSettings\nfrom app.motivator.motivator_bot.handlers import (\n register_habits_conv_handler_kwargs, delete_habits_conv_handler_kwargs, help_handler\n)\n\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nclass MotivatorBot:\n # todo logging\n def __init__(self):\n self.token = AppSettings.TOKEN\n self.updater = Updater(token=self.token, use_context=True)\n self.dispatcher = self.updater.dispatcher\n\n def setup(self):\n register_habits_conv_handler = ConversationHandler(**register_habits_conv_handler_kwargs)\n delete_habits_conv_handler = ConversationHandler(**delete_habits_conv_handler_kwargs)\n\n self.dispatcher.add_handler(help_handler)\n self.dispatcher.add_handler(register_habits_conv_handler)\n self.dispatcher.add_handler(delete_habits_conv_handler)\n\n def run(self):\n self.updater.start_polling()\n self.updater.idle()\n\n @property\n def bot(self):\n return Bot(token=self.token)\n\n\nmotivator = MotivatorBot()\n"},"size":{"kind":"number","value":1218,"string":"1,218"}}},{"rowIdx":128397,"cells":{"max_stars_repo_path":{"kind":"string","value":"pyrustic/widget/pathentry.py"},"max_stars_repo_name":{"kind":"string","value":"tutlane/pyrustic"},"max_stars_count":{"kind":"number","value":0,"string":"0"},"id":{"kind":"string","value":"2172316"},"content":{"kind":"string","value":"import tkinter as tk\nfrom tkinter import filedialog\nfrom pyrustic import widget\nfrom pyrustic.tkmisc import merge_cnfs\nfrom pyrustic.view import View\n\n\nENTRY = \"entry\"\nBUTTON = \"button\"\nDIALOG = \"dialog\"\n\n\nclass Pathentry(widget.Frame):\n \"\"\"\n \"\"\"\n def __init__(self,\n master=None,\n browse=\"file\",\n width=17,\n title=None,\n initialdir=None,\n cnfs=None):\n \"\"\"\n - master: widget parent. Example: an instance of tk.Frame\n\n \"\"\"\n self.__cnfs = merge_cnfs({ENTRY: {\"width\": width}}, cnfs,\n components=(\"body\", ENTRY, BUTTON, DIALOG))\n super().__init__(master=master,\n class_=\"Pathentry\",\n cnf=self.__cnfs[\"body\"],\n on_build=self.__on_build,\n on_display=self.__on_display,\n on_destroy=self.__on_destroy)\n self.__browse = browse\n self.__title = title\n self.__initialdir = initialdir\n self.__entry = None\n self.__button = None\n self.__components = {}\n self.__string_var = tk.StringVar(value=\"\")\n # build\n self.__view = self.build()\n # ==============================================\n # PROPERTIES\n # ==============================================\n @property\n def components(self):\n \"\"\"\n \"\"\"\n return self.__components\n\n @property\n def string_var(self):\n return self.__string_var\n\n @property\n def path(self):\n return self.__path\n\n @path.setter\n def path(self, val):\n self.__path = val\n\n def __on_build(self):\n self.__entry = tk.Entry(self, textvariable=self.__string_var,\n cnf=self.__cnfs[ENTRY])\n self.__entry.pack(side=tk.LEFT, pady=0, fill=tk.X, expand=1)\n self.__components[\"entry\"] = self.__entry\n self.__button = tk.Button(self, text=\"...\",\n command=self.__on_click_button,\n cnf=self.__cnfs[BUTTON])\n self.__button.pack(side=tk.LEFT, padx=(2, 0), fill=tk.Y)\n self.__components[\"button\"] = self.__button\n\n def __on_display(self):\n pass\n\n def __on_destroy(self):\n pass\n\n def __on_click_button(self):\n if self.__browse == \"file\":\n try:\n filename = filedialog.askopenfilename(initialdir=self.__initialdir,\n title=self.__title,\n **self.__cnfs[DIALOG])\n except Exception as e:\n return\n path = None\n if not filename:\n pass\n elif isinstance(filename, str):\n path = filename\n else:\n path = \";\".join(filename)\n if path:\n self.__string_var.set(path)\n else:\n try:\n filename = filedialog.askdirectory(initialdir=self.__initialdir,\n title=self.__title,\n **self.__cnfs[DIALOG])\n except Exception as e:\n return\n path = None\n if not filename:\n pass\n elif isinstance(filename, str):\n path = filename\n else:\n path = \";\".join(filename)\n if path:\n self.__string_var.set(path)\n self.__entry.icursor(\"end\")\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n pathentry_test = Pathentry(root, browse=\"dir\",\n extra_options={\"dialog\":\n {\"initialdir\": \"/home/alex\",\n \"title\": \"Hello\"}})\n pathentry_test.pack(fill=tk.BOTH, expand=1)\n root.mainloop()\n"},"size":{"kind":"number","value":4023,"string":"4,023"}}},{"rowIdx":128398,"cells":{"max_stars_repo_path":{"kind":"string","value":"third_party/xcb_proto/protos.bzl"},"max_stars_repo_name":{"kind":"string","value":"TokTok/toktok-stack"},"max_stars_count":{"kind":"number","value":12,"string":"12"},"id":{"kind":"string","value":"2172156"},"content":{"kind":"string","value":"\"\"\"List of all the XCB protocols.\"\"\"\n\nxcb_protos = [\n \"bigreq\",\n \"composite\",\n \"damage\",\n \"dpms\",\n \"dri2\",\n \"dri3\",\n \"ge\",\n \"glx\",\n \"present\",\n \"randr\",\n \"record\",\n \"render\",\n \"res\",\n \"screensaver\",\n \"shape\",\n \"shm\",\n \"sync\",\n \"xc_misc\",\n \"xevie\",\n \"xf86dri\",\n \"xf86vidmode\",\n \"xfixes\",\n \"xinerama\",\n \"xinput\",\n \"xkb\",\n \"xprint\",\n \"xproto\",\n \"xselinux\",\n \"xtest\",\n \"xv\",\n \"xvmc\",\n]\n"},"size":{"kind":"number","value":475,"string":"475"}}},{"rowIdx":128399,"cells":{"max_stars_repo_path":{"kind":"string","value":"worker/__main__.py"},"max_stars_repo_name":{"kind":"string","value":"Antonio32A/blurplefier"},"max_stars_count":{"kind":"number","value":3,"string":"3"},"id":{"kind":"string","value":"2172308"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nfrom . import Worker\nfrom common import setup_logging\n\n\nwith setup_logging():\n Worker.with_config().run()\n"},"size":{"kind":"number","value":134,"string":"134"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":1283,"numItemsPerPage":100,"numTotalItems":129320,"offset":128300,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjQ3NjU1NSwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9zdGFyY29kZXJkYXRhX3B5X3Ntb2wiLCJleHAiOjE3NTY0ODAxNTUsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.LLCB6MOH2tvNa78Tbrp4U0nVGcylm7gMKwO2NS2k2HSDp_b0664CC570Xx8PVq47j2u22GTJ-LRV7Oj9zL3SAw","displayUrls":true},"discussionsStats":{"closed":0,"open":0,"total":0},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
max_stars_repo_path
stringlengths
4
182
max_stars_repo_name
stringlengths
6
116
max_stars_count
int64
0
191k
id
stringlengths
7
7
content
stringlengths
100
10k
size
int64
100
10k
cronjobs/jobs/config/__init__.py
guilhermeKodama/Closetinn
0
2172449
import os env = os.getenv('CLOSETINN_ENV', 'development') print 'ENV:', env if env == 'test': from test import * elif env == 'development': from development import * elif env == 'production': from production import *
231
tcrdist/tests/test_join.py
zozo123/tcrdist3
26
2171442
import pytest import pandas as pd v20df = pd.read_csv('https://www.dropbox.com/s/wmc5wc752t782kq/MIRA_v21_covid_diagnosed_sars_cov2_ci_epitope_specific_tcrs.tsv?dl=1', sep = "\t").head(1000) v21df = pd.read_csv('https://www.dropbox.com/s/c3gfq1lu0xdefpy/MIRA_v20_covid_diagnosed_sars_cov2_ci_epitope_specific_tcrs.tsv?dl=1', sep = "\t").head(1000) """ Example 1, Edit Dist 1 join """ def test_tcr_join_edit1(): import pandas as pd from tcrdist.join import join_by_dist from tcrdist.breadth import get_safe_chunk from tcrdist.rep_funcs import compute_pws_sparse import pwseqdist as pw my_metrics = { "cdr3_b_aa" : pw.metrics.nb_vector_editdistance} my_weights = { "cdr3_b_aa" : 1} my_kargs = {"cdr3_b_aa" :{'use_numba': True}} distances = compute_pws_sparse( df= v21df, df2 = v20df , metrics = my_metrics, weights = my_weights, kargs = my_kargs, radius=1, cpu=2, chunk_size=get_safe_chunk(v21df.shape[0], v20df.shape[0]), store=False, pm_pbar=True) csrmat = distances['tcrdist'] # it's called a tcrdist here, but was computed as edit distance 1 left_join_df = join_by_dist( how = 'left', csrmat = csrmat, left_df = v21df, right_df = v20df, left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'], right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'], left_suffix = '_x', right_suffix = '_y', max_n= 10, radius = 1) inner_join_df = join_by_dist( how = 'inner', csrmat = csrmat, left_df = v21df, right_df = v20df, left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'], right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'], left_suffix = '_x', right_suffix = '_y', max_n= 10, radius = 1) outer_join_df = join_by_dist( how = 'outer', csrmat = csrmat, left_df = v21df, right_df = v20df, left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'], right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'], left_suffix = '_x', right_suffix = '_y', max_n= 10, radius = 1) assert left_join_df.shape[0] > inner_join_df.shape[0] assert outer_join_df.shape[0] > inner_join_df.shape[0] assert outer_join_df.shape[0] > left_join_df.shape[0] """ 2. Full Example using TCRdist on all CDRs on real data from two MIRA cohorts, Here we use TCRrep to infer CDR1,2,2.5 to compute a full tcrdist """ def test_tcr_join_tcrdist(): import pandas as pd from tcrdist.breadth import get_safe_chunk from tcrdist.repertoire import TCRrep from tcrdist.join import join_by_dist tr20 = TCRrep(cell_df = v20df[['subject', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'bio_identity','protein_coordinate']].copy(), organism='human', chains=['beta'], compute_distances = False) tr21 = TCRrep(cell_df = v21df[['subject', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'bio_identity', 'protein_coordinate']].copy(), organism='human', chains=['beta'], compute_distances = False) tr21.cpus = 2 tr21.compute_sparse_rect_distances(df = tr21.clone_df, df2 = tr20.clone_df, radius = 36, chunk_size = get_safe_chunk(tr21.clone_df.shape[0], tr20.clone_df.shape[0])) left_right_comparision = join_by_dist( how = 'inner', csrmat = tr21.rw_beta, left_df = v21df, right_df = v20df, left_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'], right_cols = ['cdr3_b_aa','v_b_gene','j_b_gene','protein_coordinate','bio_identity','subject'], left_suffix = '_x', right_suffix = '_y', max_n= 10, radius = 24)
3,915
src/pyterpreter/RuntimeError_.py
kinshukk/pyterpreter
4
2172092
from TokenType import * class RuntimeError_(RuntimeError): def __init__(self, token, message): super().__init__(message) self.token = token self.message = message
192
medicine_details.py
vishuvish/know_your_medicine
1
2169625
# import the necessary packages from PIL import Image import pytesseract import argparse import cv2 import os from crawler import get_details pytesseract.tesseract_cmd = 'C:/Program Files (x86)/Tesseract-OCR/tesseract' # load the example image and convert it to grayscale image = cv2.imread("C:/git/know_your_medicine/images/" + "med2.jpg") gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) cv2.imshow("Image", gray) # write the grayscale image to disk as a temporary file so we can # apply OCR to it filename = "{}.png".format(os.getpid()) print(filename) cv2.imwrite("C:/git/know_your_medicine/images/" + filename, gray) #img = cv2.imread("C:/Users/VISHAL/images/" + filename) #cv2.imshow(filename, img) # load the image as a PIL/Pillow image, apply OCR, and then delete # the temporary file src_path = "C:/git/know_your_medicine/images/" text = pytesseract.image_to_string(Image.open(src_path + filename)) os.remove("C:/git/know_your_medicine/images/" + filename) get_details(text) # show the output images # cv2.imshow("Image", image) #cv2.imshow("Output", gray) cv2.waitKey(0)
1,086
roasttron/apps/temppoint/views.py
chrisvans/roasttron
0
2171897
import models import serializers # Third Party from rest_framework import viewsets class TempPointViewSet(viewsets.ModelViewSet): """ API endpoint that allows temppoints to be viewed or edited. """ queryset = models.TempPoint.objects.all() serializer_class = serializers.TempPointSerializer
313
taxman/tax/income_tax.py
robinmitra/taxman
3
2170678
from taxman.allowance.personal_allowance import PersonalAllowance from taxman.income.employment import Employment class IncomeTax: def __init__(self, incomes, allowances): self._incomes = incomes self._allowances = allowances def calculate(self): employment_income = next( income for income in self._incomes if type(income) is Employment) tax_free, basic_rate, higher_rate, additional_rate = self.get_bands( employment_income.get_salary()) tax = (basic_rate * 0.2) + (higher_rate * 0.4) + ( additional_rate * 0.45) return tax def get_bands(self, income): personal_allowance = next( allowance for allowance in self._allowances if type(allowance) is PersonalAllowance) pa = personal_allowance.get_allowance() basic_rate_end, higher_rate_end = 46350, 150000 tax_free = pa if income > pa else income basic_rate, higher_rate, additional_rate = 0, 0, 0 if income > pa: basic_rate = basic_rate_end - pa if income > higher_rate_end: higher_rate = higher_rate_end - basic_rate_end additional_rate = income - higher_rate_end elif income > basic_rate_end: higher_rate = income - basic_rate_end return tax_free, basic_rate, higher_rate, additional_rate
1,405
authd.py
lamw/mks
23
2171252
# Copyright (c) 2015 <NAME> # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import urlparse import websockify import socket import ssl import base64 import hashlib import os import random VMAD_OK = 200 VMAD_WELCOME = 220 VMAD_LOGINOK = 230 VMAD_NEEDPASSWD = 331 VMAD_USER_CMD = "USER" VMAD_PASS_CMD = "PASS" VMAD_THUMB_CMD = "THUMBPRINT" VMAD_CONNECT_CMD = "CONNECT" def expect(sock, code): line = sock.recv(1024) recv_code, msg = line.split()[0:2] if code != int(recv_code): raise Exception('Expected %d but received %d' % (code, recv_code)) return msg def handshake(host, port, ticket, cfg_file, thumbprint): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((host, port)) expect(sock, VMAD_WELCOME) sock = ssl.wrap_socket(sock) sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) cert = sock.getpeercert(binary_form=True) h = hashlib.sha1() h.update(cert) if thumbprint != h.hexdigest(): raise Exception("Server thumbprint doesn't match") sock.write("%s %s\r\n" % (VMAD_USER_CMD, ticket)) expect(sock, VMAD_NEEDPASSWD) sock.write("%s %s\r\n" % (VMAD_PASS_CMD, ticket)) expect(sock, VMAD_LOGINOK) rand = os.urandom(12) rand = base64.b64encode(rand) sock.write("%s %s\r\n" % (VMAD_THUMB_CMD, rand)) thumbprint2 = expect(sock, VMAD_OK) thumbprint2 = thumbprint2.replace(':', '').lower() sock.write("%s %s mks\r\n" % (VMAD_CONNECT_CMD, cfg_file)) expect(sock, VMAD_OK) sock2 = ssl.wrap_socket(sock) cert2 = sock2.getpeercert(binary_form=True) h = hashlib.sha1() h.update(cert2) if thumbprint2 != h.hexdigest(): raise Exception("Second thumbprint doesn't match") sock2.write(rand) return sock2 class AuthdRequestHandler(websockify.ProxyRequestHandler): def new_websocket_client(self): parse = urlparse.urlparse(self.path) query = parse.query args = urlparse.parse_qs(query) host = args.get("host", [""]).pop() port = args.get("port", [""]).pop() port = int(port) ticket = args.get("ticket", [""]).pop() cfg_file = args.get("cfgFile", [""]).pop() thumbprint = args.get("thumbprint", [""]).pop() thumbprint = thumbprint.replace(':', '').lower() tsock = handshake(host, port, ticket, cfg_file, thumbprint) self.do_proxy(tsock)
2,955
setup.py
Quansight/bitemporal-h5
3
2172303
#!/usr/bin/env python3 import os import sys from setuptools import setup def main(): """The main entry point.""" if sys.version_info[:2] < (3, 4): sys.exit("xonsh currently requires Python 3.4+") with open(os.path.join(os.path.dirname(__file__), "README.md"), "r") as f: readme = f.read() skw = dict( name="bth5", description="Bitemporal HDF5", long_description=readme, long_description_content_type="text/markdown", license="BSD-3-Clause", version="0.0.3", author="<NAME>", maintainer="Quansight", author_email="<EMAIL>", url="https://github.com/quansight/bitemporal-h5", platforms="Cross Platform", classifiers=["Programming Language :: Python :: 3"], packages=["bth5"], package_dir={"bth5": "bth5"}, zip_safe=True, install_requires=["h5py >= 2.8", "numpy>=1.16", "numba>=0.45"], extras_require={ "tests": ["pytest>=3.5", "pytest-black", "pytest-cov"], "docs": ["sphinx", "sphinx_rtd_theme"], }, ) setup(**skw) if __name__ == "__main__": main()
1,164
dataplicity/tags.py
wildfoundry/dataplicity-agent
170
2170115
from __future__ import unicode_literals import os import subprocess import re import logging log = logging.getLogger("agent") class TagError(Exception): """Custom exception raised when get_tag_list has an exception""" def get_tag_list(): """Run the dataplicity.tags script, get output as a list of tags""" home_dir = os.environ.get("HOME", "/home/dataplicity/") tag_executable = os.path.join(home_dir, "dataplicity_tags") # Early out if the script isn't there. if not os.path.exists(tag_executable): log.debug("tag executable %s does not exist", tag_executable) return [] log.debug("reading tags from %s", tag_executable) try: output = subprocess.check_output(tag_executable) except OSError as error: log.debug("failed to run %s; %s", tag_executable, error) return [] except Exception as error: log.error("error running %s; %s", tag_executable, error) raise TagError("error running %s" % tag_executable) str_output = output.decode("utf-8", errors="ignore") # regex split on comma, spaces, newline and tabs tag_list = re.split(r"[,\s\n\t]", str_output) tags = [tag.strip()[:25] for tag in tag_list if tag] return tags
1,244
biotorch/layers/fa/conv.py
jsalbert/biotorch
17
2172378
import torch import torch.nn as nn import biotorch.layers.fa_constructor as fa_constructor from typing import Union from torch.nn.common_types import _size_2_t from biotorch.autograd.fa.conv import Conv2dGrad from biotorch.layers.metrics import compute_matrix_angle class Conv2d(fa_constructor.Conv2d): def __init__( self, in_channels: int, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t = 1, padding: Union[str, _size_2_t] = 0, dilation: _size_2_t = 1, groups: int = 1, bias: bool = True, padding_mode: str = 'zeros', layer_config: dict = None ): if layer_config is None: layer_config = {} layer_config["type"] = "fa" super(Conv2d, self).__init__( in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, layer_config )
1,082
src/python/pants/backend/python/goals/publish_test.py
hephex/pants
0
2172347
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations from textwrap import dedent import pytest from pants.backend.python.goals.publish import PublishToPyPiFieldSet, PublishToPyPiRequest, rules from pants.backend.python.macros.python_artifact import PythonArtifact from pants.backend.python.target_types import PythonDistribution, PythonSourcesGeneratorTarget from pants.backend.python.util_rules import pex_from_targets from pants.core.goals.package import BuiltPackage, BuiltPackageArtifact from pants.core.goals.publish import PublishPackages, PublishProcesses from pants.core.util_rules.config_files import rules as config_files_rules from pants.engine.addresses import Address from pants.engine.fs import EMPTY_DIGEST from pants.testutil.rule_runner import QueryRule, RuleRunner from pants.util.frozendict import FrozenDict @pytest.fixture def rule_runner() -> RuleRunner: rule_runner = RuleRunner( rules=[ *config_files_rules(), *pex_from_targets.rules(), *rules(), QueryRule(PublishProcesses, [PublishToPyPiRequest]), ], target_types=[PythonSourcesGeneratorTarget, PythonDistribution], objects={"python_artifact": PythonArtifact}, ) rule_runner.set_options( [], env_inherit={"PATH", "PYENV_ROOT", "HOME"}, env={"TWINE_PASSWORD_PYPI": "secret"}, ) return rule_runner @pytest.fixture def packages(): return ( BuiltPackage( EMPTY_DIGEST, ( BuiltPackageArtifact("my-package-0.1.0.tar.gz"), BuiltPackageArtifact("my_package-0.1.0-py3-none-any.whl"), ), ), ) def project_files(skip_twine: bool) -> dict[str, str]: return { "src/BUILD": dedent( f"""\ python_sources() python_distribution( name="dist", provides=python_artifact( name="my-package", version="0.1.0", ), pypi_repositories=["@pypi", "@private"], skip_twine={skip_twine}, ) """ ), "src/hello.py": """print("hello")""", ".pypirc": "", } def assert_package( package: PublishPackages, expect_names: tuple[str, ...], expect_description: str, expect_process, ) -> None: assert package.names == expect_names assert package.description == expect_description if expect_process: expect_process(package.process) else: assert package.process is None def process_assertion(**assertions): def assert_process(process): for attr, expected in assertions.items(): assert getattr(process, attr) == expected return assert_process def test_twine_upload(rule_runner, packages) -> None: rule_runner.write_files(project_files(skip_twine=False)) tgt = rule_runner.get_target(Address("src", target_name="dist")) fs = PublishToPyPiFieldSet.create(tgt) result = rule_runner.request(PublishProcesses, [fs._request(packages)]) assert len(result) == 2 assert_package( result[0], expect_names=( "my-package-0.1.0.tar.gz", "my_package-0.1.0-py3-none-any.whl", ), expect_description="@pypi", expect_process=process_assertion( argv=( "./twine.pex_pex_shim.sh", "upload", "--non-interactive", "--config-file=.pypirc", "--repository=pypi", "my-package-0.1.0.tar.gz", "my_package-0.1.0-py3-none-any.whl", ), env=FrozenDict({"TWINE_PASSWORD": "<PASSWORD>"}), ), ) assert_package( result[1], expect_names=( "my-package-0.1.0.tar.gz", "my_package-0.1.0-py3-none-any.whl", ), expect_description="@private", expect_process=process_assertion( argv=( "./twine.pex_pex_shim.sh", "upload", "--non-interactive", "--config-file=.pypirc", "--repository=private", "my-package-0.1.0.tar.gz", "my_package-0.1.0-py3-none-any.whl", ), env=FrozenDict(), ), ) def test_skip_twine(rule_runner, packages) -> None: rule_runner.write_files(project_files(skip_twine=True)) tgt = rule_runner.get_target(Address("src", target_name="dist")) fs = PublishToPyPiFieldSet.create(tgt) result = rule_runner.request(PublishProcesses, [fs._request(packages)]) assert len(result) == 1 assert_package( result[0], expect_names=( "my-package-0.1.0.tar.gz", "my_package-0.1.0-py3-none-any.whl", ), expect_description="(by `skip_twine` on src:dist)", expect_process=None, ) # Skip twine globally from config option. rule_runner.set_options(["--twine-skip"]) result = rule_runner.request(PublishProcesses, [fs._request(packages)]) assert len(result) == 0
5,251
autobuyfast/cars/migrations/0014_auto_20210811_1559.py
dark-codr/autouyfast
0
2172428
# Generated by Django 3.1.13 on 2021-08-11 14:59 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cars', '0013_auto_20210811_1456'), ] operations = [ migrations.AddField( model_name='autosearch', name='car_sub_price', field=models.DecimalField(blank=True, decimal_places=1, max_digits=40, null=True, verbose_name='Car Old Price'), ), migrations.AlterField( model_name='autosearch', name='car_price', field=models.DecimalField(blank=True, decimal_places=1, max_digits=40, null=True, verbose_name='Car Main Price'), ), ]
702
{{cookiecutter.project_slug}}/src/a12n/tests/jwt_views/test_obtain_jwt_view.py
ginsenghillock/django
98
2170405
import json import pytest from axes.models import AccessAttempt pytestmark = pytest.mark.django_db @pytest.fixture(autouse=True) def _enable_django_axes(settings): settings.AXES_ENABLED = True @pytest.fixture def get_token(as_user): def _get_token(username, password, expected_status=201): return as_user.post('/api/v1/auth/token/', { 'username': username, 'password': password, }, format='json', expected_status=expected_status) return _get_token def _decode(response): return json.loads(response.content.decode('utf-8', errors='ignore')) def test_getting_token_ok(as_user, get_token): got = get_token(as_user.user.username, as_user.password) assert 'token' in got def test_getting_token_is_token(as_user, get_token): got = get_token(as_user.user.username, as_user.password) assert len(got['token']) > 32 # every stuff that is long enough, may be a JWT token def test_getting_token_with_incorrect_password(as_user, get_token): got = get_token(as_user.user.username, '<PASSWORD>', expected_status=400) assert 'nonFieldErrors' in got def test_getting_token_with_incorrect_password_creates_access_attempt_log_entry(as_user, get_token): get_token(as_user.user.username, '<PASSWORD>', expected_status=400) assert AccessAttempt.objects.count() == 1 @pytest.mark.parametrize(('extract_token', 'status_code'), [ (lambda response: response['token'], 200), (lambda *args: '<KEY>', 401), (lambda *args: 'sh1t', 401), ]) def test_received_token_works(as_user, get_token, as_anon, extract_token, status_code): token = extract_token(get_token(as_user.user.username, as_user.password)) as_anon.credentials(HTTP_AUTHORIZATION=f'Bearer {token}') as_anon.get('/api/v1/users/me/', expected_status=status_code)
1,830
src/data/xes_reader.py
amaraletitia/pm19
0
2171179
import datetime import xml.etree.ElementTree as ET import collections class XesReader(object): """docstring for XesReader""" def __init__(self, filepath): super(XesReader, self).__init__() #filepath = './example/financial_log.xes' self.tree = ET.parse(filepath) self.root = self.tree.getroot() #need to specify self.ns = {'xes': "http://code.deckfour.org/xes"} def to_dict_eventlog(self, *args): len_attr = len(args) dict_eventlog= collections.defaultdict(list) for trace in self.root.findall('xes:trace', self.ns): caseid = '' for string in trace.findall('xes:string', self.ns): if string.attrib['key'] == 'concept:name': caseid = string.attrib['value'] for event in trace.findall('xes:event', self.ns): task = '' user = '' event_type = '' dict_eventlog['CASE_ID'].append(caseid) for string in event.findall('xes:string', self.ns): if string.attrib['key'] == 'concept:name': task = string.attrib['value'] if string.attrib['key'] == 'org:resource': user = string.attrib['value'] if string.attrib['key'] == 'lifecycle:traself.nsition': event_type = string.attrib['value'] #to coself.nsider additional attributes for i in range(len_attr): attr = string.attrib['value'] dict_eventlog[args[i]].append(attr) dict_eventlog['ACTIVITY'].append(task) dict_eventlog['RESOURCE'].append(user) dict_eventlog['LIFECYCLE'].append(event_type) timestamp = '' for date in event.findall('xes:date', self.ns): if date.attrib['key'] == 'time:timestamp': timestamp = date.attrib['value'] timestamp = datetime.datetime.strptime(timestamp[:-10], '%Y-%m-%dT%H:%M:%S') dict_eventlog['TIMESTAMP'].append(timestamp) print("CASE_ID len: {}".format(len(dict_eventlog['CASE_ID']))) print("ACTIVITY len: {}".format(len(dict_eventlog['ACTIVITY']))) print("RESOURCE len: {}".format(len(dict_eventlog['RESOURCE']))) print("LIFECYCLE len: {}".format(len(dict_eventlog['LIFECYCLE']))) print("TIMESTAMP len: {}".format(len(dict_eventlog['TIMESTAMP']))) return dict_eventlog, args def to_eventlog(self, _input): return Eventlog.from_dict(_input) if __name__ == '__main__': XR = XesReader('./example/financial_log.xes') dict_eventlog, attrs = XR.to_dict_eventlog() eventlog = XR.to_eventlog(dict_eventlog) print(eventlog)
2,373
multichat/chat/migrations/0002_questionbank.py
kshitij3199/PreBosm_2K18
0
2172430
# Generated by Django 2.0.6 on 2018-07-01 07:25 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('chat', '0001_initial'), ] operations = [ migrations.CreateModel( name='QuestionBank', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('question_no', models.IntegerField()), ('question_title', models.CharField(max_length=255)), ('question_text', models.CharField(max_length=255)), ('question_answer', models.CharField(max_length=255)), ('question_hint', models.CharField(blank=True, max_length=255, null=True)), ], ), ]
804
docs/source/base_classes/create_transformer/no_id_dropper.py
rt-phb/Spooq
3
2169501
from transformer import Transformer class NoIdDropper(Transformer): """ This is a simplified example on how to implement a new transformer class. Please take your time to write proper docstrings as they are automatically parsed via Sphinx to build the HTML and PDF documentation. Docstrings use the style of Numpy (via the napoleon plug-in). This class uses the :meth:`pyspark.sql.DataFrame.dropna` method internally. Examples -------- input_df = some_extractor_instance.extract() transformed_df = NoIdDropper( id_columns='user_id' ).transform(input_df) Parameters ---------- id_columns: :any:`str` or :any:`list` The name of the column containing the identifying Id values. Defaults to "id" Raises ------ :any:`exceptions.ValueError`: "how ('" + how + "') should be 'any' or 'all'" :any:`exceptions.ValueError`: "subset should be a list or tuple of column names" """ def __init__(self, id_columns='id'): super(NoIdDropper, self).__init__() self.id_columns = id_columns def transform(self, input_df): self.logger.info("Dropping records without an Id (columns to consider: {col})" .format(col=self.id_columns)) output_df = input_df.dropna( how='all', thresh=None, subset=self.id_columns ) return output_df
1,451
bspider/parser/__init__.py
littlebai3618/bspider
3
2170955
from .base_pipeline import BasePipeline from .base_extractor import BaseExtractor from .work import run_parser from .item import * __all__ = [ 'BasePipeline', 'BaseExtractor', 'run_parser', 'Item', 'MySQLSaverItem' ]
237
apps/test/brain/brain_base_cases.py
neelrast/pepper-depression-module
0
2171176
from pepper.brain.long_term_memory import LongTermMemory from pepper.brain.utils.base_cases import statements from pepper.brain.utils.helper_functions import phrase_update, phrase_cardinality_conflicts, phrase_negation_conflicts, \ phrase_statement_novelty, phrase_type_novelty, phrase_subject_gaps, phrase_object_gaps, phrase_overlaps, phrase_trust # Create brain connection brain = LongTermMemory() for elem in statements: x = brain.update(elem) print(elem['subject']['label'], elem['predicate']['type'], elem['object']['label']) print('\tcardinality conflicts: '+phrase_cardinality_conflicts(x['cardinality_conflicts'], elem)) print('\tnegation conflicts: '+phrase_negation_conflicts(x['negation_conflicts'], elem)) print('\tstatement novelty: '+phrase_statement_novelty(x['statement_novelty'])) print('\ttype novelty: '+phrase_type_novelty(x['entity_novelty'], elem)) print('\tsubject gaps: '+phrase_subject_gaps(x['subject_gaps'], elem)) print('\tobject gaps: '+phrase_object_gaps(x['object_gaps'], elem)) print('\toverlaps: '+phrase_overlaps(x['overlaps'], elem)) print('\ttrust: '+phrase_trust(x['trust'])) print('\t\t\tFINAL SAY: '+phrase_update(x, proactive=True, persist=True))
1,250
curso-python/poo/loja/pessoa.py
gui-hub/Estudos-Python
0
2170767
MAIOR_IDADE = 18 class Pessoa: def __init__(self, nome, idade): self.nome = nome self.idade = idade def __str__(self): if not self.idade: # if self.idade nao estiver setado self.idade else: return f'{self.nome}, {self.idade} anos' def isAdult(self): return self.idade >= MAIOR_IDADE # return self.idade IF SELF.IDADE > MAIOR_IDADE ("se for maior que")
434
algorithms/symmetry/cosym/test_cosym_analyse_datasets.py
TiankunZhou/dials
0
2170054
from __future__ import absolute_import, division, print_function import pytest import libtbx from cctbx import sgtbx from dials.algorithms.symmetry.cosym import CosymAnalysis, phil_scope from dials.algorithms.symmetry.cosym._generate_test_data import generate_test_data @pytest.mark.parametrize( ( "space_group", "unit_cell", "dimensions", "sample_size", "use_known_space_group", "use_known_lattice_group", "best_monoclinic_beta", ), [ ("P2", None, None, 10, False, False, True), ("P3", None, None, 20, False, False, True), ("I23", None, libtbx.Auto, 10, False, False, True), ("I23", None, libtbx.Auto, 10, True, False, True), ("P422", (79, 79, 37, 90, 90, 90), None, 20, True, True, True), ("P321", (59.39, 59.39, 28.35, 90, 90, 120), None, 5, False, False, True), ( "C121", (112.67, 52.85, 44.47, 90.00, 102.97, 90.00), None, 5, False, False, False, ), ( "C121", (112.67, 52.85, 44.47, 90.00, 102.97, 90.00), None, 5, True, True, False, ), ( "I121", (44.47, 52.85, 111.46, 90.00, 99.91, 90.00), None, 5, False, False, True, ), ( "I121", (44.47, 52.85, 111.46, 90.00, 99.91, 90.00), None, 5, True, True, True, ), ], ) def test_cosym( space_group, unit_cell, dimensions, sample_size, use_known_space_group, use_known_lattice_group, best_monoclinic_beta, run_in_tmpdir, ): import matplotlib matplotlib.use("Agg") datasets, expected_reindexing_ops = generate_test_data( space_group=sgtbx.space_group_info(symbol=space_group).group(), unit_cell=unit_cell, unit_cell_volume=10000, d_min=1.5, map_to_p1=True, sample_size=sample_size, seed=1, ) expected_space_group = sgtbx.space_group_info(symbol=space_group).group() params = phil_scope.extract() params.cluster.n_clusters = len(expected_reindexing_ops) params.dimensions = dimensions params.best_monoclinic_beta = best_monoclinic_beta if use_known_space_group: params.space_group = expected_space_group.info() if use_known_lattice_group: params.lattice_group = expected_space_group.info() params.normalisation = None cosym = CosymAnalysis(datasets, params) cosym.run() d = cosym.as_dict() if not use_known_space_group: assert d["subgroup_scores"][0]["likelihood"] > 0.89 assert ( sgtbx.space_group(d["subgroup_scores"][0]["patterson_group"]) == sgtbx.space_group_info(space_group) .group() .build_derived_patterson_group() ) space_groups = {} reindexing_ops = {} for dataset_id in cosym.reindexing_ops.keys(): if 0 in cosym.reindexing_ops[dataset_id]: cb_op = cosym.reindexing_ops[dataset_id][0] reindexing_ops.setdefault(cb_op, set()) reindexing_ops[cb_op].add(dataset_id) if dataset_id in cosym.space_groups: space_groups.setdefault(cosym.space_groups[dataset_id], set()) space_groups[cosym.space_groups[dataset_id]].add(dataset_id) assert len(reindexing_ops) == len(expected_reindexing_ops) assert len(space_groups) == 1 space_group_info = list(space_groups)[0].info() if use_known_space_group: expected_sg = sgtbx.space_group_info(space_group).group() else: expected_sg = ( sgtbx.space_group_info(space_group).group().build_derived_patterson_group() ) assert cosym.best_subgroup["best_subsym"].space_group() == expected_sg for cb_op, ridx_set in reindexing_ops.items(): for expected_set in expected_reindexing_ops.values(): assert (len(ridx_set.symmetric_difference(expected_set)) == 0) or ( len(ridx_set.intersection(expected_set)) == 0 ) for d_id in ridx_set: reindexed = ( datasets[d_id] .change_basis(sgtbx.change_of_basis_op(cb_op)) .customized_copy( space_group_info=space_group_info.change_basis( cosym.cb_op_inp_min.inverse() ) ) ) assert reindexed.is_compatible_unit_cell(), str( reindexed.crystal_symmetry() )
4,744
server/geonames/management/commands/city.py
coll-gate/collgate
2
2172400
# -*- coding: utf-8; -*- # # @file city.py # @brief # @author <NAME> (INRA UMR1095) # @date 2017-01-03 # @copyright Copyright (c) 2016 INRA/CIRAD # @license MIT (see LICENSE file) # @details """ Install City """ from django.db import transaction from django.core.management import BaseCommand from geonames.appsettings import CITY_SOURCES, IGeoname, DATA_DIR from geonames import instance from geonames.models import City, Country from geonames.geonames import Geonames import progressbar import resource import sys import os from django.utils import timezone from colorama import Fore, Style class MemoryUsageWidget(progressbar.widgets.WidgetBase): def __call__(self, progress, data): if sys.platform != 'win32': return '%s kB' % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss return '?? kB' class Command(BaseCommand): help = """Download all files in GEONAMES_CITY_SOURCES if they were updated or if --force option was used. And Import city data if they were downloaded.""" def __init__(self): super(Command, self).__init__() self.progress_enabled = False self.progress_widgets = None self.progress = 0 self.force = False self.export_file = None self.export = False self.delete = False self.verbosity = None self.no_color = None self.cities_bulk = [] def add_arguments(self, parser): # Named (optional) arguments parser.add_argument( '-f', '--force', action='store_true', dest='force', default=False, help='Download and import even if matching files are up-to-date', ) parser.add_argument( '-np', '--no-progress', action='store_true', dest='no-progress', default=False, help='Hide progress bar' ) parser.add_argument( '-e', '--export', dest='export', action='store', default=False, nargs='?', help='Export files with matching data only. Absolute path to export file' ) parser.add_argument( '-d', '--delete', dest='delete', action='store_true', default=False, help='Delete local source files after importation' ) def progress_init(self): """Initialize progress bar.""" if self.progress_enabled: self.progress = 0 self.progress_widgets = [ Fore.LIGHTCYAN_EX, 'RAM used: ', MemoryUsageWidget(), ' ', progressbar.ETA(), ' Done: ', progressbar.Percentage(), ' ', progressbar.Bar( marker='▓', fill='░' ), ' ', progressbar.AnimatedMarker(markers='⎺⎻⎼⎽⎼⎻'), ' ', Style.RESET_ALL, ] def progress_start(self, max_value): """Start progress bar.""" if self.progress_enabled: self.progress = progressbar.ProgressBar( max_value=max_value, widgets=self.progress_widgets ).start() def progress_update(self, value): """Update progress bar.""" if self.progress_enabled: self.progress.update(value) def progress_finish(self): """Finalize progress bar.""" if self.progress_enabled: self.progress.finish() @transaction.atomic def handle(self, *args, **options): self.city_manager(args, options) def city_manager(self, args, options): self.progress_enabled = not options.get('no-progress') self.export = options.get('export') self.force = options.get('force') self.verbosity = options.get('verbosity') self.no_color = options.get('no_color') if self.export is None: self.export = '%s/city_light_%s.txt' % (DATA_DIR, timezone.now().isoformat('_') .replace(':', '-') .replace('.', '-')) self.delete = options.get('delete') self.progress_init() if self.export: file_path = self.export if os.path.exists(file_path): os.remove(file_path) else: print('Creating %s' % file_path) self.export_file = open(file_path, 'a') for source in CITY_SOURCES: geonames = Geonames(source, force=self.force) if not geonames.need_run: continue i = 0 nb_lines = geonames.num_lines() refresh_tx = int(nb_lines / 100) if (nb_lines / 100) >= 1 else 1 self.progress_start(nb_lines) if not self.progress_enabled: print('Importing...') cities_to_check = [] for items in geonames.parse(): current_city = self.city_check(items) if current_city: cities_to_check.append(current_city) if len(cities_to_check) >= 500: self.city_bulk(cities_to_check) cities_to_check = [] i += 1 if i % refresh_tx == 0: self.progress_update(i) if cities_to_check: self.city_bulk(cities_to_check) self.progress_finish() if self.export: self.export_file.close() geonames.finish(delete=self.delete) def city_check(self, items): if not items[IGeoname.featureCode] in instance.geonames_include_city_types: return False return { 'geoname_id': int(items[IGeoname.geonameid]), 'name': items[IGeoname.name], 'country_code': items[IGeoname.countryCode], 'country_id': self._get_country_id(items[IGeoname.countryCode]), 'latitude': items[IGeoname.latitude], 'longitude': items[IGeoname.longitude], 'population': items[IGeoname.population], 'feature_code': items[IGeoname.featureCode] } def city_bulk(self, cities_to_check): bulk = [] for city in cities_to_check: result = City.objects.filter(geoname_id=city.get('geoname_id')) if result: result[0].name = city.get('name') result[0].country_id = city.get('country_id') result[0].latitude = city.get('latitude') result[0].longitude = city.get('longitude') result[0].population = city.get('population') result[0].feature_code = city.get('feature_code') result[0].save() town = result[0] else: town = City( geoname_id=city.get('geoname_id'), name=city.get('name'), country_id=city.get('country_id'), latitude=city.get('latitude'), longitude=city.get('longitude'), population=city.get('population'), feature_code=city.get('feature_code') ) bulk.append(town) if self.export: r = [""] * 18 r[IGeoname.name] = city.get('name') r[IGeoname.countryCode] = city.get('country_code') r[IGeoname.latitude] = city.get('latitude') r[IGeoname.longitude] = city.get('longitude') r[IGeoname.population] = city.get('population') r[IGeoname.featureCode] = city.get('feature_code') r[IGeoname.geonameid] = str(city.get('geoname_id')) self.export_file.write('\t'.join(r) + '\n') self.display_entry_message(town, True if result else False) if bulk: City.objects.bulk_create(bulk) self.display_bulk_message(len(bulk)) def _get_country_id(self, code2): """ Simple lazy identity map for code2->country """ if not hasattr(self, '_country_codes'): self._country_codes = {} if code2 not in self._country_codes.keys(): self._country_codes[code2] = Country.objects.get(code2=code2).pk return self._country_codes[code2] def display_bulk_message(self, bulk_size): if not self.progress_enabled and self.verbosity: print('BULK INSERT!\tNb_entries:%s' % bulk_size) def display_entry_message(self, city, state): if not self.progress_enabled and self.verbosity: display_state = "UPDATED" if state else "ADD" if not self.no_color: display_state = (Fore.BLUE if state else Fore.GREEN) + display_state + Style.RESET_ALL print('[%s] %s' % (display_state, city))
9,164
defects4cpp/config/config.py
HansolChoe/defects4cpp
10
2172183
""" Interface of env.py. All the modifiable settings should be accessed via 'config' instead of using env.py directly. """ import sys class _ConfigMeta(type): def __new__(mcs, name, bases, attrs): new_class = type.__new__(mcs, name, bases, attrs) if not hasattr(new_class, "_env_module"): def create_getter(getter_key: str): def getter(self): return getattr(self._env_module, getter_key) return getter def create_setter(setter_key: str): def setter(self, value: str): setattr(self._env_module, setter_key, value) return setter from config import env env_module = sys.modules[env.__name__] setattr(new_class, "_env_module", env_module) settings = { key: getattr(env_module, key) for key in dir(env_module) if key.startswith("DPP") } for k in settings: prop = property(create_getter(k), create_setter(k)) setattr(new_class, k, prop) return new_class class _Config(metaclass=_ConfigMeta): pass config = _Config()
1,235
djangoflutterwave/tests/test_serializers.py
bdelate/django-flutterwave
9
2172392
# stdlib imports # django imports from django.test import TestCase # 3rd party imports from rest_framework.exceptions import ValidationError # project imports from djangoflutterwave.serializers import DRTransactionSerializer from djangoflutterwave.tests.factories import FlwPlanModelFactory, UserFactory class TestDRTransactionSerializer(TestCase): """Test suite for the DRTransactionSerializer""" def test_validate_reference(self): """Ensure the serializer raises an exception for an invalid plan_id or user_id """ plan = FlwPlanModelFactory() user = UserFactory() expected_response = f"{plan.id}__test__{user.id}" actual_response = DRTransactionSerializer.validate_reference( self=None, value=expected_response ) self.assertEqual(expected_response, actual_response) with self.assertRaises(ValidationError) as e: DRTransactionSerializer.validate_reference( self=None, value=f"123__test__{user.id}" ) self.assertEqual(e.exception.detail[0], "Payment type does not exist") with self.assertRaises(ValidationError) as e: DRTransactionSerializer.validate_reference( self=None, value=f"{plan.id}__test__123" ) self.assertEqual(e.exception.detail[0], "User does not exist")
1,373
ghscard/_cache.py
thombashi/ghscard
11
2172350
from datetime import datetime from functools import total_ordering from typing import Union from datetimerange import DateTimeRange from path import Path @total_ordering class CacheTime: @property def second(self) -> Union[int, float]: return self.__second @property def hour(self) -> float: return self.second / (60 ** 2) def __init__(self, second: Union[int, float]): self.__second = second def __eq__(self, other) -> bool: return self.second == other.second def __lt__(self, other) -> bool: return self.second < other.second class CacheManager: def __init__(self, logger, cache_lifetime: CacheTime) -> None: self.__logger = logger self.__cache_lifetime = cache_lifetime def is_cache_available(self, cache_file_path: Path) -> bool: if not cache_file_path.isfile(): self.__logger.debug("cache not found: {}".format(cache_file_path)) return False try: dtr = DateTimeRange(datetime.fromtimestamp(cache_file_path.mtime), datetime.now()) except OSError: return False if not dtr.is_valid_timerange(): return False cache_elapsed = CacheTime(dtr.get_timedelta_second()) cache_msg = "path={path}, lifetime={lifetime:.1f}h, elapsed={elapsed:.1f}h".format( path=cache_file_path, lifetime=self.__cache_lifetime.hour, elapsed=cache_elapsed.hour ) if cache_elapsed < self.__cache_lifetime: self.__logger.debug("cache available: {}".format(cache_msg)) return True self.__logger.debug("cache expired: {}".format(cache_msg)) return False
1,707
instagur/routes.py
Kiniamaro/mtl-releve_instagur
0
2171201
import os.path import json from instagur import app as app from instagur.models.Post import Post from instagur.models.Comment import Comment from instagur.database import db_session from flask import request, send_file, send_from_directory, jsonify, redirect basename = os.path.dirname(__file__) @app.route('/', methods=['GET']) def index(): return send_file('public/index.html') @app.route('/<route>', methods=['GET']) def page(route): if route and not os.path.exists(f'{basename}/public/{route}.html'): return send_file('public/404.html'), 404 return send_file(f'public/{route}.html') @app.route('/js/<filename>', methods=['GET']) def javascript(filename): print(filename) if filename and not os.path.exists(f'{basename}/public/js/{filename}'): return send_file('public/404.html'), 404 return send_from_directory('public/js/', filename) @app.route('/css/<filename>', methods=['GET']) def css(filename): if filename and not os.path.exists(f'{basename}/public/css/{filename}'): return send_file('public/404.html'), 404 return send_from_directory('public/css/', filename) @app.route('/uploads/<filename>', methods=['GET']) def uploads(filename): if filename and \ not os.path.exists(f'{basename}/public/uploads/{filename}'): return send_file('public/404.html'), 404 return send_from_directory('public/uploads/', filename) # @app.route('/disconect', methods=['POST']) # def disconect(): # if app.config['SECRET_KEY'] != secret: @app.errorhandler(404) def page_not_found(e): return send_file('public/404.html'), 404 # POST ROUTES # Get a list of all posts @app.route('/post/all', methods=['GET']) def get_all_posts(): posts = Post.query.filter(Post.is_deleted == False).all() # filter pour pas aller chercher les is_deleted response = [] for post in posts: response.append({ "id": post.id, "filename": post.filename, "story": post.story, "data_type": post.data_type, "likes": post.likes, "created_at": post.created_at, "comments": get_post_comments(post.id) }) return jsonify(response) def get_post_comments(id): comments = Comment.query.filter(Comment.post_id == id).all() response = [] for comment in comments: response.append({ "id": comment.id, "author": comment.author, "comment": comment.comment }) return response # create a new post @app.route('/post', methods=['POST']) def add_post(): # TODO REFACTOR THIS U FUK file = request.files['file'] file_path = f'{basename}/public/uploads/{file.filename}' story = request.form['content'] post = Post(file.filename, story, file.mimetype) db_session.add(post) db_session.commit() file.stream.seek(0) file.save(file_path) return f'{file.filename} uploaded succesfully!' @app.route('/post/<id>/<secret>', methods=['GET']) def delete_post(id, secret): post = Post.query.filter(Post.id == id).first() if not post: print("ereure delete Poste apres code") return (f'no post with id {id}', 500) if app.config['SECRET_KEY'] == secret: post.is_deleted = True db_session.add(post) db_session.commit() print("delete Post") return "post Deleted!" @app.route('/comment/<id>/<secret>', methods=['GET']) def delete_comment(id, secret): comment = Comment.query.filter(Comment.id == id).first() if not comment: return (f'no comment with id {id}', 500) if app.config['SECRET_KEY'] == secret: db_session.delete(comment) db_session.commit() return "comment Deleted!" # add a like to a post @app.route('/post/<id>/like', methods=['POST']) def like(id): post = Post.query.filter(Post.id == id).first() if not post: return (f'no post with id {id}', 500) post.likes += 1 db_session.commit() return 'liked!' # # add a comment to a post # @app.route('/post/<id>/comments', methods=['POST']) # def comment(id): # post = Post.query.filter(Post.id == id).first() # if not post: # return (f'no post with id {id}', 500) # # post.coments += 1 # db_session.commit() # return 'comment!' @app.route('/post/<id>/comments', methods=['GET']) def get_comments(id): comments = Comment.query.filter(Comment.post_id == id).all() response = [] for comment in comments: response.append({ "id": comment.id, "post_id": comment.post_id, "author": comment.author, "comment": comment.comment }) return jsonify(response) # add a comment to a post @app.route('/post/<id>/comment', methods=['POST']) def comment(id): req = request.get_json() print(req) if 'comment' not in req: return jsonify({'error': 'Bad request'}), 400 comment = Comment(id, 'anon', req['comment']) db_session.add(comment) db_session.commit() return 'posted'
5,056
genshinstats/utils.py
rushkii/genshinstats
1
2171949
"""Various utility functions for genshinstats.""" import os.path import re from typing import Optional, Union from .errors import AccountNotFound __all__ = [ 'USER_AGENT', 'recognize_server', 'is_game_uid', 'is_chinese', 'get_output_log' ] USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36" def recognize_server(uid: int) -> str: """Recognizes which server a UID is from.""" server = { 1:'cn_gf01', 5:'cn_qd01', 6:'os_usa', 7:'os_euro', 8:'os_asia', 9:'os_cht', }.get(int(str(uid)[0])) # first digit if server: return server else: raise AccountNotFound(f"UID {uid} isn't associated with any server") def is_game_uid(uid: int) -> bool: """Recognizes whether the uid is a game uid.""" return bool(re.fullmatch(r'[6789]\d{8}',str(uid))) def is_chinese(x: Union[int, str]) -> bool: """Recognizes whether the server/uid is chinese.""" return str(x).startswith(('cn','1','5')) def get_output_log() -> Optional[str]: """Find and return the Genshin Impact output log. None if not found.""" mihoyo_dir = os.path.expanduser('~/AppData/LocalLow/miHoYo/') for name in ["Genshin Impact","原神","YuanShen"]: output_log = os.path.join(mihoyo_dir,name,'output_log.txt') if os.path.isfile(output_log): return output_log return None # no genshin installation
1,474
lib/feng_utils.py
aanilpala/ds_task
0
2172166
def calc_ttype_share_features(group, ttypes): volume_by_ttype = group.groupby("transaction_type", dropna=True).agg( {"amount_internal_currency": ["sum"]} )["amount_internal_currency"]["sum"] total_volume = volume_by_ttype.sum() share_by_ttype = volume_by_ttype / total_volume share_by_ttype.rename(lambda val: f"ttype_{val}_share", inplace=True) ttype_share_features_dict = share_by_ttype.to_dict() ttype_share_features_dict["num_ttypes_seen"] = len( ttype_share_features_dict.values() ) for ttype in ttypes: if not ttype_share_features_dict.get(f"ttype_{ttype}_share"): ttype_share_features_dict[f"ttype_{ttype}_share"] = 0.0 # Not necessary as ttype is never null # if not ttype_features_dict.get(f'ttype_nan_share'): # ttype_features_dict[f'ttype_nan_share'] = 0.0 return ttype_share_features_dict def calc_mccgroup_share_features(group, mcc_groups): volume_by_mcc_group = group.groupby("mcc_group", dropna=False).agg( {"amount_internal_currency": ["sum"]} )["amount_internal_currency"]["sum"] total_volume = volume_by_mcc_group.sum() share_by_mcc_group = volume_by_mcc_group / total_volume # val != val means null share_by_mcc_group.rename( lambda val: f"mcc_{int(val)}_share" if val == val else "mcc_nan_share", inplace=True, ) mcc_group_share_features_dict = share_by_mcc_group.to_dict() mcc_group_share_features_dict["num_mcc_groups_seen"] = len( mcc_group_share_features_dict.values() ) for mcc_group in mcc_groups: if not mcc_group_share_features_dict.get(f"mcc_{mcc_group}_share"): mcc_group_share_features_dict[f"mcc_{mcc_group}_share"] = 0.0 if not mcc_group_share_features_dict.get(f"mcc_nan_share"): mcc_group_share_features_dict[f"mcc_nan_share"] = 0.0 return mcc_group_share_features_dict def calc_ttype_features(ttype_series, ttypes): ttype_counts = ttype_series.value_counts() ttype_counts.rename(lambda val: f"ttype_{val}_count", inplace=True) ttype_features_dict = ttype_counts.to_dict() ttype_features_dict["num_ttypes_seen"] = len(ttype_features_dict.values()) for ttype in ttypes: if not ttype_features_dict.get(f"ttype_{ttype}_count"): ttype_features_dict[f"ttype_{ttype}_count"] = 0 return ttype_features_dict def calc_mccgroup_features(mcc_group_series, mcc_groups): mcc_group_counts = mcc_group_series.value_counts(dropna=False) # val != val means null mcc_group_counts.rename( lambda val: f"mcc_{int(val)}_count" if val == val else "mcc_nan_count", inplace=True, ) mcc_group_features_dict = mcc_group_counts.to_dict() mcc_group_features_dict["num_mcc_groups_seen"] = len( mcc_group_features_dict.values() ) for mcc_group in mcc_groups: if not mcc_group_features_dict.get(f"mcc_{int(mcc_group)}_count"): mcc_group_features_dict[f"mcc_{int(mcc_group)}_count"] = 0 if not mcc_group_features_dict.get(f"mcc_nan_count"): mcc_group_features_dict[f"mcc_nan_count"] = 0 return mcc_group_features_dict def calc_features(group, ttypes, mcc_groups): ttype_feats = calc_ttype_features(group["transaction_type"], ttypes) ttype_share_feats = calc_ttype_share_features(group, ttypes) mccgroup_feats = calc_mccgroup_features(group["mcc_group"], mcc_groups) mccgroup_share_feats = calc_mccgroup_share_features(group, mcc_groups) return { **ttype_feats, **ttype_share_feats, **mccgroup_feats, **mccgroup_share_feats, }
3,654
tests/test_parser_transform.py
fre-sch/redscript-docgen
0
2170442
import pytest from redscript_docgen.parser import parse @pytest.mark.parametrize("source", ( r""" func t(p: t, opt p: t) -> t {} """, r""" class t {} """, r""" class t { let f: t; } """, """ class t { func t() -> t {} } """, r""" @one() func t() -> t {} """, r""" @one() @two() func t() -> t {} """, r""" @one(param, param) @two(param, "param") func t() -> t {} """, )) def test_transform_annotationlist(source): assert parse(source) is not None
512
INF_TC_04.py
kgsharathkumar/MTD_Automation_Infinicut
0
2172457
import time from appium.webdriver.common.touch_action import TouchAction def test_save_to_infinicut(self): # Scroll down the page actions = TouchAction(self.driver) element = self.driver.find_element_by_class_name('android.widget.ScrollView') self.driver.swipe(475, 500, 475, 200, 400) actions.tap(element).perform() # Screen stays for 5 seconds #time.sleep(5) # Click on "Save to Ifinicut" button by using it's resource id # On clicking that Changes made in settings all saved in app save_to_infinicut_Button = self.driver.find_element_by_id('com.mtd.usa.cubcadet:id/save_to_infinicut') save_to_infinicut_Button.click() # Screen stays for 5 seconds #time.sleep(3) # On clicking "Save to Infinicut" button we get pop message with note that "your Settings are saved" # And we have to click "OK" using its resource id ok_popup_msg_button = self.driver.find_element_by_id('android:id/button1') ok_popup_msg_button.click()
1,021
cs_proxy/proxy.py
Rhyssiyan/github-pages-basic-auth-proxy
39
2172345
import sys, os import argparse import colorama from bottle import route, view, request, response, run, hook, abort, redirect, error, install, auth_basic, template, HTTPResponse import simplejson as json import random import logging import datetime import requests from requests.auth import HTTPBasicAuth from jose import jwt from jose.exceptions import JWSError import datetime def main(): # # CLI PARAMS # parser = argparse.ArgumentParser(description='comSysto GitHub Pages Auth Basic Proxy') parser.add_argument("-e", "--environment", help='Which environment.', choices=['cgi', 'wsgi', 'heroku']) parser.add_argument("-gho", "--owner", help='the owner of the repository. Either organizationname or username.') parser.add_argument("-ghr", "--repository", help='the repository name.') parser.add_argument("-obf", "--obfuscator", help='the subfolder-name in gh-pages branch used as obfuscator') parser.add_argument("-p", "--port", help='the port to run proxy e.g. 8881') parser.add_argument("-a", "--authType", help='how should users auth.', choices=['allGitHubUsers', 'onlyGitHubOrgUsers'], required=False ) args = parser.parse_args() if not args.environment: print ('USAGE') print (' proxy that allows only members of the organization to access page: (owner must be an GitHub Organization)') print (' $> cs-gh-proxy -e wsgi -p 8881 --authType onlyGitHubOrgUsers --owner comsysto --repository github-pages-basic-auth-proxy --obfuscator 086e41eb6ff7a50ad33ad742dbaa2e70b75740c4950fd5bbbdc71981e6fe88e3') print ('') print (' proxy that allows all GitHub Users to access page: (owner can be GitHub Organization or normal user)') print (' $> cs-gh-proxy -e wsgi -p 8881 --authType allGitHubUsers --owner comsysto --repository github-pages-basic-auth-proxy --obfuscator <KEY>') print ('') sys.exit(1) if args.environment == 'heroku': args = parser.parse_args(['--environment', 'heroku', '--port', os.environ.get("PORT", 5000), '--authType', os.environ.get("PROXY_AUTH_TYPE", 'allGitHubUsers'), '--owner', os.environ.get("GITHUB_REPOSITORY_OWNER", 'comsysto'), '--repository', os.environ.get("GITHUB_REPOSITORY_NAME", 'github-pages-basic-auth-proxy'), '--obfuscator', os.environ.get("GITHUB_REPOSITORY_OBFUSCATOR", '<KEY>') ]) run_proxy(args) # # global vars # owner = 0 auth_type = 0 jwt_secret = "%032x" % random.getrandbits(128) # # TEMPLATES # default_header_tpl = """<html> <head><title>{{headline}} | Auth Basic GitHub Pages Proxy by comSysto</title></head> <body> <div style="font-family:sans-serif;margin:auto;padding:50px 100px 50px 100px;"> <div style="width:100%;background:#1e9dcc"> <img src="https://comsysto.github.io/github-pages-basic-auth-proxy/public/logo-small.png"> </div> <h1>{{headline}}</h1>""" default_footer_tpl = """</div> </body></html>""" default_tpl = default_header_tpl + '{{body}}' + default_footer_tpl healthcheck_tpl = default_header_tpl + """ <div style="background:#99d100;padding:20px;color:#fff">&#10003; Proxy is running fine.</div>""" + default_footer_tpl error_tpl = default_header_tpl + """ <div style="background:#bf1101;padding:20px;color:#fff">&#10008; {{error}}</div>""" + default_footer_tpl install_success_tpl = default_header_tpl + """ <div style="background:#99d100;padding:20px;color:#fff">&#10003; Installation done.</div> <br><br> %if remote_page_call_status_code != 200: <div style="background:#bf1101;padding:20px;color:#fff">&#10008; Error calling the gh-pages page (Status {{remote_page_call_status_code}}). Please check the env vars (obfuscator, repositoryOwner and repositoryName) and place a index.html inside the obfuscator dir.</div> %else: <div style="background:#99d100;padding:20px;color:#fff">&#10003; Success calling the gh-pages page.</div> %end """ + default_footer_tpl # # HELPERS # def return_json(object, response): response.set_header('Content-Type', 'application/json') return json.dumps(object) def create_jwt_token(): return jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(hours=4)}, jwt_secret, algorithm='HS256') def valid_jwt_token(token): try: res = jwt.decode(token, jwt_secret, algorithms=['HS256']) print (res) return True except JWSError: return False def check_pass(username, password): # # First check if already valid JWT Token in Cookie # auth_cookie = request.get_cookie("cs-proxy-auth") if auth_cookie and valid_jwt_token(auth_cookie): print ('PROXY-AUTH: found valid JWT Token in cookie') return True # # GitHub Basic Auth - also working with username + personal_access_token # print ('PROXY-AUTH: doing github basic auth - authType: {0}, owner: {1}'.format(auth_type, owner)) basic_auth = HTTPBasicAuth(username, password) auth_response = requests.get('https://api.github.com/user', auth=basic_auth) if auth_response.status_code == 200: if auth_type == 'onlyGitHubOrgUsers': print ('PROXY-AUTH: doing org membership request') org_membership_response = requests.get('https://api.github.com/user/orgs', auth=basic_auth) if org_membership_response.status_code == 200: for org in org_membership_response.json(): if org['login'] == owner: response.set_cookie("cs-proxy-auth", create_jwt_token()) return True return False else: response.set_cookie("cs-proxy-auth", create_jwt_token()) return True return False def normalize_proxy_url(url): print ('URL:') print (url) if url.endswith('/') or url == '': return '{0}index.html'.format(url) return url def proxy_trough_helper(url): print ('PROXY-GET: {0}'.format(url)) proxy_response = requests.get(url) if proxy_response.status_code == 200: if proxy_response.headers['Last-Modified']: response.set_header('Last-Modified', proxy_response.headers['Last-Modified']) if proxy_response.headers['Content-Type']: response.set_header('Content-Type', proxy_response.headers['Content-Type']) if proxy_response.headers['Expires']: response.set_header('Expires', proxy_response.headers['Expires']) return proxy_response else: return HTTPResponse(status=proxy_response.status_code, body=template(error_tpl, headline='Error {0}'.format(proxy_response.status_code), error='error during proxy call')) # # BOTTLE APP # def run_proxy(args): # # ERROR HANDLERS # @error(401) def error404(error): return template(error_tpl, headline='Error '+error.status, error=error.body) @error(500) def error500(error): return template(error_tpl, headline='Error '+error.status, error=error.body) # # SPECIAL ENDPOINTS # @route('/health') def hello(): return template(healthcheck_tpl, headline='Healthcheck') @route('/install-success') def hello(): remote_page_call_status_code = proxy_trough_helper('https://{0}.github.io/{1}/{2}/{3}'.format(args.owner, args.repository, args.obfuscator, '/')).status_code return template(install_success_tpl, headline='Installation Success', remote_page_call_status_code=remote_page_call_status_code) # # make args available in auth callback # global owner, auth_type owner = args.owner auth_type = args.authType @route('/<url:re:.+>') @auth_basic(check_pass) def proxy_trough(url): return proxy_trough_helper('https://{0}.github.io/{1}/{2}/{3}'.format(args.owner, args.repository, args.obfuscator, normalize_proxy_url(url))) @route('/') @auth_basic(check_pass) def proxy_trough_root_page(): return proxy_trough_helper('https://{0}.github.io/{1}/{2}/{3}'.format(args.owner, args.repository, args.obfuscator, '/index.html')) # # RUN BY ENVIRONMENT # if args.environment == 'wsgi': run(host='localhost', port=args.port, debug=True) if args.environment == 'heroku': run(host="0.0.0.0", port=int(args.port)) else: run(server='cgi')
8,628
maskrcnn_benchmark/modeling/roi_heads/relation_head/utils_co_attention.py
dongxingning/SHA_GCL_for_SGG
5
2171263
""" Based on the implementation of https://github.com/jadore801120/attention-is-all-you-need-pytorch """ import torch import torch.nn as nn from maskrcnn_benchmark.modeling.roi_heads.relation_head.model_transformer import ScaledDotProductAttention,\ MultiHeadAttention, PositionwiseFeedForward class Single_Att_Layer(nn.Module): ''' Compose with two layers ''' def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1): super(Single_Att_Layer, self).__init__() self.slf_attn = MultiHeadAttention( n_head, d_model, d_k, d_v, dropout=dropout) self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout) def forward(self, q_input, k_input, v_input, non_pad_mask=None, slf_attn_mask=None): enc_output, enc_slf_attn = self.slf_attn( q_input, k_input, v_input, mask=slf_attn_mask) enc_output *= non_pad_mask.float() enc_output = self.pos_ffn(enc_output) enc_output *= non_pad_mask.float() return enc_output, enc_slf_attn class Self_Attention_Encoder(nn.Module): """ A encoder model with self attention mechanism. """ def __init__(self, n_head, d_k, d_v, d_model, d_inner, dropout=0.1): super().__init__() self.transformer_layer = Single_Att_Layer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout) def forward(self, input_feats, num_objs): input_feats = input_feats.split(num_objs, dim=0) input_feats = nn.utils.rnn.pad_sequence(input_feats, batch_first=True) # -- Prepare masks bsz = len(num_objs) device = input_feats.device pad_len = max(num_objs) num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len) slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len) non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) # (bsz, pad_len, 1) # -- Forward enc_output, enc_slf_attn = self.transformer_layer( input_feats, input_feats, input_feats, non_pad_mask=non_pad_mask, slf_attn_mask=slf_attn_mask) enc_output = enc_output[non_pad_mask.squeeze(-1)] return enc_output class Cross_Attention_Encoder(nn.Module): """ A encoder model with self attention mechanism. """ def __init__(self, n_head, d_k, d_v, d_model, d_inner, dropout=0.1): super().__init__() self.transformer_layer = Single_Att_Layer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout) def forward(self, visual_feats, textual_feats, num_objs): visual_feats = visual_feats.split(num_objs, dim=0) visual_feats = nn.utils.rnn.pad_sequence(visual_feats, batch_first=True) textual_feats = textual_feats.split(num_objs, dim=0) textual_feats = nn.utils.rnn.pad_sequence(textual_feats, batch_first=True) # -- Prepare masks bsz = len(num_objs) device = visual_feats.device pad_len = max(num_objs) num_objs_ = torch.LongTensor(num_objs).to(device).unsqueeze(1).expand(-1, pad_len) slf_attn_mask = torch.arange(pad_len, device=device).view(1, -1).expand(bsz, -1).ge(num_objs_).unsqueeze(1).expand(-1, pad_len, -1) # (bsz, pad_len, pad_len) non_pad_mask = torch.arange(pad_len, device=device).to(device).view(1, -1).expand(bsz, -1).lt(num_objs_).unsqueeze(-1) # (bsz, pad_len, 1) # -- Forward enc_output, enc_slf_attn = self.transformer_layer( visual_feats, textual_feats, textual_feats, non_pad_mask=non_pad_mask, slf_attn_mask=slf_attn_mask) enc_output = enc_output[non_pad_mask.squeeze(-1)] return enc_output
3,885
pace/encryption/encryption_exceptions.py
LaudateCorpus1/PACE-python
7
2171883
## ************** ## Copyright 2015 MIT Lincoln Laboratory ## Project: PACE ## Authors: ATLH ## Description: Contains exceptions for encryption code ## Modifications: ## Date Name Modification ## ---- ---- ------------ ## 10 Aug 2015 ATLH Original file ## ************** class EncryptionException(Exception): """ Exception raised when unable to encrypt. Attributes: msg - error message for situation """ def __init__(self, msg): self.msg = msg def __str__(self): return self.msg class DecryptionException(Exception): """ Exception raised when unable to decrypt. Attributes: msg - error message for situation """ def __init__(self, msg): self.msg = msg def __str__(self): return self.msg
848
models/train_classifier.py
rmkeeler/udacity-project-disaster-alerts
0
2172432
# System packages import sys # NLP packages import nltk nltk.download(['punkt','stopwords','wordnet']) from nltk.tokenize import word_tokenize from nltk.corpus import stopwords from nltk.stem.wordnet import WordNetLemmatizer # Analysis packages import numpy as np import pandas as pd import re from sqlalchemy import create_engine import pickle as pkl # Machine Learning packages from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.pipeline import Pipeline from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer from sklearn.multioutput import MultiOutputClassifier from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import MultinomialNB from sklearn.metrics import classification_report def load_data(database_filepath): """ Get the database from the "data" folder in this project's structure. Relevant data must be in a table called "messages" in that database. Return X, y and category names. X is the single column containing message text. We'll extract features from it. y is an array of multiple categories, so this is a multioutput classifier problem. y categories take on 1 if category applies to a message, otherwise 0. Category names are the names of the categories appearing in y. """ engine = create_engine('sqlite:///' + database_filepath) df = pd.read_sql_table(table_name = 'messages', con = engine) feature_vars = ['message'] non_vars = ['id','original','genre'] target_vars = [x for x in df.columns if x not in feature_vars + non_vars] X = df.message.values y = df[target_vars].values cats = df[target_vars].columns.values return X, y, cats def tokenize(text): """ Simple tokenizer we'll use in grid search to see if it's better than CountVectorizer's default tokenizer. 1. Normalize: Strip punctuation and convert to lower 2. Tokenize: Split message into individual words 3. Lemmatize: Reduce words to their root, using verb part of speech. """ punct = re.compile('[^A-Za-z0-9]') norm = punct.sub(' ', text.lower()) tokens = [x for x in word_tokenize(norm) if x not in stopwords.words('english')] lemmatizer = WordNetLemmatizer() lemms = [lemmatizer.lemmatize(w, 'v') for w in tokens] return lemms def build_model(): """ Build a pipeline to extract features from messages and then run them through a multioutput classifier. GridSearchCV will test Random Forest against Multinomial Naive Bayes to see which performs best. """ pipeline = Pipeline([ ('vect', CountVectorizer(tokenizer = tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(estimator = RandomForestClassifier())) ]) params = { 'vect__max_features':[None, 5000], 'tfidf__use_idf':[True, False], 'clf__estimator':[RandomForestClassifier(), MultinomialNB()] } cv = GridSearchCV(pipeline, param_grid = params, cv = 2, verbose = 3) return cv def evaluate_model(model, X_test, Y_test, category_names): """ Eval model using classification_report(). Above each eval table, print the name of the output var being evaluated. Does nothing but eval the model and print output to console. """ y_pred = model.predict(X_test) for i in range(y_pred.shape[1]): print('{}'.format(category_names[i])) print(classification_report(Y_test[:,i], y_pred[:,i]) + '\n') def save_model(model, model_filepath): """ Save the cv model to filepath specified in cmd prompt. As pickle. """ pkl.dump(model, open(model_filepath, 'wb')) def main(): if len(sys.argv) == 3: database_filepath, model_filepath = sys.argv[1:] print('Loading data...\n DATABASE: {}'.format(database_filepath)) X, Y, category_names = load_data(database_filepath) X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2) print('Building model...') model = build_model() print('Training model...') model.fit(X_train, Y_train) print('Evaluating model...') evaluate_model(model, X_test, Y_test, category_names) print('Saving model...\n MODEL: {}'.format(model_filepath)) save_model(model, model_filepath) print('Trained model saved!') else: print('Please provide the filepath of the disaster messages database '\ 'as the first argument and the filepath of the pickle file to '\ 'save the model to as the second argument. \n\nExample: python '\ 'train_classifier.py ../data/DisasterResponse.db classifier.pkl') if __name__ == '__main__': main()
4,739
other/greater.py
kirviz/algorithms
0
2172147
def calculate(arr): result = [] for i, num in enumerate(arr): greater = larger(num, arr[i+1:]) result.append(greater if greater is not None else -1) print(' '.join(map(str, result))) def larger(than, inArray): for n in inArray: if n > than: return n if __name__ == "__main__": calculate([1, 3, 2, 4])
361
FIR_Filter2.py
hassan-alhujhoj/ENEL420-FIR-IIR-Filters-for-ECG-Singals
0
2172385
#!/usr/bin/env python # coding: utf-8 # In[1]: get_ipython().run_line_magic('matplotlib', 'notebook') import matplotlib import matplotlib.pyplot as plt import numpy as np from scipy import signal from scipy.fftpack import fft # Data Import # Opens the group data in read only mode # Appends the data to a list # In[2]: file = open("Signal_files/enel420_grp_1.txt", "r") y_0 = [] for line in file: words = line.split(" ") for word in words: if word != "": y_0.append(float(word)) fs = 1024 N = len(y_0) N_2 = int(N/2) t = [x/fs for x in list(range(0, N))] f = [x*fs/N for x in list(range(0, N_2))] FFT_0 = fft(y_0) # In[3]: plt.figure() plt.plot(t, y_0, linewidth=0.5) plt.xlabel("Time (s)") plt.ylabel("Voltage (uV)") plt.title("Unfiltered ECG Signal") plt.savefig("Graphs/Task_1_1.png") # In[4]: plt.figure() plt.plot(t, y_0, linewidth=0.5) plt.xlabel("Time (s)") plt.ylabel("Voltage (uV)") plt.title("Unfiltered ECG Signal") plt.xlim([1, 2]) plt.savefig("Graphs/Task_1_2.png") # In[5]: plt.figure() plt.plot(f, abs(FFT_0)[:N_2]) plt.xlabel("Frequency (Hz)") plt.ylabel("Voltage (uV)") plt.title("Unfiltered ECG Signal Frequency Spectrum") plt.savefig("Graphs/Task_2_1.png") # In[6]: plt.figure() plt.plot(f, 20*np.log10(abs(FFT_0[:N_2]))) plt.xlabel("Frequency (Hz)") plt.ylabel("Magnitude (dB)") plt.title("Unfiltered ECG Signal Frequency Spectrum") plt.savefig("Graphs/Task_2_2.png") # In[7]: N_Coeff = 400 # Number of coefficients noise_f = [31.45, 74.36] # Desired stop bands, Hz # In[8]: # Window Filtering width_WF = 8 # Width of stop band, Hz band_1 = [noise_f[0] -width_WF/2, noise_f[0]+width_WF/2] # Define band 1 bounds band_2 = [noise_f[1] -width_WF/2, noise_f[1]+width_WF/2] # Define band 2 bounds filter1_WF = signal.firwin(N_Coeff+1, band_1, window='hann', pass_zero='bandstop', fs=fs) # Filter for noise frequency 1 filter2_WF = signal.firwin(N_Coeff+1, band_2, window='hann', pass_zero='bandstop', fs=fs) # Filter for noise frequency 2 filter_WF = signal.convolve(filter1_WF, filter2_WF) # Combined filter for noise frequencies y_WF = signal.lfilter(filter_WF, 1, y_0) # Apply noise filters to original data f_WF, h_WF = signal.freqz(filter_WF, 1, fs=fs) # FFT_WF = fft(y_WF) # In[9]: # Frequency Spectrum Window Filter Response plt.figure() plt.plot(f, abs(FFT_WF[:N_2])) plt.xlabel("Frequency (Hz)") plt.ylabel("Voltage (uV)") plt.title("Window Filter Frequency Spectrum") plt.savefig("Graphs/Task_5_1_1.png") # In[10]: # Decibel Frequency Spectrum Window Filter Response plt.figure() plt.plot(f, 20*np.log10(abs(FFT_WF[:N_2]))) plt.xlabel("Frequency (Hz)") plt.ylabel("Magnitude (dB)") plt.title("Window Filter Frequency Spectrum") plt.savefig("Graphs/Task_5_1_2.png") # In[11]: # Frequency Response Window Filter Response plt.figure() plt.plot(f_WF, 20* np.log10(abs(h_WF))) plt.xlabel("Frequency (Hz)") plt.ylabel("Magnitude (dB)") plt.title("Window Filter Frequency Response") plt.savefig("Graphs/Task_5_1_3.png") # In[12]: plt.figure() plt.plot(t, y_WF, linewidth=0.5) plt.xlabel("Time (secs)") plt.ylabel("Voltage (uV)") plt.title("Window Filter Data") plt.savefig("Graphs/Task_5_1_4.png") # In[13]: plt.figure() plt.plot(t, y_WF, linewidth=0.5) plt.xlabel("Time (secs)") plt.ylabel("Voltage (uV)") plt.title("Window Filter Data") plt.xlim([1, 2]) plt.savefig("Graphs/Task_5_1_5.png") # In[14]: trans_PM = 4 # Width of transition from pass band to stop band, Hz width_PM = 8# Width of transition from pass band, Hz # Filter Bands for filtering frequency 1 & 2 band1_PM = [0, noise_f[0] -width_PM-trans_PM, noise_f[0] -width_WF/2, noise_f[0]+width_PM/2, noise_f[0]+width_PM/2+trans_PM, fs/2] band2_PM = [0, noise_f[1] -width_PM-trans_PM, noise_f[1] -width_WF/2, noise_f[1]+width_PM/2, noise_f[1]+width_PM/2+trans_PM, fs/2] gain_PM = [1, 0, 1] # Create filters for filtering frequency 1 & 2 filter1_PM = signal.remez(N_Coeff+1, band1_PM, gain_PM, fs=fs) # Filter frequency 1 filter2_PM = signal.remez(N_Coeff+1, band2_PM, gain_PM, fs=fs) # Filter frequency 2 filter_PM = signal.convolve(filter1_PM, filter2_PM) # Combined Filter y_PM = signal.lfilter(filter_PM, 1, y_0) # Filter original data in time domain f_PM, h_PM = signal.freqz(filter_PM, 1, fs=fs) # Return filter frequency response FFT_PM = fft(y_PM) # Filtered data frequency domain response # In[15]: plt.figure() plt.plot(f, abs(FFT_PM[:N_2])) plt.xlabel("Frequency (Hz)") plt.ylabel("Voltage (uV)") plt.title("Parks-McClellan Filter Frequency Spectrum") plt.savefig("Graphs/Task_5_2_1.png") # In[16]: plt.figure() plt.plot(f_PM, 20*np.log10(abs(h_PM))) plt.xlabel("Frequency (Hz)") plt.ylabel("Magnitude (dB)") plt.title("Parks-McClellan Filter Frequency Response") plt.savefig("Graphs/Task_5_2_2.png") # In[17]: plt.figure() plt.plot(t, y_PM, linewidth=0.5) plt.xlabel("Time (secs)") plt.ylabel("Voltage (uV)") plt.title("Parks-McClellan Filter Data") plt.savefig("Graphs/Task_5_2_3.png") # In[18]: plt.figure() plt.plot(t, y_PM, linewidth=0.5) plt.xlabel("Time (secs)") plt.ylabel("Voltage (uV)") plt.title("Parks-McClellan Filter Data") plt.xlim([1, 2]) plt.savefig("Graphs/Task_5_2_4.png") # In[19]: trans_FS = 4 # Width of transition from pass band to stop band, Hz width_FS = 8 # Width of the stop band, Hz band1_FS = [0, noise_f[0] -width_FS/2-trans_FS, noise_f[0] -width_FS/2, noise_f[0]+width_FS/2, noise_f[0]+width_FS/2+trans_FS, fs/2] band2_FS = [0, noise_f[1] -width_FS/2-trans_FS, noise_f[1] -width_FS/2, noise_f[1]+width_FS/2, noise_f[1]+width_FS/2+trans_FS, fs/2] gain_FS = [1, 1, 0, 0, 1, 1] # Gain coefficients of bands filter1_FS = signal.firwin2(N_Coeff+1, band1_FS, gain_FS, fs=fs) # Filter for noise frequency 1 filter2_FS = signal.firwin2(N_Coeff+1, band2_FS, gain_FS, fs=fs) # Filter for noise frequency 2 filter_FS = signal.convolve(filter1_FS, filter2_FS) # Filter for both noise frequencies y_FS = signal.lfilter(filter_FS, 1, y_0) # Apply filter to time domain data f_FS, h_FS = signal.freqz(filter_FS, 1, fs=fs) # Filter Response FFT_FS = fft(y_FS) # Filtered Frequency Domain Response # In[20]: plt.figure() plt.plot(f, abs(FFT_FS[:N_2])) plt.xlabel("Frequency (Hz)") plt.ylabel("Voltage (uV)") plt.title("Frequency Sampling Filter Frequency Spectrum") plt.savefig("Graphs/Task_5_3_1.png") # In[21]: plt.figure() plt.plot(f_FS, 20*np.log10(abs(h_FS))) plt.xlabel("Frequency (Hz)") plt.ylabel("Magnitude (dB)") plt.title("Frequency Sampling Frequency Response") plt.savefig("Graphs/Task_5_3_2.png") # In[22]: plt.figure() plt.plot(t, y_FS, linewidth=0.5) plt.xlabel("Time (s)") plt.ylabel("Voltage (uV)") plt.title("Frequency Sampling Data") plt.savefig("Graphs/Task_5_3_3.png") # In[23]: plt.figure() plt.plot(t, y_FS, linewidth=0.5) plt.xlabel("Time (s)") plt.ylabel("Voltage (uV)") plt.title("Frequency Sampling Data") plt.xlim([1, 2]) plt.savefig("Graphs/Task_5_3_4.png") # In[29]: P_0 = np.var(y_0) P_WF = np.var(y_WF) P_PM = np.var(y_PM) P_FS = np.var(y_FS) N_WF = P_0 - P_WF N_FS = P_0 - P_FS N_PM = P_0 - P_PM print("No Filter Power: {:.2f}".format(np.var(y_0))) print("Window Filter Power: {:.2f}".format(np.var(y_WF))) print("Parks-McLellan Filter Power: {:.2f}".format(np.var(y_PM))) print("Frequency Sampling Filter Power: {:.2f}".format(np.var(y_FS))) print("Window Filter Noise Power: {:.2f}".format(N_WF)) print("Parks-McLellan Filter Noise Power: {:.2f}".format(N_PM)) print("Frequency Sampling Noise Filter Power: {:.2f}".format(N_FS)) # In[ ]:
7,598
code/python/FactSetESG/v1/fds/sdk/FactSetESG/__init__.py
factset/enterprise-sdk
6
2171782
# flake8: noqa """ FactSet ESG API FactSet ESG (powered by FactSet Truvalue Labs) applies machine learning to uncover risks and opportunities from companies' Environmental, Social and Governance (ESG) behavior, which are aggregated and categorized into continuously updated, material ESG scores. The service focuses on company ESG behavior from external sources and includes both positive and negative events that go beyond traditional sources of ESG risk data.<p> FactSet ESG extracts, analyzes, and generates scores from millions of documents each month collected from more than 100,000 data sources in over 13 languages. Sources include news, trade journals, NGOs, watchdog groups, trade blogs, industry reports and social media. Products deliver investable insights by revealing value and risk factors from unstructured data at the speed of current events.</p> # noqa: E501 The version of the OpenAPI document: 1.3.0 Contact: <EMAIL> Generated by: https://openapi-generator.tech """ __version__ = "0.20.0" # import ApiClient from fds.sdk.FactSetESG.api_client import ApiClient # import Configuration from fds.sdk.FactSetESG.configuration import Configuration # import exceptions from fds.sdk.FactSetESG.exceptions import OpenApiException from fds.sdk.FactSetESG.exceptions import ApiAttributeError from fds.sdk.FactSetESG.exceptions import ApiTypeError from fds.sdk.FactSetESG.exceptions import ApiValueError from fds.sdk.FactSetESG.exceptions import ApiKeyError from fds.sdk.FactSetESG.exceptions import ApiException
1,552
hostman/__init__.py
jonhadfield/hostman
20
2172025
#!/usr/bin/env python # -*- coding: utf-8 -*- """Hostman. Usage: hostman add [-fqbvq] [--force] [--path=PATH] ( [ENTRY ...] | [--input-file=FILE] | [--input-url=URL] ) hostman remove [-qbvq] ([--address=<address>] [--names=<names>]) [--path=PATH] [--input-file=FILE] [--input-url=URL] hostman --version Options: -h --help show this help message and exit --version show version and exit -f --force replace matching entries --address=ADDRESS ipv6 or ipv4 address --names=NAMES host names -q --quiet report only failures -p --path=PATH location of hosts file (attempts to detect default) -i --input-file=FILE file containing hosts to import -u --input-url=URL url of file containing hosts to import -b --backup create a backup before writing any changes --exclude=VALUE comma separated list of names or addresses to exclude from operation [default: 127.0.0.1] -v --verbose print verbose output """ from __future__ import print_function from docopt import docopt from python_hosts import Hosts, HostsEntry from .utils import is_writeable, is_readable import sys import os import datetime import shutil from colorama import Fore, init init(autoreset=True) name = "hostman" def backup_hosts(source=None, extension=None): """Backup a hosts file :param source: Path to the hosts file :param extension: The extension to add to the backup file :return: A dict containing the result and user message to output """ if not extension: now = datetime.datetime.now() ext = now.strftime('%Y%m%d%H%M%S') else: ext = extension dest_split = source.split('/') new_filename = ".{0}.{1}".format(dest_split[-1], ext) dest_split[-1] = new_filename dest = "/".join(dest_split) try: shutil.copy(source, dest) return {'result': 'success', 'message': 'Backup written to: {0}'.format(dest)} except IOError: return {'result': 'failed', 'message': 'Cannot create backup file: {0}'.format(dest)} def output_message(message=None, quiet=False): """User friendly result of action :param message: A dict containing the result and a user notification message :return: Exit with 0 or 1, or True if this is not the final output """ res = message.get('result') if res == 'success': if not quiet: print(Fore.GREEN + message.get('message')) sys.exit(0) elif res == 'failed': print(Fore.RED + message.get('message')) sys.exit(1) elif res == 'continue': if not quiet: print(message.get('message')) return True def add(entry_line=None, hosts_path=None, force_add=False): """Add the specified entry :param entry_line: The entry to add :param hosts_path: The path of the hosts file :param force_add: Replace matching any matching entries with new entry :return: A dict containing the result and user message to output """ hosts_entry = HostsEntry.str_to_hostentry(entry_line) if not hosts_entry: output_message({'result': 'failed', 'message': '"{0}": is not a valid entry.'.format(entry_line)}) duplicate_entry = False entry_to_add = False hosts = Hosts(hosts_path) add_result = hosts.add(entries=[hosts_entry], force=force_add) if add_result.get('replaced_count'): hosts.write() return {'result': 'success', 'message': 'Entry added. Matching entries replaced.'} if add_result.get('ipv4_count') or add_result.get('ipv6_count'): entry_to_add = True if add_result.get('duplicate_count'): duplicate_entry = True if entry_to_add and not duplicate_entry: hosts.write() return {'result': 'success', 'message': 'New entry added.'} if not force_add and duplicate_entry: return {'result': 'failed', 'message': 'New entry matches one or more existing.' '\nUse -f to replace similar entries.'} def import_from_file(hosts_path=None, file_path=None): """Import entries from a text file :param hosts_path: Path to the hosts file to update :param file_path: Path to the file containing the hosts entries to import :return: A dict containing the result and user message to output """ if hosts_path and not os.path.exists(hosts_path): return {'result': 'failed', 'message': 'Cannot read hosts file: {0}'.format(hosts_path)} if not os.path.exists(file_path): return {'result': 'failed', 'message': 'Cannot read import file: {0}'.format(file_path)} else: hosts = Hosts(path=hosts_path) pre_count = len(hosts.entries) import_file_output = hosts.import_file(import_file_path=file_path) post_count = len(hosts.entries) write_result = import_file_output.get('write_result') message = 'New entries:\t{0}\nTotal entries:\t{1}\n'.format( post_count - pre_count, write_result.get('total_written') ) return {'result': import_file_output.get('result'), 'message': message} def import_from_url(hosts_path=None, url=None): """Import entries from a text file found on a specific URL :param hosts_path: Path to the hosts file to update :param url: URL of the text file containing the hosts entries to import :return: A dict containing the result and user message to output """ hosts = Hosts(path=hosts_path) pre_count = len(hosts.entries) import_url_output = hosts.import_url(url=url) post_count = len(hosts.entries) write_result = import_url_output.get('write_result') message = 'New entries:\t{0}\nTotal entries:\t{1}\n'.format( post_count - pre_count, write_result.get('total_written') ) return {'result': import_url_output.get('result'), 'message': message} def remove(address_to_remove=None, names_to_remove=None, remove_from_path=None): """Remove entries from a hosts file :param address_to_remove: An ipv4 or ipv6 address to remove :param names_to_remove: A list of names to remove :param remove_from_path: The path of the hosts file to remove entries from :return: A dict containing the result and user message to output """ hosts = Hosts(path=remove_from_path) if address_to_remove or names_to_remove: num_before = hosts.count() hosts.remove_all_matching(address=address_to_remove, name=names_to_remove) hosts.write() difference = num_before - hosts.count() if difference: if difference > 1: str_entry = 'entries' else: str_entry = 'entry' return {'result': 'success', 'message': 'Removed {0} {1}'.format(difference, str_entry)} else: return {'result': 'failed', 'message': 'No matching entries found'} def strip_entry_value(entry_value): """Strip white space from a string or list of strings :param entry_value: value to strip spaces from :return: value minus the leading and trailing spaces """ if isinstance(entry_value, list): new_list = [] for value in entry_value: new_list.append(value.strip()) return ' '.join(new_list) if isinstance(entry_value, str): return entry_value.strip() def real_main(): """ The function called from the script :return: None """ arguments = docopt(__doc__, version='0.1.3') entry = arguments.get('ENTRY') quiet = arguments.get('--quiet') path = arguments.get('--path') force = arguments.get('--force') backup = arguments.get('--backup') address = arguments.get('--address') names = arguments.get('--names') input_file = arguments.get('--input-file') input_url = arguments.get('--input-url') result = None if not path: if sys.platform.startswith('win'): path = r'c:\windows\system32\drivers\etc\hosts' else: path = '/etc/hosts' if not is_readable(path): output_message({'result': 'failed', 'message': 'Unable to read path: {0}.'.format(path)}) new_entry = None if entry: new_entry = strip_entry_value(entry) if backup: result = backup_hosts(source=path) if result.get('result') == 'success': result['result'] = 'continue' output_message(result, quiet=quiet) if arguments.get('add'): if not is_writeable(path): result = {'result': 'failed', 'message': 'Unable to write to: {0}'.format(path)} if new_entry: result = add(entry_line=new_entry, hosts_path=path, force_add=force) if input_file: result = import_from_file(hosts_path=path, file_path=input_file) if input_url: result = import_from_url(hosts_path=path, url=input_url) else: if arguments.get('remove'): result = remove(address_to_remove=address, names_to_remove=names, remove_from_path=path) if result: output_message(result, quiet=quiet) if __name__ == '__main__': real_main()
9,467
fdns/app.py
zhsj/httpdns
0
2171088
import asyncio import dnslib import logging from .provider.http.google import HTTPGoogleResolver logging.basicConfig(level=logging.DEBUG) _LOG = logging.getLogger(__name__) pseudo_edns_client = '192.168.127.12' class DNSServerProtocol(asyncio.DatagramProtocol): def __init__(self): self.resolver = HTTPGoogleResolver() def connection_made(self, transport): self.transport = transport def datagram_received(self, data, addr): _LOG.info('Received from '+str(addr)) asyncio.ensure_future(self.handle(data, addr)) async def handle(self, data, addr): record = dnslib.DNSRecord.parse(data) question = record.questions[0] qname = str(question.qname) qtype = question.qtype ans = await self.resolver.resolve(qname, qtype, pseudo_edns_client) for rr in ans: zone_format = "{rname} {ttl} IN {rtype_name} {rdata}" _rr = { 'rname': rr[0], 'ttl': rr[1], 'rtype_name': dnslib.QTYPE.forward[rr[2]], 'rdata': rr[3] } zone = zone_format.format(**_rr) _LOG.debug(zone) record.add_answer(*dnslib.RR.fromZone(zone)) _LOG.info('Send to '+str(addr)) self.transport.sendto(record.pack(), addr) class DNSServer: def __init__(self, loop): self.loop = loop async def start(self): _LOG.info("Starting UDP server") self.transport, self.proto = await self.loop.create_datagram_endpoint( DNSServerProtocol, local_addr=('127.0.0.1', 9999)) def main(): async def stop(self): self.transport.close() loop = asyncio.get_event_loop() server = DNSServer(loop) asyncio.ensure_future(server.start()) try: loop.run_forever() except KeyboardInterrupt: loop.run_until_complete(server.stop()) loop.close() if __name__ == '__main__': main()
1,960
Analysis/Hugh Blakemore/.ipynb_checkpoints/project_functions-checkpoint.py
data301-2020-winter2/course-project-group_1017
2
2171781
import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import os sns.set_style("ticks") sns.set_theme("paper") def load_and_process(cwd): os.chdir('../..') os.chdir("Data/Raw") df1 = ( pd.read_csv('medical_expenses.csv') .rename({'children':'Dependents'},axis=1) .dropna(subset=['charges']) .drop(['region','Dependents'],axis=1) .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'}) ) df2=(df1 .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No')) .assign(Over_BMI= np.where(df1['bmi'] > 24.9 ,'Yes','No')) .assign(Under_BMI= np.where(df1['bmi'] < 18.5 ,'Yes','No')) .assign(Healthy = np.where( ( (df1['bmi'] <= 24.9)& (df1['smoker'] == 'no') & (df1['bmi'] >= 18.5 ) ),'yes','no')) .round({"charges":2,"bmi":1}) .sort_values('charges',ascending=True) .reset_index(drop=True) ) os.chdir(cwd) return df2 def Health(cwd): os.chdir('../..') os.chdir("Data/Raw") df1 = ( pd.read_csv('medical_expenses.csv') .rename({'children':'Dependents'},axis=1) .dropna(subset=['charges']) .drop(['region','Dependents'],axis=1) .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'}) ) df2=(df1 .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No')) .assign(Healthy = np.where( ( (df1['bmi'] <= 24.9)& (df1['smoker'] == 'no') & (df1['bmi'] >= 18.5 ) ),'yes','no')) .round({"charges":2,"bmi":1}) .sort_values('charges',ascending=True) .reset_index(drop=True) ) dfH=(df2[(df2['bmi'] >= 18.5) & (df2['bmi'] <=24.9 ) &(df2['smoker'] == 'no') ]) dfH= dfH.reset_index(drop=True) os.chdir(cwd) return dfH def unHealth(cwd): os.chdir('../..') os.chdir("Data/Raw") df1 = ( pd.read_csv('medical_expenses.csv') .rename({'children':'Dependents'},axis=1) .dropna(subset=['charges']) .drop(['region','Dependents'],axis=1) .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'}) ) df2=(df1 .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No')) .assign(Healthy = np.where( ( (df1['bmi'] <= 24.9)& (df1['smoker'] == 'no') & (df1['bmi'] >= 18.5 ) ),'yes','no')) .round({"charges":2,"bmi":1}) .sort_values('charges',ascending=True) .reset_index(drop=True) ) dfuH = (df2[(df2['bmi'] < 18.5) | (df2['bmi'] >24.9 ) | (df2['smoker'] == 'yes') ]) dfuH = dfuH.reset_index(drop=True) os.chdir(cwd) return dfuH def plotAvC(df): g=sns.lmplot(x='age', y='charges',data=df, scatter_kws={'s': 100, 'linewidth': 0.5, 'edgecolor': 'w'}) return g def brpltEC(df): g = sns.countplot(x="Excess_charges",data=df) return g def BrPltECD(df): g=sns.histplot( df, x="Excess_charges", element="bars", stat="density",multiple="dodge" ) return g def BrPltECDh(df): g=sns.histplot( df, x="Excess_charges", element="bars", stat="density",multiple="dodge",hue="smoker" ) return g def BrPltECDB(df): g=sns.histplot( df, x="Excess_charges", element="bars", stat="density",multiple="dodge",hue="bmi" ) return g def smoker(cwd): os.chdir('../..') os.chdir("Data/Raw") df1 = ( pd.read_csv('medical_expenses.csv') .rename({'children':'Dependents'},axis=1) .dropna(subset=['charges']) .drop(['region','Dependents'],axis=1) .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'}) ) df2=(df1 .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No')) .round({"charges":2,"bmi":1}) .sort_values('charges',ascending=True) .reset_index(drop=True) ) dfuH=(df2[(df2['bmi'] >= 18.5) & (df2['bmi'] <= 24.9 ) & (df2['smoker'] == 'yes') ]) dfuH = dfuH.reset_index(drop=True) return dfuH def underBmi(cwd): os.chdir('../..') os.chdir("Data/Raw") df1 = ( pd.read_csv('medical_expenses.csv') .rename({'children':'Dependents'},axis=1) .dropna(subset=['charges']) .drop(['region','Dependents'],axis=1) .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'}) ) df2=(df1 .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No')) .round({"charges":2,"bmi":1}) .sort_values('charges',ascending=True) .reset_index(drop=True) ) dfuH=(df2[(df2['bmi'] < 18.5) & (df2['smoker'] == 'no') ]) dfuH = dfuH.reset_index(drop=True) os.chdir(cwd) return dfuH def overBmi(cwd): os.chdir('../..') os.chdir("Data/Raw") df1 = ( pd.read_csv('medical_expenses.csv') .rename({'children':'Dependents'},axis=1) .dropna(subset=['charges']) .drop(['region','Dependents'],axis=1) .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'}) ) df2=(df1 .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No')) .round({"charges":2,"bmi":1}) .sort_values('charges',ascending=True) .reset_index(drop=True) ) dfuH=(df2[(df2['bmi'] > 24.9 ) & (df2['smoker']=='no')]) dfuH = dfuH.reset_index(drop=True) os.chdir(cwd) return dfuH def BoxPlt(df): g=sns.boxplot(x='Healthy',y='charges',data=df) return g def BoxPlts(df): g=sns.boxplot(x='smoker',y='charges',data=df) return g def BoxPltub(df): df1=(df[(df['bmi'] <=24.9 ) &(df['smoker'] == 'no') ]) g=sns.boxplot(x='Under_BMI',y='charges',data=df1) return g def BoxPltob(df): df1=(df[(df['bmi'] >=18.5 ) &(df['smoker'] == 'no') ]) g=sns.boxplot(x='Over_BMI',y='charges',data=df1) return g def mean(df): dfm=df['charges'].mean() dfmr = round(dfm,2) return dfmr def allsmoker(cwd): os.chdir('../..') os.chdir("Data/Raw") df1 = ( pd.read_csv('medical_expenses.csv') .rename({'children':'Dependents'},axis=1) .dropna(subset=['charges']) .drop(['region','Dependents'],axis=1) .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'}) ) df2=(df1 .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No')) .round({"charges":2,"bmi":1}) .sort_values('charges',ascending=True) .reset_index(drop=True) ) dfuH=(df2[(df2['smoker'] == 'yes')]) dfuH = dfuH.reset_index(drop=True) os.chdir(cwd) return dfuH def obese(cwd): os.chdir('../..') os.chdir("Data/Raw") df1 = ( pd.read_csv('medical_expenses.csv') .rename({'children':'Dependents'},axis=1) .dropna(subset=['charges']) .drop(['region','Dependents'],axis=1) .replace({'southwest':'SW','southeast':'SE','northeast':'NE','northwest':'NW'}) ) df2=(df1 .assign(Excess_charges= np.where(df1['charges'] > 13270 ,'Yes','No')) .assign(Over_BMI= np.where(df1['bmi'] > 24.9 ,'Yes','No')) .round({"charges":2,"bmi":1}) .sort_values('charges',ascending=True) .reset_index(drop=True) ) dfuH=(df2[(df2['bmi'] > 40.0 ) & (df2['smoker']=='no')]) dfuH = dfuH.reset_index(drop=True) os.chdir(cwd) return dfuH def RawDir(): cwd=os.getcwd() os.chdir('../..') cwdm=os.getcwd() dir = os.chdir("Data/Raw") return dir def returnDir(cwd): dir = os.chdir(cwd) return dir
7,795
module1-introduction-to-sql/buddymove_holidayiq.py
nchibana/DS-Unit-3-Sprint-2-SQL-and-Databases
0
2172020
import sqlite3 import pandas as pd df = pd.read_csv('buddymove_holidayiq.csv') conn = sqlite3.connect('buddymove_holidayiq.sqlite3') df.to_sql('review', con=conn, if_exists='replace') def sql_fetch(conn): cursor = conn.cursor() """ Get total number of rows""" query1 = '''SELECT count(*) FROM review;''' cursor.execute(query1) rows = cursor.fetchall() for row in rows: print(f'Total number of rows: {row[0]}') """ How many users who reviewed at least 100 Nature in the category also reviewed at least 100 in the Shopping category?""" query2 = '''SELECT COUNT("User Id")from review WHERE Nature >= 100 AND Shopping >=100;''' cursor.execute(query2) rows2 = cursor.fetchall() for row in rows2: print(f'Users who reviewed at least 100 Nature and Shopping: {row[0]}') """What are the average number of reviews for each category?""" query3 = '''SELECT AVG(Sports), AVG(Religious), AVG(Nature), AVG(Theatre), AVG(Shopping), AVG(Picnic) FROM review''' cursor.execute(query3) rows3 = cursor.fetchall() rows_result = [item for t in rows3 for item in t] labels = ['Sports','Religious','Nature','Theatre','Shopping', 'Picnic'] for label, row in zip(labels, rows_result): print(f'Avergage number of {label} reviews: {row:.2f}') cursor.close() conn.commit() sql_fetch(conn)
1,426
python/LAC/triedtree.py
lemonsuan/lac
1
2172107
# -*- coding: UTF-8 -*- ################################################################################ # # Copyright (c) 2020 Baidu, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################# """ 该模块实现Tried树,用于进行词典的多模匹配 """ class Node(object): """Trie树的结点. Attributes: next: dict类型,指向子结点 length: int类型,判断节点是否为单词 """ __slots__ = ['next', 'length'] def __init__(self): """初始化空节点.""" self.next = {} self.length = -1 class TriedTree(object): """实现Tried树的类 Attributes: __root: Node类型,Tried树根节点 """ def __init__(self): """初始化TriedTree的根节点__root""" self.__root = Node() def add_word(self, word): """添加单词word到Trie树中""" current = self.__root for char in word: current = current.next.setdefault(char, Node()) current.length = len(word) def make(self): """nothing to do""" pass def search(self, content): """前向最大匹配. 对content的文本进行多模匹配,返回后向最大匹配的结果. Args: content: string类型, 用于多模匹配的字符串 Returns: list类型, 最大匹配单词列表,每个元素为匹配的模式串在句中的起止位置,比如: [(0, 2), [4, 7]] """ result = [] length = len(content) current_position = 0 end_position = 0 while current_position < length: p = self.__root matches = [] for key in content[current_position:]: p = p.next.get(key, None) if not p: break if p.length > 0: end_position = current_position + p.length matches.append((current_position, end_position)) if len(matches) > 0: result.append((matches[-1][0], matches[-1][1])) current_position = max(current_position + 1, end_position) return result def search_all(self, content): """多模匹配的完全匹配. 对content的文本进行多模匹配,返回所有匹配结果 Args: content: string类型, 用于多模匹配的字符串 Returns: list类型, 所有匹配单词列表,每个元素为匹配的模式串在句中的起止位置,比如: [(0, 2), [4, 7]] """ result = [] length = len(content) for current_position in range(length): p = self.__root for key in content[current_position:]: p = p.next.get(key, None) if not p: break if p.length > 0: result.append( (current_position, current_position + p.length)) return result if __name__ == "__main__": words = ["百度", "家", "家家", "高科技", "技公", "科技", "科技公司"] string = '百度是家高科技公司' tree = TriedTree() for word in words: tree.add_word(word) for begin, end in tree.search(string): print(string[begin:end])
3,468
autoImport.py
emirhanbilge/AutoLoginandSendMessageInstagram
0
2170379
# -*- coding: utf-8 -*- """ Created on Mon Sep 27 15:55:00 2021 @author: EBB """ from selenium import webdriver from selenium.webdriver.common.by import By import time driver = webdriver.Chrome() driver.get("https://www.instagram.com/accounts/login/?hl=tr") def clickFunctions(xpathURL): while(1): ## internet hızına göre elementin bulunması için geçecek zamanı bilmiyoruz onun için bu yapıyı kullanıyorum. try: driver.find_element(By.XPATH, xpathURL ).click() # bulunca tıklama ve break yapma break except: time.sleep(1) def getElementFunction(xpathURL): e = 1 # elementi return ettirebilmemiz için objeyi eşitliyorum bunu tryda doğrudan return yaparsam none türüne düşebilir while(1): try: e = driver.find_element(By.XPATH, xpathURL) break except: time.sleep(1) return e username = getElementFunction('//*[@id="loginForm"]/div/div[1]/div/label/input') # Kullanıcı adı olan yeri alma password = getElementFunction('//*[@id="loginForm"]/div/div[2]/div/label/input') # Parola kısmını alma # Kullanıcı adını girme , parola girme ve butona basma username.send_keys("K<PASSWORD>anıcı <PASSWORD> gir") # Kullanıcı adını yolla password.send_keys("<PASSWORD>") # Parolayı yolla clickFunctions('//*[@id="loginForm"]/div/div[3]/button/div') # giriş butonuna tıklama time.sleep(5) #anasayfa driver.get("https://www.instagram.com/direct/new/") # mesajlar kısmını açma time.sleep(2) #sayfanın yüklenmesini bekleme clickFunctions('/html/body/div[6]/div/div/div/div[3]/button[2]') # bildirimleri kapata tıklama messagePage = getElementFunction('/html/body/div[2]/div/div/div[2]/div[1]/div/div[2]/input') #kullanıcı adı girme yerini bulma messagePage.send_keys("emreakins0")# kullanıcı adını girme clickFunctions('/html/body/div[2]/div/div/div[2]/div[2]/div/div/div[3]/button') # kullanıcıyı seçme clickFunctions('/html/body/div[2]/div/div/div[1]/div/div[2]/div/button') #ilete tıklama messageArea =getElementFunction('//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea') #mesaj alanını bulma messageArea.send_keys("Merhaba bu ebb'nin hazırladığı test kodudur")# mesaj alanını doldurma clickFunctions('//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button') #gönder butonuna tıklama
2,465
battlab_one.py
rfc6919/battlab
0
2171626
#!/usr/local/bin/python3 import serial import struct import collections Transaction = collections.namedtuple( 'Transaction', 'cmd,response_size,postprocess', defaults=[0, bytes.hex]) transactions = { 'set_voltage_1v2': Transaction(cmd=b'a'), 'set_voltage_1v5': Transaction(cmd=b'b'), 'set_voltage_2v4': Transaction(cmd=b'c'), 'set_voltage_3v0': Transaction(cmd=b'd'), 'set_voltage_3v2': Transaction(cmd=b'o'), 'set_voltage_3v6': Transaction(cmd=b'n'), 'set_voltage_3v7': Transaction(cmd=b'e'), 'set_voltage_4v2': Transaction(cmd=b'f'), 'set_voltage_4v5': Transaction(cmd=b'g'), 'set_psu_on': Transaction(cmd=b'h'), 'set_psu_off': Transaction(cmd=b'i'), 'get_calibration': Transaction(cmd=b'j', response_size=34, postprocess=lambda b: b), 'get_config': Transaction(cmd=b'm', response_size=4), 'get_version': Transaction(cmd=b'p', response_size=2, postprocess=lambda b: int.from_bytes(b, 'big')/1000), 'set_current_low': Transaction(cmd=b'k'), 'set_current_high': Transaction(cmd=b'l'), 'set_averages_1': Transaction(cmd=b's'), # only in version > 1001 'set_averages_4': Transaction(cmd=b't'), 'set_averages_16': Transaction(cmd=b'u'), 'set_averages_64': Transaction(cmd=b'v'), 'reset': Transaction(cmd=b'w', postprocess=lambda b: time.sleep(1)), # only in version > 1002 'set_sample_trig': Transaction(cmd=b'x'), 'set_sample_off': Transaction(cmd=b'y'), 'set_sample_on': Transaction(cmd=b'z'), } # indexes into the returned calibration data for sense resistor scaling values cal_indexes = { 'set_voltage_1v2': 0, 'set_voltage_1v5': 1, 'set_voltage_2v4': 2, 'set_voltage_3v0': 3, 'set_voltage_3v2': 3, 'set_voltage_3v6': 4, 'set_voltage_3v7': 5, 'set_voltage_4v2': 6, 'set_voltage_4v5': 7, } # indexes into the returned calibration data for sleep current offset values offset_indexes = { 'set_voltage_1v2': 8, 'set_voltage_1v5': 9, 'set_voltage_2v4': 10, 'set_voltage_3v0': 11, 'set_voltage_3v2': 12, 'set_voltage_3v6': 13, 'set_voltage_3v7': 14, 'set_voltage_4v2': 15, 'set_voltage_4v5': 16, } class BattLabOne: def __init__(self, device=None): self.sp = None self.calibration_data = None self.cal_adj = None self.offset = None self.low_current = None if device: self.connect(device) def connect(self, device): self.sp = serial.Serial( device, baudrate=115200, parity='N', bytesize=8, stopbits=1) self.sp.reset_input_buffer() self.sp.reset_output_buffer() self.calibrate() return self def calibrate(self): calibration_data_raw = self._do_transaction('get_calibration') self.calibration_data = struct.unpack('>17H', calibration_data_raw) def _do_transaction(self, command): transaction = transactions[command] self.sp.write(transaction.cmd) response = self.sp.read(transaction.response_size) # give the firmware time to do whatever, since we can't know when it's completed if transaction.response_size == 0: time.sleep(0.01) # update calibration and offset if we've just set the supply voltage if command.startswith('set_voltage_'): self.cal_adj = self.calibration_data[cal_indexes[command]]/1000 self.offset = self.calibration_data[offset_indexes[command]] # remember if we've got the low-current sense resistor enabled if command.startswith('set_current_'): self.low_current = command == 'set_current_low' return transaction.postprocess(response) def get_sample(self): raw_sample = self.sp.read(2) sample = int.from_bytes(raw_sample, 'big') sense_resistor_scale = 99 if self.low_current else self.cal_adj lsb = 0.0025 # magic value? current_mA = sample * lsb / sense_resistor_scale #- self.offset return current_mA if __name__ == '__main__': import sys import time import serial.tools.list_ports all_ports = serial.tools.list_ports.comports() battlab_one_ports = [p for p in all_ports if p.vid == 0x0403 and p.pid == 0x6001 and p.serial_number[:2] == "BB"] if len(battlab_one_ports) == 0: print('EE: no BattLab One found', file=sys.stderr) raise RuntimeError('no device found') elif len(battlab_one_ports) > 1: print('EE: multiple BattLab Ones (BattLabs One?) found', file=sys.stderr) raise RuntimeError('too many devices found') device = battlab_one_ports[0].device print(f'II: found BattLab One at {device}') b = BattLabOne(device) print('II: resetting') b._do_transaction('reset') print('II: firmware version {}'.format(b._do_transaction('get_version'))) cmds = 'set_voltage_1v2 set_current_high set_averages_64 set_psu_on'.split(' ') for cmd in cmds: print(f'II: sending command {cmd}') b._do_transaction(cmd) time.sleep(10) print(f'II: starting sampling') b._do_transaction('set_sample_on') sample_count = 10000 sample_sum = 0 sample_min = sys.float_info.max sample_max = 0 start_time = time.time() for n in range(sample_count): current_mA = b.get_sample() print(current_mA, file=f) sample_sum += current_mA sample_min = min(sample_min, current_mA) sample_max = max(sample_max, current_mA) end_time = time.time() b._do_transaction('set_sample_off') b.sp.reset_input_buffer() print(f'II: got {sample_count} samples in {end_time-start_time}s') print(f'II: cal_adj:{b.cal_adj}') print(f'II: min: {sample_min} max: {sample_max} avg: {sample_sum/sample_count}')
5,853
Leetcode/Competition/2.py
ZR-Huang/AlgorithmPractices
1
2170479
from typing import List class Solution: def numTimesAllBlue(self, light: List[int]) -> int: # brute force # time limitation exceeded ans = 0 length = len(light) states = ['0'] * len(light) visited = dict() for t, i in enumerate(light): all_light = True key = ''.join(states[:i]) if key in visited: all_light = visited[key] else: for j in range(i-1): if states[j] == '0': all_light = False break visited[''.join(states[:i])] = all_light states[i-1] = '2' if all_light else '1' j = i while j < length and states[j] != '0': states[j] = '2' if states[j-1] == '2' else states[j] j += 1 all_blue = True if states[j-1]=='2': while j < length: if states[j] == '1': all_blue = False break j += 1 ans += 1 if all_blue else 0 return ans def numTimesAllBlue(self, light: List[int]) -> int: # time limitation exceeded length = len(light) states = [0]*length light_not_blue = set() ans = 0 for t, i in enumerate(light): if i-1>0: if states[i-2]==2: states[i-1] = 2 else: states[i-1] = 1 light_not_blue.add(i-1) else: states[i-1] = 2 j = i while j < length and states[j] > 0: if states[j] == 1 and states[j-1]==2: states[j] = 2 light_not_blue.remove(j) j+=1 if not light_not_blue: ans += 1 return ans def numTimesAllBlue(self, light: List[int]) -> int: currMax, ans = 0, 0 for t, num in enumerate(light): currMax = max([currMax, num]) if currMax == t + 1: ans += 1 return ans print(Solution().numTimesAllBlue([2,1,3,5,4])) print(Solution().numTimesAllBlue([3,2,4,1,5])) print(Solution().numTimesAllBlue([4,1,2,3])) print(Solution().numTimesAllBlue([2,1,4,3,6,5])) print(Solution().numTimesAllBlue([1,2,3,4,5,6]))
2,444
cmds/abos.space.py
abos5/pythontutor
0
2171051
#! /usr/bin/python import sys import getopt _debug = 0 def usage(err=False): print ''' ssh: abos.space -option=value ''' if err: print 'invalid argv!' def main(argv): # retrive options try: opts, args = getopt.getopt(argv, "hg:d:", ["hey", ]) except getopt.GetoptError: usage(True) sys.exit(2) # working on options for opt, arg in opts: if opt in ("-h", "-help"): usage() sys.exit() elif opt == '-d': global _debug _debug = 1 elif opt in ("-g", "-grammar"): print arg if __name__ == '__main__': argv = sys.argv argv.pop(0) main(sys.argv) # sys. # eof
734
attic/2019/contributions-2019/open/mudaliar-yptu/PWAF/testcases/hovers_test.py
Agriad/devops-course
0
2171854
""" @author: <NAME> @email: <EMAIL> @date: 20-May-18 """ from pages.hovers_page import HoversPage from pages.welcome_page import WelcomePage from utility.drivermanager import DriverManagerFirefox, DriverManagerChrome from nose.plugins.attrib import attr @attr(group=['kth']) class HoversTestFirefox(DriverManagerFirefox): def test_hover_functionality(self): welcome_page = WelcomePage(self.driver) welcome_page.verify_welcome_page().click_on_link("Hovers") hovers_page = HoversPage(self.driver) hovers_page.verify_hovers_functionality() @attr(group=['kth']) class HoversTestChrome(DriverManagerChrome): def test_hover_functionality(self): welcome_page = WelcomePage(self.driver) welcome_page.verify_welcome_page().click_on_link("Hovers") hovers_page = HoversPage(self.driver) hovers_page.verify_hovers_functionality()
896
samples/web/content/apprtc/util_test.py
jsmithersunique/thegetvip_RTCsamples
0
2170841
# Copyright 2014 Google Inc. All Rights Reserved. import unittest import util class UtilTest(unittest.TestCase): def testGetMessageFromJson(self): self.assertEqual(None, util.get_message_from_json("")) self.assertEqual({}, util.get_message_from_json("{}")) self.assertEqual( {"a": "b","c": False, "d": 1, "e" : [1,2,"3"]}, util.get_message_from_json('{"a":"b","c":false,"d":1,"e":[1,2,"3"]}')) def testHasMsgField(self): testObject = { "a": False, "b": "str", "c": None, "d": {}, "e": [1, 2, "3"], "f": [], "g": {'A': 1} } self.assertEqual( True, util.has_msg_field(testObject, "a", bool)) self.assertEqual( False, util.has_msg_field(testObject, "a", basestring)) self.assertEqual( False, util.has_msg_field(testObject, "c", bool)) self.assertEqual( False, util.has_msg_field(testObject, "d", dict)) self.assertEqual( True, util.has_msg_field(testObject, "e", list)) self.assertEqual( False, util.has_msg_field(testObject, "f", list)) self.assertEqual( True, util.has_msg_field(testObject, "g", dict)) self.assertEqual( False, util.has_msg_field(testObject, "h", dict)) self.assertEqual( False, util.has_msg_field(None, "a", dict)) def testHasMsgFields(self): testObject = { "a": False, "b": "str", "c": None, "d": {}, "e": [1, 2, "3"], "f": [], "g": {'A': 1} } self.assertEqual( True, util.has_msg_fields( testObject, (("a", bool), ("b", basestring), ("e", list)))) self.assertEqual( False, util.has_msg_fields( testObject, (("a", bool), ("b", bool), ("e", list)))) self.assertEqual( False, util.has_msg_fields( testObject, (("a", bool), ("h", basestring), ("e", list)))) def testGenerateRandomGeneratesStringOfRightLength(self): self.assertEqual(17, len(util.generate_random(17))) self.assertEqual(23, len(util.generate_random(23))) if __name__ == '__main__': unittest.main()
2,251
openfda/res/tests/scrape_historic_test.py
hobochili/openfda
0
2170959
#!/usr/bin/python import unittest from openfda.res import scrape_historic import os import simplejson as json class ScrapeHistoricUnitTest(unittest.TestCase): 'Scrape Historic Unit Test' def set_up(self): pass def test_scrape_june__13__2012(self): mydir = os.path.dirname(__file__) html = open(mydir + '/data/ucm308307.htm') expected_json = open(mydir + '/data/ucm308307.json').read() scraped_list = scrape_historic.scrape_report(html.read()) actual_json = '\n'.join([json.dumps(s) for s in scraped_list]) self.assertEqual(expected_json, actual_json, mydir + '/data/ucm308307.json') def test_scrape_one_recall(self): mydir = os.path.dirname(__file__) recall = open(mydir + '/data/one-recall.txt').read().strip() expected_recall_json = open(mydir + '/data/one-recall.json').read().strip() actual_recall_json = json.dumps(scrape_historic.scrape_one_recall(recall)) self.assertEqual(expected_recall_json, actual_recall_json, mydir + '/data/one-recall.json') if __name__ == '__main__': unittest.main()
1,062
Labs/ValueFunctionIteration/VFI_Solutions.py
jessicaleete/numerical_computing
10
2172268
#================================================ #Solutions To Value Function Iteration Lab #================================================ #"Problem 1" #import scipy as sp #from matplotlib import pyplot as plt #from matplotlib import cm #from mpl_toolkits . mplot3d import Axes3D # # #beta = 0.9; #T = 10; #N = 100; #u = lambda c: sp.sqrt(c); #W = sp.linspace(0,1,N); #X, Y = sp.meshgrid(W,W); #Wdiff = Y-X #index = Wdiff <0; #Wdiff[index] = 0; #util_grid = u(Wdiff); #util_grid[index] = -10**10; #V = sp.zeros((N,T+2)); #psi = sp.zeros((N,T+1)); # # #for k in xrange(T,-1,-1): # val = util_grid + beta*sp.tile(sp.transpose(V[:,k+1]),(N,1)); # vt = sp.amax(val, axis = 1); # psi_ind = sp.argmax(val,axis = 1) # V[:,k] = vt; # psi[:,k] = W[psi_ind]; # # # #x=sp.arange(0,N) #y=sp.arange(0,T+2) #X,Y=sp.meshgrid(x,y) #fig1 = plt.figure() #ax1= Axes3D(fig1) #ax1.plot_surface(W[X],Y,sp.transpose(V), cmap=cm.coolwarm) #plt.show () # #fig2 = plt.figure() #ax2 = Axes3D(fig2) #y = sp.arange(0,T+1) #X,Y=sp.meshgrid(x,y) #ax2.plot_surface(W[X],Y,sp.transpose(psi), cmap = cm.coolwarm) #plt.show() #================================================ "Problem 2" #import scipy as sp #from matplotlib import pyplot as plt # #beta = 0.9; #T = 1000; #N = 100; #u = lambda c: sp.sqrt(c); #W = sp.linspace(0,1,N); #X, Y = sp.meshgrid(W,W); #Wdiff = sp.transpose(X-Y); #index = Wdiff <0; #Wdiff[index] = 0; #util_grid = u(Wdiff); #util_grid[index] = -10**10; #V = sp.zeros((N,T+2)); #psi = sp.zeros((N,T+1)); # # #for k in xrange(T,-1,-1): # val = util_grid + beta*sp.tile(sp.transpose(V[:,k+1]),(N,1)); # vt = sp.amax(val, axis = 1); # psi_ind = sp.argmax(val,axis = 1) # V[:,k] = vt; # psi[:,k] = W[psi_ind]; # # # #plt.plot(psi[99,:]) #================================================ #"Problem 3" #import scipy as sp #from matplotlib import pyplot as plt # #beta = 0.99 #N = 1000 #u = lambda c: sp.sqrt(c) #W = sp.linspace(0,1,N) #X, Y = sp.meshgrid(W,W) #Wdiff = sp.transpose(X-Y) #index = Wdiff <0 #Wdiff[index] = 0 #util_grid = u(Wdiff) #util_grid[index] = -10**10 # #Vprime = sp.zeros((N,1)) #psi = sp.zeros((N,1)) #delta = 1.0 #tol = 10**-9 #it = 0 #max_iter = 500 # #while (delta >= tol) and (it < max_iter): # V = Vprime # it += 1; # print(it) # val = util_grid + beta*sp.transpose(V) # Vprime = sp.amax(val, axis = 1) # Vprime = Vprime.reshape((N,1)) # psi_ind = sp.argmax(val,axis = 1) # psi = W[psi_ind] # delta = sp.dot(sp.transpose(Vprime - V),Vprime-V) #plt.plot(W,psi) #plt.show()
2,569
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/cms/djangoapps/contentstore/management/commands/restore_asset_from_trashcan.py
osoco/better-ways-of-thinking-about-software
3
2171243
"""Management command to restore assets from trash""" from django.core.management.base import BaseCommand from xmodule.contentstore.utils import restore_asset_from_trashcan class Command(BaseCommand): """Command class to handle asset restore""" help = '''Restore a deleted asset from the trashcan back to it's original course''' def add_arguments(self, parser): parser.add_argument('location') def handle(self, *args, **options): restore_asset_from_trashcan(options['location'])
518
app/main.py
HaeckelK/journal-api
0
2172424
from typing import List from datetime import datetime from fastapi import Depends, FastAPI, HTTPException from sqlalchemy.orm import Session from . import crud, models, schemas from .database import SessionLocal, engine models.Base.metadata.create_all(bind=engine) app = FastAPI() # Dependency def get_db(): db = SessionLocal() try: yield db finally: db.close() @app.post("/journals/", response_model=schemas.Journal) def create_journal(journal: schemas.JournalCreate, db: Session = Depends(get_db)): if journal.date != -1 and len(str(journal.date)) != 8: raise HTTPException(status_code=400, detail="date must be in yyyymmdd format") if journal.date == -1: journal.date = datetime.today().strftime('%Y%m%d') db_journal = crud.get_journal(db, journal_id=journal.date) if db_journal: raise HTTPException(status_code=400, detail=f"Journal with date {journal.date} already registered") return crud.create_journal(db=db, journal=journal) @app.get("/journals/", response_model=List[schemas.Journal]) def read_journals(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): journals = crud.get_journals(db, skip=skip, limit=limit) return journals @app.get("/journals/{journal_id}", response_model=schemas.Journal) def read_journal(journal_id: int, db: Session = Depends(get_db)): db_journal = crud.get_journal(db, journal_id=journal_id) if db_journal is None: raise HTTPException(status_code=404, detail=f"Journal with id {journal_id} not found") return db_journal @app.delete("/journals/{journal_id}", response_model=schemas.Journal) def delete_journal(journal_id: int, db: Session = Depends(get_db)): db_journal = crud.delete_journal(db, journal_id=journal_id) if db_journal is None: raise HTTPException(status_code=404, detail=f"Journal with id {journal_id} not found") return db_journal @app.post("/journals/{journal_id}/items/", response_model=schemas.Item) def create_item_for_journal( journal_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db) ): db_journal = crud.get_journal(db, journal_id=journal_id) if not db_journal: raise HTTPException(status_code=400, detail=f"Journal with id {journal_id} not registered") return crud.create_journal_item(db=db, item=item, journal_id=journal_id) @app.get("/items/", response_model=List[schemas.Item]) def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)): items = crud.get_items(db, skip=skip, limit=limit) return items @app.delete("/items/{item_id}", response_model=schemas.Item) def delete_item(item_id: int, db: Session = Depends(get_db)): db_item = crud.delete_item(db, item_id=item_id) if db_item is None: raise HTTPException(status_code=404, detail=f"Item with id {item_id} not found") return db_item
2,894
mysql-dummy-data/main.py
panticne/Teaching-HEIGVD-AMT-2019-Project-One
1
2170473
import collections, re import time import copy import config from constants import * from models import * tables = dict() dummy_rows = dict() def initial_read_phpmyadmin(): with open(config.input_file, "r") as f: line = f.readline() current_table_name = "" while line: create_table = re.match(CREATE_TABLE_REGEX, line) alter_table = re.match(ALTER_TABLE_REGEX, line) primary_key = re.match(PRIMARY_KEY_REGEX_PHPMYADMIN, line) row_entry = re.match(TABLE_COLUMN_REGEX, line) row_entry_date = re.match(TABLE_COLUMN_REGEX_DATE, line) foreign_key = re.match(FOREIGN_KEY_REGEX_PHPMYADMIN, line) auto_increment = re.match(AUTO_INCREMENT_REGEX, line) if create_table: current_table_name = create_table.group('table_name') tables[current_table_name] = Table(current_table_name) elif row_entry: tables[current_table_name].columns.append(Column(row_entry.group('column_name'), row_entry.group('type'), length=int(row_entry.group('length')))) elif row_entry_date: tables[current_table_name].columns.append(Column(row_entry_date.group('column_name'), row_entry_date.group('type'))) elif alter_table: current_table_name = alter_table.group('table_name') elif foreign_key: other_table_name = foreign_key.group('other_table') other_column_name = foreign_key.group('other_column') this_column_name = foreign_key.group('foreign_key') tables[current_table_name].add_foreign_key(this_column_name, tables[other_table_name], other_column_name) elif primary_key: column = Table.find_column(tables[current_table_name], primary_key.group('primary_key')) column.pk = True elif auto_increment: column = Table.find_column(tables[current_table_name], auto_increment.group('column_name')) column.auto_increment = True line = f.readline() #Original tables variable will be used for topological pruning of the graph, #the copy will be used as return copy.copy(tables) def initial_read_mysqldump(): foreign_keys = [] with open(config.input_file, "r") as f: line = f.readline() current_table_name = "" while line: create_table = re.match(CREATE_TABLE_REGEX, line) primary_key = re.match(PRIMARY_KEY_REGEX_MYSQLDUMP, line) row_entry = re.match(TABLE_COLUMN_REGEX, line) row_entry_date = re.match(TABLE_COLUMN_REGEX_DATE, line) foreign_key = re.match(FOREIGN_KEY_REGEX_MYSQLDUMP, line) if create_table: current_table_name = create_table.group('table_name') tables[current_table_name] = Table(current_table_name) elif row_entry: column = Column(row_entry.group('column_name'), row_entry.group('type'), length=int(row_entry.group('length'))) tables[current_table_name].columns.append(column) if 'AUTO_INCREMENT' in line: column.auto_increment = True elif row_entry_date: tables[current_table_name].columns.append(Column(row_entry_date.group('column_name'), row_entry_date.group('type'))) elif foreign_key: other_table_name = foreign_key.group('other_table') other_column_name = foreign_key.group('other_column') this_column_name = foreign_key.group('foreign_key') foreign_keys.append((tables[current_table_name], this_column_name, other_table_name, other_column_name)) elif primary_key: column = Table.find_column(tables[current_table_name], primary_key.group('primary_key')) column.pk = True line = f.readline() #Process foreign keys after structure has been set, otherwise there will be an error because #foreign keys might reference non-existing tables for table, this_column_name, other_table_name, other_column_name in foreign_keys: table.add_foreign_key(this_column_name, tables[other_table_name], other_column_name) return copy.copy(tables) def write_to_sql(): with open(config.input_file, 'r') as input, open(config.output_file, 'w') as output: line = input.readline() current_table_name = None while line: create_table = re.match(CREATE_TABLE_REGEX, line) if create_table: current_table_name = create_table.group('table_name') elif line.startswith(")"): output.write(line) line = input.readline() output.write(line) line = input.readline() for row in generate_insert_clause(current_table_name): output.write(row) continue output.write(line) line = input.readline() def generate_insert_clause(table_name): table = tables_copy[table_name] columns = list(map(lambda column: "`{}`".format(column.name), table.columns)) lines = ["INSERT INTO `{0}` ({1}) VALUES\n".format(table_name, ', '.join(columns))] lines.extend(table.dummy_rows) return lines if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Generates dummy data given a MySQL dump file.") parser.add_argument('input_file') parser.add_argument('--rows', help="Enter the desired number of rows.", type=int, default=10) parser.add_argument('--phpmyadmin', help="Use this flag if the dump was exported using phpmyadmin", action='store_true') parser.add_argument('--output', help="Set the output file name. Default is output.sql.", type=str, default="output.sql") args = parser.parse_args() config.row_count = args.rows config.input_file = args.input_file config.output_file = args.output tables_copy = initial_read_mysqldump() if not args.phpmyadmin else initial_read_phpmyadmin() while tables: queue = collections.deque() #Process tables that have no dependencies for table_name in list(tables): if tables[table_name].outdegree == 0: queue.append(tables[table_name]) del tables[table_name] while queue: table = queue.popleft() for column in table.columns: column.generate_data() for child_table in table.child_tables: #Decrement parent table's outdegree by 1, now that this child table #has been processed child_table.outdegree -= 1 table.insert_rows() dummy_rows[table.name] = table.dummy_rows write_to_sql()
7,057
data_serializers/chinese.py
wjshan/qrcode
0
2171650
from .base import SerializerAble import codecs from ..version_serializers import Version class ChineseSerializer(SerializerAble): flag = '1101' @staticmethod def counting_indicator_map(version_num: int) -> int: if version_num <= 9: return 8 elif version_num <= 26: return 10 else: return 12 def encode(self, **kwargs) -> str: """ 1. 对于第一字节值在0xA1到0xAA范围,第二字节值在0xA1到0xFE范围的字符 1)第一字节值减去0xA1 2)将1)的结果乘以0x60 3)第二字节值减去0xA1 4)将2)的结果加上3)的结果 5)将结果转换为13位二进制串 2. 对于第一字节值在0xBO到0xFA范围,第二字节值在0xA1到0xFE范围的字符 1)第一字节值减去0xA6 2)将1)的结果乘以0x60 3)第二字节值减去0xA1 4)将2)的结果加上3)的结果 5)将结果转换为13位二进制串 """ character = "" for _c in self.raw_data: c1, c2 = _c.encode('gb2312') if 0xA1 <= c1 <= 0xAA: c1 -= 0xA1 else: c1 -= 0xA6 c1 *= 0x60 c2 -= 0xA1 character += self.bin(c1 + c2, 13) return character @classmethod def get_len(cls, version: Version, raw_data: str) -> int: counting = cls.counting_indicator_map(version.version_num) return 4 + 4 + counting + 13 * len(raw_data)
1,326
generateURDF.py
giaco5988/BrickRegistration
121
2172322
import os import sys import threading from object2urdf import ObjectUrdfBuilder import shutil # Build single URDFs object_folder = "lego" #An ugly copy paste of build_library to catch exception def safe_build_library(urdfbuilder, **kwargs): print("\nFOLDER: %s"%(urdfbuilder.object_folder)) # Get all OBJ files obj_files = urdfbuilder._get_files_recursively(urdfbuilder.object_folder, filter_extension='.obj', exclude_suffix=urdfbuilder.suffix) stl_files = urdfbuilder._get_files_recursively(urdfbuilder.object_folder, filter_extension='.stl', exclude_suffix=urdfbuilder.suffix) obj_folders=[] for root, _, full_file in obj_files: obj_folders.append(root) try: urdfbuilder.build_urdf(full_file,**kwargs) except: print("An exception occured during " + full_file ) common = os.path.commonprefix([urdfbuilder.object_folder,full_file]) rel = os.path.join(full_file.replace(common,'')) print('\tBuilding: %s'%(rel) ) for root, _, full_file in stl_files: if root not in obj_folders: try: urdfbuilder.build_urdf(full_file,**kwargs) except: print("An exception occured during " + full_file) common = os.path.commonprefix([urdfbuilder.object_folder,full_file]) rel = os.path.join(full_file.replace(common,'')) print('Building: %s'%(rel) ) def thread_function(tindex,nthreads): shutil.copy( "_prototype.urdf",object_folder+"-"+str(tindex),follow_symlinks=True) builder = ObjectUrdfBuilder(object_folder+"-"+str(tindex)) #we use center = "geometry" instead of "mass" because it fails on some objects and make the program crash #we use depth=1 as an extra parameter for vhacd so that it sacrifice collision geometry quality so that #it goes faster during simulation #oclAcceleration=0 safe_build_library(builder,force_overwrite=True, decompose_concave=True, force_decompose=False, center = 'geometry',depth=1) nthreads = 8 for i in range(nthreads): x = threading.Thread(target=thread_function, args=(i,nthreads)) x.start()
2,259
api/Folder.py
BowangLan/uw-tools
0
2171652
import typing from __future__ import annotations from dataclasses import dataclass from .util import with_client import httpx from .Site import Site from .File import File @dataclass class Folder: id: str name: str full_name: str = None parent: Folder = None full_files_url: str = None full_folders_url: str = None files: typing.List[File] = None folders: typing.List[Folder] = None created_at: str = None updated_at: str = None site: Site def make_get_items_params(self): params = { "include[]": [ "user", "usage_rights", "enhanced_preview_url", "context_asset_string", ], "per_page": "20", "sort": "", "order": "", } return params @with_client def get_files(self, client: httpx.Client = None): params = self.make_get_items_params() res = client.get(self.full_files_url, params=params) data = res.json() self.files = [ File( **data, site=self.site, parent=self ) for i in data] return self.files @with_client def get_folders(self, client: httpx.Client = None, with_params=True): if with_params: params = self.make_get_items_params() res = client.get(self.full_folders_url, params=params) else: res = client.get(self.full_folders_url) data = res.json() self.folders = [ Folder( id=str(i['id']), name=i['name'], site=self.site, parent=self ) for i in data] @with_client def get_items(self, client: httpx.Client = None): self.get_folders(client=client) self.get_files(client=client)
1,919
PaddleFSL/paddlefsl/utils/eval.py
Chaoqun-Guo/FSL-Mate
0
2171168
# Copyright 2021 PaddleFSL Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle __all__ = ['classification_acc'] def classification_acc(predict, label): """ Calculate classification accuracy: correct_result / sample_number Args: predict(paddle.Tensor): predictions, shape (sample_number, class_number), in the form of one-hot coding. label(paddle.Tensor): labels, shape (sample_number), in the form of continuous coding. Returns: accuracy(float): classification accuracy. Examples: ..code-block:: python import paddlefsl.utils as utils predictions = paddle.to_tensor([[0.1, 0.9], [0.8, 0.2]], dtype='float32') labels = paddle.to_tensor([0, 0], dtype='int64') accuracy = utils.classification_acc(predictions, labels) # accuracy: 0.5 """ correct = 0 for i in range(predict.shape[0]): if paddle.argmax(predict[i]) == int(label[i]): correct += 1 return float(correct) / predict.shape[0]
1,543
code_week29_119_1115/freedom_trail.py
dylanlee101/leetcode
0
2171804
''' 电子游戏“辐射4”中,任务“通向自由”要求玩家到达名为“Freedom Trail Ring”的金属表盘,并使用表盘拼写特定关键词才能开门。 给定一个字符串 ring,表示刻在外环上的编码;给定另一个字符串 key,表示需要拼写的关键词。您需要算出能够拼写关键词中所有字符的最少步数。 最初,ring 的第一个字符与12:00方向对齐。您需要顺时针或逆时针旋转 ring 以使 key 的一个字符在 12:00 方向对齐,然后按下中心按钮,以此逐个拼写完 key 中的所有字符。 旋转 ring 拼出 key 字符 key[i] 的阶段中: 您可以将 ring 顺时针或逆时针旋转一个位置,计为1步。旋转的最终目的是将字符串 ring 的一个字符与 12:00 方向对齐,并且这个字符必须等于字符 key[i] 。 如果字符 key[i] 已经对齐到12:00方向,您需要按下中心按钮进行拼写,这也将算作 1 步。按完之后,您可以开始拼写 key 的下一个字符(下一阶段), 直至完成所有拼写。 示例:     输入: ring = "godding", key = "gd" 输出: 4 解释: 对于 key 的第一个字符 'g',已经在正确的位置, 我们只需要1步来拼写这个字符。 对于 key 的第二个字符 'd',我们需要逆时针旋转 ring "godding" 2步使它变成 "ddinggo"。 当然, 我们还需要1步进行拼写。 因此最终的输出是 4。 提示: ring 和 key 的字符串长度取值范围均为 1 至 100; 两个字符串中都只有小写字符,并且均可能存在重复字符; 字符串 key 一定可以由字符串 ring 旋转拼出。 来源:力扣(LeetCode) 链接:https://leetcode-cn.com/problems/freedom-trail ''' class Solution: def findRotateSteps(self, ring: str, key: str) -> int: MAX = float('inf') ringDict = collections.defaultdict(list) for i, r in enumerate(ring): ringDict[r].append(i) def minDist(target, preState): res = MAX for i, d in preState: curDist = d + min(abs(target - i), abs(target - i + n), abs(target - i - n)) res = min(res, curDist) return res n = len(ring) dp = [(0, 0)] for k in key: dp = [(index, minDist(index, dp)) for index in ringDict[k]] return min(dist for _, dist in dp) + len(key)
1,484
generators.py
Cesar17188/python_avanzado
0
2170370
import time def fibo_gen(max: int): n1 = 0 n2 = 1 counter = 0 while True: if counter == 0: counter += 1 yield n1 elif counter == 1: counter += 1 yield n2 else: if counter < max: aux = n1 + n2 n1, n2 = n2, aux counter += 1 yield aux else: break def call_fibo(func): def wrapper(*args, **kwargs): print('La seríe Fibonnaci es: ') func(*args, **kwargs) print('Gracias por usar secuencia Fibonacci, Hasta pronto!!') return wrapper @call_fibo def iter_element(max): fibonacci = fibo_gen(max) for i, element in enumerate(fibonacci): print(f'element {i+1} --> {element}') time.sleep(1) if __name__ == '__main__': max = int(input('Ingrese la cantidad de número de la seríe Fibonacci que desea: ')) iter_element(max)
978
applications/talos/contrib/ldap.py
triflesoft/django-application-talos
2
2172152
from ldap3 import Server, Connection, ALL from ldap3.core.exceptions import * # TODO too generic import from talos.models import BasicIdentityDirectoryOption basic_identity_directory_option = BasicIdentityDirectoryOption.objects.filter( directory__code='ldap') class LdapConnection: def __init__(self): needful_items = ['host', 'username', 'password', 'port', 'user_search_base', 'cn_search_base'] values = {} for item in needful_items: try: queryset = basic_identity_directory_option.get(name=item) values[item] = queryset.value except BasicIdentityDirectoryOption.DoesNotExist: raise BasicIdentityDirectoryOption.DoesNotExist( 'Please specify ldap {item} in BasicIdentityDirectoryOption'.format(item=item)) self.host = values['host'] self.port = int(values['port']) self.username = values['username'] self.password = values['password'] self.user_search_base = values['user_search_base'] self.cn_search_base = values['cn_search_base'] self.server = self.server_connect() self.connection = self.create_connection() def server_connect(self): server = Server(self.host, port=self.port, get_info=ALL) if not server.check_availability(): raise Exception('LDAP Server is not reachable') return server def create_connection(self): if not self.server: raise Exception("Please run connect()") connection = Connection(self.server, user=self.username, password=self.password, raise_exceptions=True) connection.open() try: connection.bind() except LDAPInvalidCredentialsResult: raise LDAPAttributeError("Invalid LDAP Credentials") return connection def check_credentials(self, username, password): # If user principal name is entered (<EMAIL>) if '@' in username: search_filter = "userPrincipalName" search_value = username # If user NetBios\sAMAccountName is entered elif "\\" in username: net_bios_name = username.split('\\')[0] username = username.split('\\')[1] self.connection.search( search_base=self.cn_search_base, search_filter='(netbiosname=*)', attributes=['*']) net_bios_name_entries = self.connection.entries if len(net_bios_name_entries) == 0: raise LDAPAttributeError("NetBos name not found") # If user input netbios name match netbios name searched in LDAP elif net_bios_name != self.connection.entries[0]['nETBIOSName']: raise LDAPInvalidCredentialsResult("Invalid NetBios name") # If dc=server, dc=com is matched to read domain controller elif self.user_search_base != self.connection.entries[0]['nCName']: raise LDAPInvalidCredentialsResult("Invalid NetBios name") search_value = username search_filter = "sAMAccountName" else: search_value = username search_filter = "sAMAccountName" self.connection.search( search_base=self.user_search_base, search_filter='({search_filter}={search_value})'.format( search_filter=search_filter, search_value=search_value), attributes='userPrincipalName') # If no user found if len(self.connection.entries) != 1: raise LDAPInvalidCredentialsResult('Username not found in LDAP') user_principal_name = str(self.connection.entries[0]['userPrincipalName']) self.connection = Connection( self.server, user=user_principal_name, password=password, check_names=True, lazy=False, raise_exceptions=True, auto_bind=True) self.connection.open() try: self.connection.bind() except LDAPInvalidCredentialsResult: raise LDAPInvalidCredentialsResult("Invalid credentials") return True
4,249
soc/rtl/debug/debug_mem.py
mfkiwl/pifive-cpu
6
2172346
from migen import * import math from third_party import wishbone as wb class DebugMemory(Module): def __init__(self, bus=None, debug_bus=None, enable_code=0xABAB12): if bus is None: self.bus = wb.Interface(data_width=32, adr_width=32) else: self.bus = bus if debug_bus is None: self.debug_bus = wb.Interface(data_width=32, adr_width=32) else: self.debug_bus = debug_bus wb_rd_req = self.bus.cyc & self.bus.stb & ~self.bus.ack & ~self.bus.we wb_wr_req = self.bus.cyc & self.bus.stb & ~self.bus.ack & self.bus.we wb_rd_data = Signal(32) self.comb += self.bus.dat_r.eq(wb_rd_data) enable_entry = Signal(32, reset=0) enabled = enable_entry == Constant(enable_code) # 00 = Cfg/Status {22'b0, err, ack, sel[3:0], 2'b0, rd_req, wr_req} # 04 = Addr # 08 = Write Data # 0C = Read Data # 10 = Enable self.sync += [ self.bus.ack.eq(0), self.bus.err.eq(0), self.debug_bus.ack.eq(0), self.debug_bus.err.eq(0), self.debug_bus.dat_r.eq(0), If(self.debug_bus.stb & self.debug_bus.cyc & ~self.debug_bus.ack, self.debug_bus.ack.eq(1), If((self.debug_bus.adr >> 2) == 0, If(enabled & self.debug_bus.we & self.debug_bus.sel[1], If(self.debug_bus.dat_w[8], self.bus.ack.eq(1)). Elif(self.debug_bus.dat_w[9], self.bus.err.eq(1))), self.debug_bus.dat_r.eq(Cat(wb_wr_req, wb_rd_req, Constant(0, bits_sign=2), self.bus.sel))), If((self.debug_bus.adr >> 2) == 1, self.debug_bus.dat_r.eq(self.bus.adr)), If((self.debug_bus.adr >> 2) == 2, self.debug_bus.dat_r.eq(self.bus.dat_w)), If((self.debug_bus.adr >> 2) == 3, If(self.debug_bus.we & self.debug_bus.sel[0], wb_rd_data[0:8].eq(self.debug_bus.dat_w[0:8])), If(self.debug_bus.we & self.debug_bus.sel[1], wb_rd_data[8:16].eq(self.debug_bus.dat_w[8:16])), If(self.debug_bus.we & self.debug_bus.sel[2], wb_rd_data[16:24].eq(self.debug_bus.dat_w[16:24])), If(self.debug_bus.we & self.debug_bus.sel[3], wb_rd_data[24:32].eq(self.debug_bus.dat_w[24:32])), self.debug_bus.dat_r.eq(wb_rd_data)), If((self.debug_bus.adr >> 2) == 4, If(self.debug_bus.we & self.debug_bus.sel[0], enable_entry[0:8].eq(self.debug_bus.dat_w[0:8])), If(self.debug_bus.we & self.debug_bus.sel[1], enable_entry[8:16].eq(self.debug_bus.dat_w[8:16])), If(self.debug_bus.we & self.debug_bus.sel[2], enable_entry[16:24].eq(self.debug_bus.dat_w[16:24])), If(self.debug_bus.we & self.debug_bus.sel[3], enable_entry[24:32].eq(self.debug_bus.dat_w[24:32])), self.debug_bus.dat_r.eq(enable_entry)), ) ]
3,061
apps/users/urls.py
shao-169/SLTP
0
2170343
# _*_ encoding:utf-8 _*_ from django.conf.urls import url from users.views import LoginView,LogoutView,RegisterView,ActiveUserView,IndexView\ ,ForgetPWView,FindPWCordView,UserInfoView,NickNameView,WorkNameView,UploadImageView,MyCourseView,\ DeleteMyCourseView,MyFavCouresView,MyFavTeacherView,MyPhoneView __author__ = 'YZF' __date__ = '2018/3/14,16:26' urlpatterns = [ url(r'^login/', LoginView.as_view(), name='login'), url(r'^register/',RegisterView.as_view(),name='register'), # url(r'forget/(?P<email>.*)*/',ForgetPWView.as_view(),name='forget'), url(r'^forget/', ForgetPWView.as_view(), name='forget'), url(r'^logout/',LogoutView.as_view(),name='logout'), url(r'^forget_cord/',FindPWCordView.as_view(),name='forgetcord'), # 激活用户url url('^active/(?P<active_code>.*)/', ActiveUserView.as_view(), name= "user_active"), url('^info/', UserInfoView.as_view(), name="user_info"), url('^nickname/', NickNameView.as_view(), name="info_nickname"), url('^phone/', MyPhoneView.as_view(), name="info_phone"), url('^workname/', WorkNameView.as_view(), name="info_workname"), url(r'^mycourses/', MyCourseView.as_view(), name='info_courses'), url(r'^favteacher/', MyFavTeacherView.as_view(), name='fav_teacher'), url(r'^myfav/', MyFavCouresView.as_view(), name='info_myfav'), url('^delete/(?P<course_id>.*)/', DeleteMyCourseView.as_view(), name= "course_delete"), url(r'^uploadoimg/', UploadImageView.as_view(), name='image_upload'), ]
1,502
jp.atcoder/abc049/arc065_a/11874696.py
kagemeka/atcoder-submissions
1
2171497
import sys t = set("dream, dreamer, erase, eraser".split(", ")) def obtainable(s): while True: if s[-5:] in t: s = s[:-5] elif s[-6:] in t: s = s[:-6] else: return False if not s: return True s = sys.stdin.readline().rstrip() def main(): print("YES" if obtainable(s) else "NO") if __name__ == "__main__": main()
439
codes_auto/1678.number-of-ways-to-split-a-string.py
smartmark-pro/leetcode_record
0
2169468
# # @lc app=leetcode.cn id=1678 lang=python3 # # [1678] number-of-ways-to-split-a-string # None # @lc code=end
110
pyqt_foldable_toolbar/foldableToolBar.py
yjg30737/pyqt-foldable-toolbar
0
2171894
from PyQt5.QtWidgets import QToolBar, QWidget, QHBoxLayout, QSizePolicy, QAction, QWidgetAction from PyQt5.QtCore import Qt, QPropertyAnimation, QAbstractAnimation from pyqt_svg_button import SvgButton class FoldableToolBar(QToolBar): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.__initUi() def __initUi(self): self.setMovable(False) self.__foldBtn = SvgButton() self.__foldBtn.setIcon('ico/fold.svg') self.__foldBtn.setCheckable(True) self.__foldBtn.toggled.connect(self.__fold) self.__foldBtn.setMaximumWidth(12) cornerWidget = QWidget() lay = QHBoxLayout() lay.addWidget(self.__foldBtn) lay.setAlignment(Qt.AlignRight | Qt.AlignBottom) lay.setContentsMargins(0, 0, 0, 0) cornerWidget.setLayout(lay) cornerWidget.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred) self.__foldAction = QWidgetAction(self) self.__foldAction.setDefaultWidget(cornerWidget) self.addAction(self.__foldAction) self.__menuAnimation = QPropertyAnimation(self, b"height") self.__menuAnimation.valueChanged.connect(self.setFixedHeight) self.__menuAnimation.setStartValue(self.sizeHint().height()) self.__menuAnimation.setDuration(200) # default duration self.__menuAnimation.setEndValue(self.__foldBtn.sizeHint().height()) # default end value def __fold(self, f): if f: self.__menuAnimation.setDirection(QAbstractAnimation.Forward) self.__menuAnimation.start() self.__foldBtn.setIcon('ico/unfold.svg') self.setFixedHeight(self.__foldBtn.sizeHint().height()) else: self.__menuAnimation.setDirection(QAbstractAnimation.Backward) self.__menuAnimation.start() self.__foldBtn.setIcon('ico/fold.svg') self.setFixedHeight(self.sizeHint().height()) def addWidget(self, widget: QWidget) -> QAction: self.insertWidget(self.__foldAction, widget) self.__menuAnimation.setStartValue(self.sizeHint().height())
2,151
leasing/tests/api/test_create_invoice.py
hkotkanen/mvj
0
2171507
import datetime import json from decimal import Decimal import pytest from django.core.serializers.json import DjangoJSONEncoder from django.urls import reverse from django.utils import timezone from leasing.enums import ContactType, TenantContactType from leasing.models import Invoice @pytest.mark.django_db def test_create_invoice(django_db_setup, admin_client, lease_factory, tenant_factory, tenant_rent_share_factory, contact_factory, tenant_contact_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1, start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True) tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=1) tenant_rent_share_factory(tenant=tenant1, intended_use_id=1, share_numerator=1, share_denominator=1) contact1 = contact_factory(first_name="<NAME>", last_name="<NAME>", type=ContactType.PERSON) tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant1, contact=contact1, start_date=datetime.date(year=2000, month=1, day=1)) data = { 'lease': lease.id, 'recipient': contact1.id, 'due_date': '2019-01-01', 'rows': [ { 'amount': Decimal(10), 'receivable_type': 1, } ], } url = reverse('invoice-list') response = admin_client.post(url, data=json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json') assert response.status_code == 201, '%s %s' % (response.status_code, response.data) invoice = Invoice.objects.get(pk=response.data['id']) assert invoice.invoicing_date == timezone.now().date() assert invoice.outstanding_amount == Decimal(10) @pytest.mark.django_db def test_create_invoice_for_tenant(django_db_setup, admin_client, lease_factory, tenant_factory, tenant_rent_share_factory, contact_factory, tenant_contact_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1, start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True) tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=1) tenant_rent_share_factory(tenant=tenant1, intended_use_id=1, share_numerator=1, share_denominator=1) contact1 = contact_factory(first_name="<NAME>", last_name="<NAME>", type=ContactType.PERSON) tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant1, contact=contact1, start_date=datetime.date(year=2000, month=1, day=1)) data = { 'lease': lease.id, 'tenant': tenant1.id, 'due_date': '2019-01-01', 'rows': [ { 'amount': Decimal(10), 'receivable_type': 1, } ], } url = reverse('invoice-list') response = admin_client.post(url, data=json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json') assert response.status_code == 201, '%s %s' % (response.status_code, response.data) invoice = Invoice.objects.get(pk=response.data['id']) assert invoice.invoicing_date == timezone.now().date() assert invoice.outstanding_amount == Decimal(10) assert invoice.recipient == contact1 assert invoice.rows.first().tenant == tenant1 @pytest.mark.django_db def test_create_invoice_for_tenant_with_billing_contact(django_db_setup, admin_client, lease_factory, tenant_factory, tenant_rent_share_factory, contact_factory, tenant_contact_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1, start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True) tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=1) tenant_rent_share_factory(tenant=tenant1, intended_use_id=1, share_numerator=1, share_denominator=1) contact1 = contact_factory(first_name="<NAME>", last_name="<NAME>", type=ContactType.PERSON) tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant1, contact=contact1, start_date=datetime.date(year=2000, month=1, day=1)) contact2 = contact_factory(first_name="<NAME>", last_name="<NAME>", type=ContactType.PERSON) tenant_contact_factory(type=TenantContactType.BILLING, tenant=tenant1, contact=contact2, start_date=datetime.date(year=2000, month=1, day=1)) data = { 'lease': lease.id, 'tenant': tenant1.id, 'due_date': '2019-01-01', 'rows': [ { 'amount': Decimal(10), 'receivable_type': 1, } ], } url = reverse('invoice-list') response = admin_client.post(url, data=json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json') assert response.status_code == 201, '%s %s' % (response.status_code, response.data) invoice = Invoice.objects.get(pk=response.data['id']) assert invoice.invoicing_date == timezone.now().date() assert invoice.outstanding_amount == Decimal(10) assert invoice.recipient == contact2 assert invoice.rows.first().tenant == tenant1 @pytest.mark.django_db def test_create_invoice_tenant_not_in_lease(django_db_setup, admin_client, lease_factory, tenant_factory, tenant_rent_share_factory, contact_factory, tenant_contact_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1, start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True) tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=1) tenant_rent_share_factory(tenant=tenant1, intended_use_id=1, share_numerator=1, share_denominator=1) contact1 = contact_factory(first_name="<NAME>", last_name="<NAME>", type=ContactType.PERSON) tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant1, contact=contact1, start_date=datetime.date(year=2000, month=1, day=1)) lease2 = lease_factory(type_id=1, municipality_id=1, district_id=1, notice_period_id=1, start_date=datetime.date(year=2000, month=1, day=1), is_invoicing_enabled=True) tenant2 = tenant_factory(lease=lease2, share_numerator=1, share_denominator=1) tenant_rent_share_factory(tenant=tenant2, intended_use_id=1, share_numerator=1, share_denominator=1) contact2 = contact_factory(first_name="<NAME>", last_name="<NAME>", type=ContactType.PERSON) tenant_contact_factory(type=TenantContactType.TENANT, tenant=tenant2, contact=contact2, start_date=datetime.date(year=2000, month=1, day=1)) data = { 'lease': lease.id, 'tenant': tenant2.id, 'due_date': '2019-01-01', 'rows': [ { 'amount': Decimal(10), 'receivable_type': 1, } ], } url = reverse('invoice-list') response = admin_client.post(url, data=json.dumps(data, cls=DjangoJSONEncoder), content_type='application/json') assert response.status_code == 400, '%s %s' % (response.status_code, response.data)
7,475
scripts/reactor/banbanNormal.py
G00dBye/YYMS
54
2171866
hitCount = 0 # global hitCount # hitCount += 1 # sm.chat(str(hitCount)) # if hitCount >= 1: sm.spawnMob(9303154, -135, 455, False) sm.removeReactor() sm.dispose()
164
1-HRF-xgb/repre/graphconv.py
iamlockelightning/HIF-KAT
6
2172477
import torch import torch.nn as nn import torch.nn.functional as F from torch_geometric.nn import GCNConv, GATConv class GraphConvLayer(nn.Module): def __init__( self, adj_index, adj_weight, input_dim = 32, output_dim = 16, dropout = 0.4, activation = F.leaky_relu, device = "cuda", text_field = None, share = True, bias = False, residual = False ): assert text_field is not None super(GraphConvLayer, self).__init__() self.share = share self.residual = residual self.fields = text_field[:] self.activation = activation self.dropout = nn.Dropout(dropout) self.adj_index = adj_index self.adj_weight = adj_weight self.gcns = nn.ModuleDict() if residual: self.reslin = nn.ModuleDict() if share: gcn = GCNConv(input_dim, output_dim, cached = False, bias = bias, normalize = True, improved = True) if residual: lin = nn.Linear(input_dim, output_dim, bias = bias) for field in self.fields: self.gcns[field] = gcn if residual: self.reslin[field] = lin else: for field in self.fields: self.gcns[field] = GCNConv( input_dim, output_dim, cached = False, bias = bias, normalize = True, improved = True ) if residual: self.reslin[field] = nn.Linear(input_dim, output_dim, bias = bias) if "cuda" in device: self.cuda() def forward(self, batch): ret = dict() for field in self.fields: rep = batch[field] ### Add Dropout 2020-03-25 ### # rep = self.dropout(rep) ### rep = self.gcns[field](rep, self.adj_index, self.adj_weight) if self.residual: rep += self.reslin[field](batch[field]) rep = self.activation(rep) ret[field] = rep return ret # return {field: self.activation(self.gcns[field](batch[field], self.adj_index, self.adj_weight)) for field in self.fields} class MLP(nn.Module): def __init__( self, input_dim = 32, output_dim = 16, dropout = 0.4, activation = F.leaky_relu, device = "cuda", text_field = None, bias = False ): assert text_field is not None super(MLP, self).__init__() self.fields = text_field[:] self.activation = activation self.dropout = nn.Dropout(dropout) self.mlps = nn.ModuleDict() for field in self.fields: self.mlps[field] = torch.nn.Linear( in_features = input_dim, out_features = output_dim, bias = bias ) if "cuda" in device: self.cuda() def forward(self, batch): ret = dict() for field in self.fields: rep = batch[field] ### Add Dropout 2020-03-25 ### # rep = self.dropout(rep) ### rep = self.mlps[field](rep) rep = self.activation(rep) ret[field] = rep return ret class GraphAtteLayer(nn.Module): def __init__( self, adj_index, adj_weight, input_dim = 32, output_dim = 16, head_number = 8, ### New dropout = 0.4, activation = F.leaky_relu, device = "cuda", text_field = None, share = True, bias = False, residual = False, concat = False ): assert text_field is not None super(GraphAtteLayer, self).__init__() self.share = share self.residual = residual self.fields = text_field[:] self.activation = activation self.dropout = nn.Dropout(dropout) self.adj_index = adj_index self.adj_weight = adj_weight self.gats = nn.ModuleDict() if share: pass else: for field in self.fields: self.gats[field] = GATConv( input_dim, output_dim, head_number, concat = concat, dropout = dropout ) if "cuda" in device: self.cuda() def forward(self, batch): ret = dict() for field in self.fields: rep = batch[field] rep = self.gats[field](rep, self.adj_index) rep = self.activation(rep) ret[field] = rep return ret class HighWay(torch.nn.Module): def __init__(self, f_in, f_out, bias=True): super(HighWay, self).__init__() self.w = Parameter(torch.Tensor(f_in, f_out)) nn.init.xavier_uniform_(self.w) if bias: self.bias = Parameter(torch.Tensor(f_out)) nn.init.constant_(self.bias, 0) else: self.register_parameter('bias', None) def forward(self, ori_input, in_1, in_2): t = torch.mm(ori_input, self.w) if self.bias is not None: t = t + self.bias gate = torch.sigmoid(t) return gate * in_2 + (1.0 - gate) * in_1 class GraphConvHighWayLayer(nn.Module): def __init__( self, adj_index, adj_weight, input_dim = 32, output_dim = 16, dropout = 0.4, activation = F.leaky_relu, device = "cuda", text_field = None, share = True, bias = False, residual = False ): assert text_field is not None super(GraphConvHighWayLayer, self).__init__() self.share = share self.residual = residual self.fields = text_field[:] self.activation = activation self.dropout = nn.Dropout(dropout) self.adj_index = adj_index self.adj_weight = adj_weight self.gcns = nn.ModuleDict() self.reslin = nn.ModuleDict() self.highway_net = nn.ModuleDict() for field in self.fields: self.gcns[field] = GCNConv( input_dim, output_dim, cached = True, bias = bias, normalize = True, improved = True ) self.reslin[field] = nn.Linear(input_dim, output_dim, bias = bias) self.highway_net[field] = HighWay(f_in = input_dim, f_out = output_dim, bias = bias) if "cuda" in device: self.cuda() def forward(self, batch): ret = dict() for field in self.fields: ori_rep = batch[field] ### Add Dropout 2020-03-25 ### # rep = self.dropout(rep) ### gcn_rep = self.gcns[field](rep, self.adj_index, self.adj_weight) res_rep += self.reslin[field](ori_rep) rep = self.highway_net[field](ori_rep, res_rep, gcn_rep) rep = self.activation(rep) ret[field] = rep return ret
7,347
epicteller/core/controller/member.py
KawashiroNitori/epicteller
0
2172041
#!/usr/bin/env python # -*- coding: utf-8 -*- from typing import Optional, Dict, Union, Iterable import bcrypt from epicteller.core.dao.member import MemberDAO, MemberExternalDAO from epicteller.core.model.member import Member from epicteller.core.util import validator from epicteller.core.util.enum import ExternalType def _gen_passhash(password: str) -> str: passhash = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt(rounds=10)).decode('utf8') return passhash async def get_member(member_id: Optional[int]=None, *, url_token: Optional[str]=None, email: Optional[str]=None) -> Optional[Member]: if member_id: return (await MemberDAO.batch_get_member_by_id([member_id])).get(member_id) elif url_token: return (await MemberDAO.batch_get_member_by_url_token([url_token])).get(url_token) elif email: return await MemberDAO.get_member_by_email(email) return None async def batch_get_member(member_ids: Iterable[int]=None, *, url_tokens: Iterable[str]=None) -> Dict[Union[int, str], Member]: if member_ids: return await MemberDAO.batch_get_member_by_id(member_ids) elif url_tokens: return await MemberDAO.batch_get_member_by_url_token(url_tokens) return {} async def check_member_email_password(email: str, password: str) -> Optional[Member]: email = email.lower() member = await get_member(email=email) if not member: return matched = bcrypt.checkpw(password.encode('utf8'), member.passhash.encode('utf8')) if not matched: return return member async def create_member(name: str, email: str, password: str) -> Member: passhash = _gen_passhash(password) email = email.lower() member = await MemberDAO.create_member(name, email, passhash) external_id = validator.parse_external_id_from_qq_email(email) if external_id: await bind_member_external_id(member.id, ExternalType.QQ, external_id) return member async def change_member_password(member_id: int, password: str): passhash = _gen_passhash(password) await MemberDAO.update_member(member_id, passhash=passhash) async def bind_member_external_id(member_id: int, external_type: ExternalType, external_id: str) -> None: await MemberExternalDAO.bind_member_external_id(member_id, external_type, external_id) async def unbind_member_external_id(member_id: int, external_type: ExternalType) -> None: await MemberExternalDAO.unbind_member_external_id(member_id, external_type) async def get_member_externals(member_id: int) -> Dict[ExternalType, str]: return await MemberExternalDAO.get_external_ids_by_member(member_id) async def get_member_by_external(external_type: ExternalType, external_id: str) -> Optional[Member]: member_id = await MemberExternalDAO.get_member_id_by_external(external_type, external_id) if not member_id: return None return await get_member(member_id) async def update_member(member_id: int, **kwargs): await MemberDAO.update_member(member_id, **kwargs)
3,110
tests/test_lipid_tilt.py
blakeaw/ORBILT
11
2172110
from __future__ import print_function import pybilt.bilayer_analyzer.bilayer_analyzer as ba def test_lipid_tilt(): analyzer = ba.BilayerAnalyzer(structure='../pybilt/sample_bilayer/sample_bilayer.psf', trajectory='../pybilt/sample_bilayer/sample_bilayer_10frames.dcd', selection="resname POPC DOPE TLCL2") #remove the default msd analysis analyzer.remove_analysis('msd_1') analyzer.add_analysis("lipid_tilt lt leaflet lower resname POPC style order ref_axis z") analyzer.adjust_rep_setting('vector_frame', 'ref_atoms', {'DOPE':{'start': ['C218','C318'], 'end':'P'}, 'POPC':{'start':['C218', 'C316'], 'end':'P'}, 'TLCL2': {'start':['CA18','CB18','CC18', 'CD18'], 'end':['P1', 'P3']}}) analyzer.run_analysis() lt_dat = analyzer.get_analysis_data('lt') print('Lipid Tilts (vs time):') print(lt_dat) if __name__ == '__main__': test_lipid_tilt()
1,192
forNSF/soupextract.py
ctames/conference-host
1
2172486
from bs4 import BeautifulSoup, SoupStrainer import urllib2 from urlparse import urljoin def extractPdf(url, levels): outurls = [] #pdfs found within n levels thiscycle = [url] #List of urls to look through on current loop iteration linkstrainer = SoupStrainer("a") hdr = {'User-Agent': 'Mozilla/5.0'} for i in range (0, levels): nextcycle = [] #Urls to look at in next loop iteration for currurl in thiscycle: print currurl request = urllib2.Request(currurl, headers=hdr) try: page = urllib2.urlopen(request) except: print 'fuck' continue soup = BeautifulSoup(page, parse_only=linkstrainer) for link in soup.find_all('a'): linkurl = link.get('href') if not linkurl: continue elif linkurl[-4:] == '.pdf': print linkurl if linkurl[:4] == 'http': outurls.append(linkurl) else: finalurl = urljoin(currurl, linkurl) outurls.append(finalurl) elif i != levels-1: if linkurl[:4] == 'http': nextcycle.append(linkurl) else: finalurl = urljoin(currurl, linkurl) nextcycle.append(finalurl) if not nextcycle: return outurls thiscycle = nextcycle[:] return outurls
1,568
.Python Challenges - 101Computing/Random Library Challenges/postcode.py
Gustavo-daCosta/Projetos
2
2172305
#Your task is to write a Python script to generates a random UK postcode in the format: LetterLetterNumber_NumberLetterLetter. from random import randint postcode = '' for position in range(0, 7): if position == 2 or position == 4: postcode += str(randint(0, 9)) elif position == 3: postcode += ' ' else: postcode += str(chr(randint(65, 90))) print(f'Postcode: {postcode}')
412
authentication/urls.py
funsojoba/SendMe_finance_api
0
2171739
from django.urls import path from .views.register import RegisterView from .views.login import LoginView urlpatterns = [ path('register/', RegisterView.as_view()), path('login/', LoginView.as_view()) ]
212
Models/zodesolve.py
lefthandedroo/Cosmodels
0
2172304
7#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Feb 15 13:38:48 2018 @author: BallBlueMeercat """ from scipy.integrate import odeint from scipy.interpolate import interp1d import firstderivs_cython as f import numpy as np firstderivs_functions = { 'rainbow':f.rainbow, 'niagara':f.niagara, 'kanangra':f.kanangra, 'waterfall':f.waterfall, 'stepfall':f.stepfall, 'exotic':f.exotic, 'late_intxde':f.late_intxde, 'heaviside_late_int':f.heaviside_late_int, 'heaviside_sudden':f.heaviside_sudden, 'late_int':f.late_int, 'expgamma':f.expgamma, 'txgamma':f.txgamma, 'zxgamma':f.zxgamma, 'gamma_over_z':f.gamma_over_z, 'zxxgamma':f.zxxgamma, 'gammaxxz':f.gammaxxz, 'rdecay_m':f.rdecay_m, 'rdecay_de':f.rdecay_de, 'rdecay_mxde':f.rdecay_mxde, 'rdecay':f.rdecay, 'interacting':f.interacting, 'LCDM':f.LCDM, 'rLCDM':f.rLCDM } def zodesolve(names, values, zpicks, model, plot_key, interpolate=False): """ Takes in: names = list of strings, names of parameters to be fitted; values = np.array, values of parameters; zpicks = np.ndarray of redshifts ; model = string, name of model being tested. """ all_zpicks = zpicks if len(zpicks) > 1048: # larger than pantheon sample interpolate = True zpicks = np.linspace(zpicks[0], zpicks[-1], num=100, endpoint=True) # Inserting 0 at the front of redshifts to use initial conditions. zpicks = np.insert(zpicks, 0, 0.0) # Standard cosmological parameters. H0 = 1.0 c = 1.0 c_over_H0 = 4167 * 10**6 # c/H0 in parsecs # Initial conditions at z = 0 (now). t0 = 0.0 # time a0 = 1.0 # scale factor z0 = 0.0 # redshift dl0 = 0.0 # luminosity distance rho_c0 = H0**2 # critical density # Pack up the initial conditions and interaction terms. int_terms = [] if model == 'rainbow': int_in = 12 elif model == 'niagara': int_in = 10 elif model == 'kanangra': int_in = 8 elif model == 'waterfall': int_in = 6 elif model == 'stepfall': int_in = 4 elif model == 'exotic': int_in = 3 elif model == 'LCDM' or model == 'rLCDM': int_in = len(values) else: int_in = 2 int_terms = values[int_in:] fluids = values[1:int_in] ombar_de0 = rho_c0/rho_c0 - np.sum(fluids) t0a0 = np.array([t0, a0]) de0z0dl0 = np.array([ombar_de0, z0, dl0]) # Remember that you lost precision when concatenating arr over using a list. v0 = np.concatenate((t0a0, fluids, de0z0dl0)) # Extracting the parsed mode of interaction. firstderivs_function = firstderivs_functions.get(model,0) assert firstderivs_function != 0, "zodesolve doesn't have this firstderivs_key at the top" # Call the ODE solver with all zpicks or cut_zpicks if len(zpicks) > 2000. vsol = odeint(firstderivs_function, v0, zpicks, args=(int_terms,H0), mxstep=5000000, atol=1.0e-8, rtol=1.0e-6) z = vsol[1:,-2] dl = vsol[1:,-1] * (1+z) # in units of dl*(H0/c) da = dl * (1.0+z)**(-2.0) # in units of dl*(H0/c) dlpc = dl * c_over_H0 # dl in parsecs (= vsol[dl] * c/H0) dapc = dlpc * (1.0+z)**(-2.0) # in units of pc dapc = dapc / 10**3 # in units of kpc # integrated_dlpc = dlpc plot_var = {} if plot_key: # Separate results into their own arrays: plot_var['t'] = vsol[1:,0] plot_var['a'] = vsol[1:,1] # Collecting fluids and their names for plotting: fluid_arr = np.zeros(((int_in), (len(zpicks)-1))) fluid_names = [] for i in range((int_in-1)): fluid_names.append(names[i+1]) fluid_arr[i] = vsol[1:,(i+2)] fluid_names.append('de_ombar') fluid_arr[-1] = vsol[1:,-3] plot_var['fluid_names'] = fluid_names plot_var['fluid_arr'] = fluid_arr plot_var['z'] = z plot_var['dl'] = dl # in units of dl*(H0/c) plot_var['int_terms'] = int_terms plot_var['da'] = da Hz = H0 * (np.sum(fluid_arr, axis=0))**(0.5) plot_var['Hz'] = Hz daMpc = dlpc/10**6 * (1.0+z)**(-2.0) # in units of dl in Mpc*(H0/c) dV = (daMpc**2 * c*z/Hz)**(1/3) # combines radial and transverse dilation plot_var['dV'] = dV if interpolate: # Interpolating results to give output for all zpicks: interp_dlpc = interp1d(zpicks[1:], dlpc) interp_da = interp1d(zpicks[1:], da) dlpc = interp_dlpc(all_zpicks) da = interp_da(all_zpicks) # return dlpc, da, z, integrated_dlpc, plot_var return dlpc, da, plot_var
4,882
app/models/cluster_model.py
altmirai/piggycli
0
2172359
import app.utilities.ssh as ssh import json class Cluster: def __init__(self, client, id): self.client = client self.id = id @classmethod def all(cls, client): resp = client.describe_clusters() return resp['Clusters'] @property def hsms(self): return self.read()['Hsms'] @property def azs(self): subnet_mapping = self.read()['SubnetMapping'] azs = [] for key, value in subnet_mapping.items(): azs.append(key) return azs @property def csr(self): return self.read()['Certificates']['ClusterCsr'] @property def state(self): return self.read()['State'] def initialize(self, certs): assert self.state == 'UNINITIALIZED', 'Cluster state is not UNITIALIZED' assert certs.valid, 'Certificates not valid' self.client.initialize_cluster( ClusterId=self.id, SignedCert=certs.pem_hsm_cert.decode('UTF-8'), TrustAnchor=certs.pem_ca_cert.decode('UTF-8') ) return def activate( self, instance, crypto_officer_username, crypto_officer_password, crypto_user_username, crypto_user_password, ssh_key): eni_ip = self.hsms[0]['EniIp'] resp_json = ssh.activate_cluster( ip_address=instance.public_ip_address, ssh_key_file_path=ssh_key.ssh_key_file_path, eni_ip=eni_ip, crypto_officer_username=crypto_officer_username, crypto_officer_password=<PASSWORD>, crypto_user_username=crypto_user_username, crypto_user_password=<PASSWORD> ) resp = json.loads(resp_json) assert resp.get( 'error') is None, f"Activate cluster error: {resp['error']}" assert resp['crypto_officer']['username'] == crypto_officer_username assert resp['crypto_officer']['password'] == <PASSWORD>officer_password return True def read(self): resp = self.client.describe_clusters( Filters={'clusterIds': [self.id]}) return resp['Clusters'][0] def destroy(self): return False
2,176
stage2_cINN/AE/modules/LPIPS.py
CJWBW/image2video-synthesis-using-cINNs
85
2172377
"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models""" import torch import torch.nn as nn from stage2_cINN.AE.modules.vgg16 import vgg16, normalize_tensor, spatial_average from stage2_cINN.AE.modules.ckpt_util import get_ckpt_path class LPIPS(nn.Module): # Learned perceptual metric def __init__(self, use_dropout=True): super().__init__() self.scaling_layer = ScalingLayer() self.chns = [64, 128, 256, 512, 512] self.net = vgg16(pretrained=True, requires_grad=False) self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout) self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout) self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout) self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout) self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout) self.load_from_pretrained() for param in self.parameters(): param.requires_grad = False def load_from_pretrained(self, name="vgg_lpips"): ckpt = get_ckpt_path(name) self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) print("loaded pretrained LPIPS loss from {}".format(ckpt)) @classmethod def from_pretrained(cls, name="vgg_lpips"): if name is not "vgg_lpips": raise NotImplementedError model = cls() ckpt = get_ckpt_path(name) model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False) return model def forward(self, input, target): in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target)) outs0, outs1 = self.net(in0_input), self.net(in1_input) feats0, feats1, diffs = {}, {}, {} lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4] for kk in range(len(self.chns)): feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk]) diffs[kk] = (feats0[kk] - feats1[kk]) ** 2 res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))] val = res[0] for l in range(1, len(self.chns)): val += res[l] return val class ScalingLayer(nn.Module): def __init__(self): super(ScalingLayer, self).__init__() self.register_buffer('shift', torch.Tensor([-.030, -.088, -.188])[None, :, None, None]) self.register_buffer('scale', torch.Tensor([.458, .448, .450])[None, :, None, None]) def forward(self, inp): return (inp - self.shift) / self.scale class NetLinLayer(nn.Module): """ A single linear layer which does a 1x1 conv """ def __init__(self, chn_in, chn_out=1, use_dropout=False): super(NetLinLayer, self).__init__() layers = [nn.Dropout(), ] if (use_dropout) else [] layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ] self.model = nn.Sequential(*layers)
3,048
hello-world/comments.py
selvendiranj/python-tutorial
0
2172389
""" understood various blocks even if they are without braces """ # First comment print ("Hello, Python!" # second comment) name = "Madisetti" # This is again comment # This is a comment. # This is a comment, too. # This is a comment, too. # I said that already.
266
benchmarks_sphere/paper_jrn_jfm_ppeixoto/benchmark_specific_settings.py
valentinaschueller/sweet
6
2170357
# --------------------------------------------- # Class to setup spherical modes initialization # author: <NAME> <<EMAIL>> # Oct 2021 # ---------------------------------------- import numpy as np import pickle from numpy.lib.function_base import append import pandas as pd import re import os import os.path import matplotlib #matplotlib.use('TkAgg') import matplotlib.pyplot as plt import matplotlib.colors as colors from matplotlib.lines import Line2D import matplotlib.ticker as mtick from mule.postprocessing.JobData import * #----------------------------------------------------------------- # Test Cases for different settings of modes initialization # > Used for pre-processing, to creat jobs for sweet #----------------------------------------------------------------- class modes_TC1: #Init with energy in full shells from n_ini to n_end def __init__(self, n_ini, n_end, m_ini, alpha_min, alpha_max, alpha_samples): self.alpha = np.linspace(alpha_min, alpha_max, alpha_samples, endpoint=False) # Select shells for initial energy # Remember n >= m, and m=n, ..., N, where N it the max wavenumber (space_res_spectral) # n defines the shell self.nmodes=[] self.mmodes=[] self.ampls=[] self.n_ini = n_ini self.n_end = n_end self.m_ini = m_ini count_modes = 0 code="" for n in range(n_ini, n_end+1): for m in range(m_ini, n+1): self.nmodes.append(n) self.mmodes.append(m) self.ampls.append(1.0) count_modes+=1 self.count_modes = count_modes codes = [] print() print("Mode init params:") for a in self.alpha: print() print("alpha = ", a) print("i n m amp") code = str(self.count_modes) for i in range(self.count_modes): code+="_"+str(self.nmodes[i])+"_"+str(self.mmodes[i])+"_"+str(a*self.ampls[i]) print(i, self.nmodes[i], self.mmodes[i], a*self.ampls[i]) codes.append(code) self.codes = codes print(codes) def save_file(self, filename): with open(filename, 'wb') as f: # Pickle the 'data' dictionary using the highest protocol available. pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) class modes_TC2: #list of initial modes def __init__(self, n_list, m_list, alpha_min, alpha_max, alpha_samples, back_n_min=0, back_n_max=0, back_ampl=0.1): self.alpha = np.linspace(alpha_min, alpha_max, alpha_samples, endpoint=False) # Select shells for initial energy # Remember n >= m, and m=n, ..., N, where N it the max wavenumber (space_res_spectral) # n defines the shell self.nmodes=n_list self.mmodes=m_list self.ampls=[] self.n_ini = min(n_list) self.n_end = max(n_list) self.m_ini = min(m_list) count_modes = 0 code="" for n in n_list: self.ampls.append(1.0) count_modes+=1 self.count_modes = count_modes list_modes = count_modes #add energy on other modes (background energy) n_ini = back_n_min n_end = back_n_max m_ini = 0 if n_ini != 0 and n_end != 0: for n in range(n_ini, n_end+1): for m in range(m_ini, n+1): if (n,m) in zip(n_list, m_list): continue else: self.nmodes.append(n) self.mmodes.append(m) self.ampls.append(back_ampl) count_modes+=1 self.count_modes = count_modes codes = [] print() print("Mode init params:") for a in self.alpha: print() print("alpha = ", a) print("i n m amp") code = str(self.count_modes) for i in range(self.count_modes): if i < list_modes: code+="_"+str(self.nmodes[i])+"_"+str(self.mmodes[i])+"_"+str(a*self.ampls[i]) print(i, self.nmodes[i], self.mmodes[i], a*self.ampls[i]) else: code+="_"+str(self.nmodes[i])+"_"+str(self.mmodes[i])+"_"+str(self.ampls[i]) print(i, self.nmodes[i], self.mmodes[i], self.ampls[i]) codes.append(code) self.codes = codes print(codes) def save_file(self, filename): with open(filename, 'wb') as f: # Pickle the 'data' dictionary using the highest protocol available. pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) class modes_TC3: #list of initial modes and list of background modes def __init__(self, n_list, m_list, n_list_back, m_list_back, alpha_min, alpha_max, alpha_samples, back_n_min=0, back_n_max=0, back_ampl=0.1): self.alpha = np.linspace(alpha_min, alpha_max, alpha_samples, endpoint=False) # Select shells for initial energy # Remember n >= m, and m=n, ..., N, where N it the max wavenumber (space_res_spectral) # n defines the shell self.nmodes=n_list+n_list_back self.mmodes=m_list+m_list_back self.ampls=[] self.n_ini = min(n_list) self.n_end = max(n_list) self.m_ini = min(m_list) count_modes = 0 code="" for n in n_list: self.ampls.append(1.0) count_modes+=1 self.count_modes = count_modes list_modes = count_modes for n in n_list_back: self.ampls.append(1.0) count_modes+=1 self.count_modes = count_modes #add energy on other modes (background energy) n_ini = back_n_min n_end = back_n_max m_ini = 0 if n_ini != 0 and n_end != 0: for n in range(n_ini, n_end+1): for m in range(m_ini, n+1): if (n,m) in zip(self.nmodes, self.mmodes): continue else: self.nmodes.append(n) self.mmodes.append(m) self.ampls.append(back_ampl) count_modes+=1 self.count_modes = count_modes codes = [] print() print("Mode init params:") for a in self.alpha: print() print("alpha = ", a) print("i n m amp") code = str(self.count_modes) for i in range(self.count_modes): if i < list_modes: code+="_"+str(self.nmodes[i])+"_"+str(self.mmodes[i])+"_"+str(a*self.ampls[i]) print(i, self.nmodes[i], self.mmodes[i], a*self.ampls[i]) else: code+="_"+str(self.nmodes[i])+"_"+str(self.mmodes[i])+"_"+str(self.ampls[i]) print(i, self.nmodes[i], self.mmodes[i], self.ampls[i]) codes.append(code) self.codes = codes print(codes) def save_file(self, filename): with open(filename, 'wb') as f: # Pickle the 'data' dictionary using the highest protocol available. pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) #Read a test case object for post-processing def load_file(filename): f = open(filename, 'rb') obj = pickle.load(f) f.close() return obj
7,693
nbx/nbmanager/tagged_gist/tests/test_notebook_gist.py
dalejung/nbx
2
2171869
from ..notebook_gisthub import NotebookGistHub from ..gisthub import GistHub from .test_gisthub import generate_gisthub from nbx.tools import assert_items_equal from nbx.nbmanager.tests.common import ( hub, require_github, make_notebookgist, ) class TestNotebookGist: def test_notebookgist(self): nb = make_notebookgist() assert nb.suffix == "[123].ipynb" assert nb.key_name == "Test Gist [123].ipynb" # test pass through via __getattr__ assert nb.id == 123 assert_items_equal(nb.files.keys(), ['a.ipynb', 'b.ipynb', 'test.txt']) def test_strip_gist_id(self): nb = make_notebookgist() key_name = nb.key_name name = nb.strip_gist_id(key_name) assert nb.name == name def test_key_name(self): " Test that key_name rebuilds when name is changed " nb = make_notebookgist() nb.name = "test" assert nb.key_name == "test [123].ipynb" def test_notebook_content(self): nb = make_notebookgist() content = nb.notebook_content assert content == "a.ipynb content" nb.notebook_content = 'new nb content' assert nb.notebook_content == 'new nb content' def test_generate_payload(self): nb = make_notebookgist() payload = nb._generate_payload() assert_items_equal(payload['files'].keys(), ['a.ipynb']) nb.notebook_content = 'new nb content' assert nb.notebook_content == 'new nb content' def test_generate_description(self): """ NotebookGist._generate_description will generate a proper description string to reflect name, active, and tags """ nb = make_notebookgist() # make sure notebook isn't in tags assert '#notebook' not in nb.tags desc = nb._generate_description() # the description should insert the #notebook tag assert '#notebook' in desc # test that inactive gets added assert '#inactive' not in desc nb.active = False test = nb._generate_description() assert '#inactive' in test # change name nb.name = "WOO" test = nb._generate_description() assert test == "WOO #notebook #inactive #pandas #woo" # change tags nb.tags = ["#newtag"] test = nb._generate_description() assert test == "WOO #notebook #inactive #newtag" def test_get_revision_content(self): nb = make_notebookgist() revisions = nb.revisions # a.ipynb is only revision 0 and 1 keys = map(lambda x: x['id'], revisions) assert list(keys) == [0, 1] assert nb.get_revision_content(0) == "a.ipynb_0_revision_content" assert nb.get_revision_content(1) == "a.ipynb_1_revision_content" def test_save(self): # test content/name change nb = make_notebookgist() gisthub = nb.gisthub nb.notebook_content = 'test' nb.name = "BOB" gisthub.save(nb) assert nb.gist.edit.call_count == 1 args = nb.gist.edit.call_args[0] fo = args[1]['a.ipynb'] assert fo._InputFileContent__content == 'test' assert args[0] == "BOB #notebook #pandas #woo" nb.active = False gisthub.save(nb) assert nb.gist.edit.call_count == 2 args = nb.gist.edit.call_args[0] fo = args[1]['a.ipynb'] assert fo._InputFileContent__content == 'test' assert args[0] == "BOB #notebook #inactive #pandas #woo" def setup_notebookgisthub(): names = [ "Test gist #frank #notebook", "Frank bob number 2 #frank #bob #notebook", "bob inactive #bob #inactive #notebook", "bob twin #bob #twin #notebook", "bob twin #bob #twin #notebook", "not a notebook #bob", ] gh = generate_gisthub(names) ngh = NotebookGistHub(gh) return ngh class TestNotebookGistHub: def test_query(self): ngh = setup_notebookgisthub() results = ngh.query('#bob') test = results['#bob'] for key, gist in test.items(): # make sure we are keying by keyname and not gist.id assert key == gist.key_name names = [gist.name for gist in test.values()] # test that we always check for #notebook via filter_tag assert 'not a notebook' not in names assert '#notebook' not in results.keys() @require_github def test_live_query(self): gisthub = GistHub(hub) nbhub = NotebookGistHub(gisthub) nbhub.query()
4,620
scripts/evaluate-script/run_evaluation.py
scc-usc/covid19-forecast-bench
3
2169523
import os import datetime import shutil import evaluate import evaluate_eu models = [] # Evaluate US with open("models.txt", "w") as f: for directory in os.listdir("../../formatted-forecasts/US-COVID/state-death/"): if os.path.isdir("../../formatted-forecasts/US-COVID/state-death/" + directory): models.append(directory) f.write(directory + '\n') with open("forecasts_filenames.txt", "w") as f: for m in models: if os.path.isdir("../../formatted-forecasts/US-COVID/state-death/" + m): for csv in os.listdir("../../formatted-forecasts/US-COVID/state-death/" + m): date_num = (datetime.datetime.now() - datetime.datetime(2020, 1, 22)).days for i in range(32): date_num -= 1 if "_{}.csv".format(date_num) in csv: f.write(csv + '\n') evaluate.run() shutil.rmtree("../../evaluation/US-COVID/") shutil.copytree("./output/", "../../evaluation/US-COVID/") for directory in os.listdir("./output/"): if os.path.isdir("./output/{}".format(directory)): shutil.rmtree("./output/{}".format(directory)) else: os.remove("./output/{}".format(directory)) # Clear txt files. open("models.txt", 'w').close() open("forecasts_filenames.txt", 'w').close() # Evaluate EU models.clear() with open("models.txt", "w") as f: for directory in os.listdir("../../formatted-forecasts/EU-COVID/eu-death/"): if os.path.isdir("../../formatted-forecasts/EU-COVID/eu-death/" + directory): models.append(directory) f.write(directory + '\n') with open("forecasts_filenames.txt", "w") as f: for m in models: if os.path.isdir("../../formatted-forecasts/EU-COVID/eu-death/" + m): for csv in os.listdir("../../formatted-forecasts/EU-COVID/eu-death/" + m): date_num = (datetime.datetime.now() - datetime.datetime(2020, 1, 22)).days for i in range(32): date_num -= 1 if "_{}.csv".format(date_num) in csv: f.write(csv + '\n') evaluate_eu.run() shutil.rmtree("../../evaluation/EU-COVID/") shutil.copytree("./output/", "../../evaluation/EU-COVID/") for directory in os.listdir("./output/"): if os.path.isdir("./output/{}".format(directory)): shutil.rmtree("./output/{}".format(directory)) else: os.remove("./output/{}".format(directory)) # Clear txt files. open("models.txt", 'w').close() open("forecasts_filenames.txt", 'w').close()
2,555
demo/myimages/imagefiles/apps.py
ResonantGeoData/django-large-image
4
2170705
import logging from django.apps import AppConfig from django.conf import settings class MyImagesConfig(AppConfig): name = 'myimages.imagefiles' verbose_name = 'My Image Files' def ready(self): if not getattr(settings, 'DEBUG', False): logging.getLogger('gdal').setLevel(logging.ERROR) logging.getLogger('large_image').setLevel(logging.ERROR) logging.getLogger('tifftools').setLevel(logging.ERROR) logging.getLogger('pyvips').setLevel(logging.ERROR) logging.getLogger('PIL').setLevel(logging.ERROR)
582
tests/test_wrappers.py
lsnty5190/torchmd-net
0
2172224
import pytest from pytest import mark from torchmdnet import models from torchmdnet.models.model import create_model from torchmdnet.models.wrappers import AtomFilter from utils import load_example_args, create_example_batch @mark.parametrize("remove_threshold", [-1, 2, 5]) @mark.parametrize("model_name", models.__all__) def test_atom_filter(remove_threshold, model_name): # wrap a representation model using the AtomFilter wrapper model = create_model(load_example_args(model_name, remove_prior=True)) model = model.representation_model model = AtomFilter(model, remove_threshold) z, pos, batch = create_example_batch(n_atoms=100) x, v, z, pos, batch = model(z, pos, batch, None, None) assert (z > remove_threshold).all(), ( f"Lowest updated atomic number is {z.min()} but " f"the atom filter is set to {remove_threshold}" ) assert len(z) == len( pos ), "Number of z and pos values doesn't match after AtomFilter" assert len(z) == len( batch ), "Number of z and batch values doesn't match after AtomFilter"
1,094
visuals/apps.py
mujeebishaque/visual-analytics
0
2170185
from django.apps import AppConfig class VisualsConfig(AppConfig): default_auto_field = 'django.db.models.BigAutoField' name = 'visuals'
146
index.py
Max-C-G/review-app
0
2171539
from flask import Flask, render_template from flask import request from sklearn.naive_bayes import MultinomialNB # from sklearn.metrics import mean_squared_error as mse from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import math import os import pickle # load the model from disk model_file = 'nb_model_final.sav' vocab_file = 'vocabulary.p' nb_classifier = pickle.load(open(model_file, 'rb')) vocab = pickle.load(open(vocab_file, 'rb')) app = Flask(__name__) @app.route('/', methods=['GET']) def get_index(): # print('testing') return render_template('index.html') @app.route('/', methods=['POST']) def predict_score(): # print('review: ', request.form['review']) review = request.form['review'] clf = nb_classifier # print(result) count_vect_test = CountVectorizer(vocabulary=vocab) tester_counts = count_vect_test.fit_transform([review]) tfidf_transformer = TfidfTransformer() tester_tfidf = tfidf_transformer.fit_transform(tester_counts) prediction = clf.predict(tester_tfidf) print('prediction: ', prediction) return render_template('index.html', review = request.form['review'], rating = str(prediction[0]))
1,348
controllers/rvoavoidercontroller.py
gavincangan/alvin
0
2171618
from controller import Controller import rvo2, pyglet from common import Twist, M_TO_PIXELS, MAX_LINEAR_SPEED, MAX_ANGULAR_SPEED from math import cos, sin, sqrt, pi, atan2 """ A reactive collision avoidance strategy which makes use of the RVO2 library. Important: All units in pixels! """ class RVOAvoiderController(Controller): NUMBER_PREF_VELS = 11 ANGLE_MIN = -pi/2.0 ANGLE_MAX = pi/2.0 SIM_STEPS = 1 def __init__(self, sim_steps=1): self.SIM_STEPS = sim_steps # Angles of preferred velocities that will be tested each iteration. angles = [] angle_delta = (self.ANGLE_MAX - self.ANGLE_MIN) / \ (self.NUMBER_PREF_VELS - 1) for i in range(self.NUMBER_PREF_VELS): angles.append(self.ANGLE_MIN + i * angle_delta) self.pref_vels = [] for angle in angles: self.pref_vels.append((MAX_LINEAR_SPEED * cos(angle), \ MAX_LINEAR_SPEED * sin(angle))) self.last_index = angles.index(0) self.last_mag = float('inf') def draw_line_from_robot(self, robot, vx, vy, red, green, blue, thickness): x1 = (robot.body.position.x) y1 = (robot.body.position.y) world_angle = robot.body.angle + atan2(vy, vx) mag = sqrt(vx*vx + vy*vy) x2 = int(robot.body.position.x + mag * cos(world_angle)) y2 = int(robot.body.position.y + mag * sin(world_angle)) pyglet.gl.glLineWidth(thickness) pyglet.graphics.draw(2, pyglet.gl.GL_LINES, ('v2f', (x1, y1, x2, y2)), ('c3B', (red, green, blue, red, green, blue))) pyglet.gl.glLineWidth(1) def react(self, robot, sensor_suite, visualize=False): range_scan = sensor_suite.range_scan #puck_scan = sensor_suite.puck_scan # We seem to have to create a new simulator object each time because # otherwise it would contain the obstacles from the last time step. # If there was a 'removeObstacle' method it would be a bit nicer. sim = rvo2.PyRVOSimulator(1/60., # Time step 1.5, # neighborDist 5, # maxNeighbors 1.5, # timeHorizon (other agents) 1.5, #2 # timeHorizon (obstacles) robot.radius, # agent radius MAX_LINEAR_SPEED) # agent max speed agent = sim.addAgent((0, 0)) # Add range scan points as obstacles for the RVO simulator n = len(range_scan.ranges) points = [] for i in range(0, n): rho = range_scan.INNER_RADIUS + range_scan.ranges[i] #if not (rho == float('inf') or isnan(rho)): theta = range_scan.angles[i] points.append((rho*cos(theta), rho*sin(theta))) # Add pucks from the puck scan #for puck in puck_scan.pucks: # rho = puck.distance # theta = puck.angle # points.append((rho*cos(theta), rho*sin(theta))) # Add fake points behind the robot to make it think twice about going # backwards. #n_fake = 0 #start_angle = range_scan.ANGLE_MAX #stop_angle = range_scan.ANGLE_MIN + 2*pi #angle_delta = (stop_angle - start_angle) / (n_fake - 1) #for i in range(n_fake): # theta = start_angle + i * angle_delta # rho = 2 * robot.radius # points.append((rho*cos(theta), rho*sin(theta))) # if visualize: # vx,vy = rho*cos(theta), rho*sin(theta) # self.draw_line_from_robot(robot, vx, vy, 0, 0, 255, 1) # The scan points will be treated together as a single "negative" # obstacle, with vertices specified in CW order. This requires the # following sort. points.sort(key = lambda p: -atan2(p[1], p[0])) sim.addObstacle(points) sim.processObstacles() # Get the velocity in the robot reference frame with the clockwise # rotation matrix cos_theta = cos(robot.body.angle) sin_theta = sin(robot.body.angle) cur_vx = robot.body.velocity.x * cos_theta + \ robot.body.velocity.y * sin_theta cur_vy = -robot.body.velocity.x * sin_theta + \ robot.body.velocity.y * cos_theta # To prevent oscillation we will generally just test the preferred # velocities in the immediate neighbourhood (within the pref_vels list) # of the preferred velocity chosen last time. if self.last_mag < 20: # Last time the magnitude of the chosen velocity was very low. # Do a full search over the preferred velocities. start_index = 0 stop_index = self.NUMBER_PREF_VELS - 1 elif self.last_index == 0: start_index = 0 stop_index = 1 elif self.last_index == len(self.pref_vels)-1: start_index = self.NUMBER_PREF_VELS - 2 stop_index = self.NUMBER_PREF_VELS - 1 else: # This is the general case. start_index = self.last_index - 1 stop_index = self.last_index + 1 highest_mag = 0 chosen_vel = None chosen_index = None for i in range(start_index, stop_index+1): pref_vel = self.pref_vels[i] # Initializing from scratch each time sim.setAgentPosition(agent, (0, 0)) sim.setAgentVelocity(agent, (cur_vx, cur_vy)) sim.setAgentPrefVelocity(agent, pref_vel) for j in range(self.SIM_STEPS): sim.doStep() (vx, vy) = sim.getAgentVelocity(0) #print "vel: {}, {}".format(vx, vy) if visualize: self.draw_line_from_robot(robot, vx, vy, 255, 255, 255, 3) mag = sqrt(vx*vx + vy*vy) if mag > highest_mag: highest_mag = mag chosen_vel = (vx, vy) chosen_index = i self.last_index = chosen_index self.last_mag = highest_mag #print "highest_mag: {}".format(highest_mag) #chosen_vel = (avg_vx / len(self.pref_vels), # avg_vy / len(self.pref_vels)) if visualize and chosen_vel != None: self.draw_line_from_robot(robot, chosen_vel[0], chosen_vel[1], 255, 0, 127, 5) #print "MAX_LINEAR_SPEED: {}".format(MAX_LINEAR_SPEED) #print "current_vel: {}, {}".format(cur_vx, cur_vy) #print "MAG OF current_vel: {}".format(sqrt(cur_vx**2+ cur_vy**2)) #print "chosen_vel: {}, {}".format(chosen_vel[0], chosen_vel[1]) #print "MAG OF chosen_vel: {}".format(sqrt(chosen_vel[0]**2+ chosen_vel[1]**2)) # Now treat (vx, vy) as the goal and apply the simple control law twist = Twist() if chosen_vel != None: twist.linear = 0.1 * chosen_vel[0] twist.angular = 0.02 * chosen_vel[1] else: print "NO AVAILABLE VELOCITY!" #for r in range_scan.ranges: # print r return twist
7,253
management_api_app/api/dependencies/database.py
LizaShak/AzureTRE
2
2172133
import logging from typing import Callable, Type from azure.cosmos import CosmosClient from fastapi import Depends, FastAPI, HTTPException from starlette.requests import Request from starlette.status import HTTP_503_SERVICE_UNAVAILABLE from core import config from db.errors import UnableToAccessDatabase from db.repositories.base import BaseRepository from resources import strings def connect_to_db() -> CosmosClient: logging.debug(f"Connecting to {config.STATE_STORE_ENDPOINT}") try: if config.DEBUG: # ignore TLS(setup is pain) when on dev container and connecting to cosmosdb on windows host. cosmos_client = CosmosClient(config.STATE_STORE_ENDPOINT, config.STATE_STORE_KEY, connection_verify=False) else: cosmos_client = CosmosClient(config.STATE_STORE_ENDPOINT, config.STATE_STORE_KEY) logging.debug("Connection established") return cosmos_client except Exception as e: logging.debug(f"Connection to state store could not be established: {e}") def get_db_client(app: FastAPI) -> CosmosClient: if not app.state.cosmos_client: app.state.cosmos_client = connect_to_db() return app.state.cosmos_client def get_db_client_from_request(request: Request) -> CosmosClient: return get_db_client(request.app) def get_repository(repo_type: Type[BaseRepository]) -> Callable[[CosmosClient], BaseRepository]: def _get_repo(client: CosmosClient = Depends(get_db_client_from_request)) -> BaseRepository: try: return repo_type(client) except UnableToAccessDatabase: raise HTTPException(status_code=HTTP_503_SERVICE_UNAVAILABLE, detail=strings.STATE_STORE_ENDPOINT_NOT_RESPONDING) return _get_repo
1,796
lib/sds/metrics/metricscollector.py
GeorryHuang/galaxy-sdk-python
17
2172450
import Queue import time from sds.admin.ttypes import ClientMetrics import threading from sds.metrics.Common import UPLOAD_INTERVAL class MetricsCollector: def __init__(self, metric_admin_client): self.queue = Queue.Queue(0) self.metric_admin_client = metric_admin_client metric_upload_thread = MetricUploadThread(self.queue, self.metric_admin_client) metric_upload_thread.setDaemon(True) metric_upload_thread.start() def collect(self, request_metrics): client_metrics = request_metrics.to_client_metrics() for k in client_metrics.metricDataList: self.queue.put(k) class MetricUploadThread(threading.Thread): def __init__(self, queue, metric_admin_client): super(MetricUploadThread, self).__init__() self.queue = queue self.name = "sds-python-sdk-metrics-uploader" self.metric_admin_client = metric_admin_client def run(self): while True: try: start_time = time.time() * 1000 client_metrics = ClientMetrics() metrics_data_list = [] while True: elapsed_time = time.time() * 1000 - start_time if elapsed_time > UPLOAD_INTERVAL: break else: try: metricData = self.queue.get(True, (UPLOAD_INTERVAL - elapsed_time) / 1000) except Queue.Empty as em: break metrics_data_list.append(metricData) client_metrics.metricDataList = metrics_data_list self.metric_admin_client.putClientMetrics(client_metrics) except Exception as e: pass
1,564
utils/loss.py
HibikiJie/MONet
0
2172300
from torch import nn import torch class FocalLoss(nn.Module): def __init__(self, gamma=2, alpha=0.25, r=1e-19): """ :param gamma: gamma>0减少易分类样本的损失。使得更关注于困难的、错分的样本。越大越关注于困难样本的学习 :param alpha:调节正负样本比例 :param r:数值稳定系数。 """ super(FocalLoss, self).__init__() self.gamma = gamma self.alpha = alpha self.bce_loss = nn.BCELoss() self.r = r def forward(self, p, target): target = target.float() p_min = p.min() p_max = p.max() if p_min < 0 or p_max > 1: raise ValueError('The range of predicted values should be [0, 1]') p = p.reshape(-1, 1) target = target.reshape(-1, 1) loss = -self.alpha * (1 - p) ** self.gamma * (target * torch.log(p + self.r)) - \ (1 - self.alpha) * p ** self.gamma * ((1 - target) * torch.log(1 - p + self.r)) return loss.mean() class FocalLossManyClassification(nn.Module): def __init__(self, num_class, alpha=None, gamma=2, smooth=None, epsilon=1e-19): """ FocalLoss,适用于多分类。输入带有softmax,无需再softmax。 :param num_class: 类别数。 :param alpha: 各类别权重系数,输入列表,长度需要与类别数相同。 :param gamma: 困难样本学习力度 :param smooth: 标签平滑系数 :param epsilon: 数值稳定系数 """ super(FocalLossManyClassification, self).__init__() self.num_class = num_class self.alpha = alpha self.gamma = gamma self.smooth = smooth if self.alpha is None: self.alpha = torch.ones(self.num_class, 1) elif isinstance(self.alpha, list): assert len(self.alpha) == self.num_class self.alpha = torch.FloatTensor(alpha).view(self.num_class, 1) self.alpha = self.alpha / self.alpha.sum() else: raise TypeError('Not support alpha type') if self.smooth is not None: if self.smooth < 0 or self.smooth > 1.0: raise ValueError('Smooth value should be in [0,1]') self.epsilon = epsilon def forward(self, input_, target): '''softmax激活''' logit = torch.softmax(input_, dim=1) if logit.dim() > 2: raise ValueError('The input dimension should be 2') target = target.reshape(-1, 1) alpha = self.alpha if alpha.device != input_.device: alpha = alpha.to(input_.device) idx = target.cpu().long() one_hot_key = torch.FloatTensor(target.size(0), self.num_class).zero_() one_hot_key = one_hot_key.scatter_(1, idx, 1) if one_hot_key.device != logit.device: one_hot_key = one_hot_key.to(logit.device) if self.smooth: one_hot_key = torch.clamp( one_hot_key, self.smooth, 1.0 - self.smooth) pt = (one_hot_key * logit).sum(1) + self.epsilon log_pt = pt.log() alpha = alpha[idx] loss = -1 * alpha * ((1 - pt) ** self.gamma) * log_pt return loss.mean() if __name__ == '__main__': f = FocalLossManyClassification(10, alpha=[1, 2, 15, 4, 8, 6, 7, 7, 9, 4], smooth=0.1) predict = torch.randn(64, 10, requires_grad=True) targets = torch.randint(0, 9, (64,)) loss = f(torch.sigmoid(predict), targets) print(loss) loss.backward() # print(targets)
3,322
src/default_documents/migrations/0048_auto_20160215_1502.py
Talengi/phase
8
2172301
# -*- coding: utf-8 -*- from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('default_documents', '0047_auto_20160211_0835'), ] operations = [ migrations.RenameField( model_name='contractordeliverablerevision', old_name='trs_comments', new_name='file_transmitted', ), ]
397
scripts/generate-event-enum.py
GaloisInc/myxine
41
2170919
#! /usr/bin/env python3 import sys import json from typing import * def generate_rust(events): # Generate names of variants variants = [] for name_words, properties in events: name = ''.join(word.title() for word in name_words) variants.append((name, properties)) # Actually output the text lines = [] lines.append('#[non_exhaustive]') lines.append('#[derive(Clone, Debug, Serialize, Deserialize)]') lines.append('#[serde(rename_all = "lowercase", tag = "event", content = "properties")]') lines.append('enum Event {') for name, properties in variants: lines.append(' #[non_exhaustive]') lines.append(' ' + name + ' { ') items = list(properties.items()) items.sort() for field, type in items: lines.append(' ' + field + ': ' + type + ',') lines.append(' },') lines.append('}') return '\n'.join(lines) languages = { 'rust': generate_rust } def main(): try: _, language, filename = sys.argv except: print("Wrong number of arguments: please specify output language and interface definition JSON file.", file=sys.stderr) try: generate = languages[language] except: print("Invalid language: " + language, file=sys.stderr) try: with open(filename) as x: spec = json.loads(x.read()) except: print("Couldn't open file: " + filename, file=sys.stderr) spec_events = spec['events'] spec_interfaces = spec['interfaces'] events = [] for event, event_info in spec_events.items(): interface = event_info['interface'] name_words = event_info['nameWords'] fields = accum_fields(interface, spec_interfaces) events.append((name_words, fields)) print(generate(events)) # Accumulate all the fields in all super-interfaces of the given interface def accum_fields(interface, interfaces): properties = {} while True: for property, type in interfaces[interface]['properties'].items(): if properties.get(property) is None: properties[property] = type if interfaces[interface]['inherits'] is None: break else: interface = interfaces[interface]['inherits'] return properties if __name__ == '__main__': main()
2,281
python/calculator/main.py
cccaaannn/PracticeProjects
0
2171546
def menu(): print("---------- ---------- Welcome to calculator ---------- ----------\n") valid_operations = (0, 1, 2, 3, 4) while True: operation = input("---------- Please choose operation ----------\n0-exit\n1-sum\n2-subtract\n3-multiply\n4-divide\n: ") print() try: operation = int(operation) if(operation not in valid_operations): raise ValueError except: print("Invalid value entered\n") continue if(operation == 0): break else: try: num1 = float(input("Enter num1: ")) num2 = float(input("Enter num2: ")) except: print("Invalid value entered\n") continue result = "unknown" match operation: case 1: result = num1 + num2 case 2: result = num1 - num2 case 3: result = num1 * num2 case 4: result = num1 / num2 case _: continue print(f"Result is: {result}") print("---------- ---------- ---------- ---------- ----------\n") if __name__ == '__main__': menu()
1,322
sopa/src/models/odenet_mnist/layers.py
SamplingAndEnsemblingSolvers/SamplingAndEnsemblingSolvers
25
2171509
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from functools import partial class MetaODEBlock(nn.Module): def __init__(self, activation_type = 'relu'): super(MetaODEBlock, self).__init__() self.rhs_func = ODEfunc(64, activation_type) self.integration_time = torch.tensor([0, 1]).float() def forward(self, x, solvers, solver_options): nsolvers = len(solvers) if solver_options.solver_mode == 'standalone': y = solvers[0].integrate(self.rhs_func, x = x, t = self.integration_time) elif solver_options.solver_mode == 'switch': if solver_options.switch_probs is not None: switch_probs = solver_options.switch_probs else: switch_probs = [1./nsolvers for _ in range(nsolvers)] solver_id = np.random.choice(range(nsolvers), p = switch_probs) solver_options.switch_solver_id = solver_id y = solvers[solver_id].integrate(self.rhs_func, x = x, t = self.integration_time) elif solver_options.solver_mode == 'ensemble': coin_flip = torch.bernoulli(torch.tensor((1,)), solver_options.ensemble_prob) solver_options.ensemble_coin_flip = coin_flip if coin_flip : if solver_options.ensemble_weights is not None: ensemble_weights = solver_options.ensemble_weights else: ensemble_weights = [1./nsolvers for _ in range(nsolvers)] for i, (wi, solver) in enumerate(zip(ensemble_weights, solvers)): if i == 0: y = wi * solver.integrate(self.rhs_func, x = x, t = self.integration_time) else: y += wi * solver.integrate(self.rhs_func, x = x, t = self.integration_time) else: y = solvers[0].integrate(self.rhs_func, x = x, t = self.integration_time) return y[-1,:,:,:,:] def ss_loss(self, y, solvers, solver_options): z0 = y rhs_func_ss = partial(self.rhs_func, ss_loss = True) integration_time_ss = self.integration_time + 1 nsolvers = len(solvers) if solver_options.solver_mode == 'standalone': z = solvers[0].integrate(rhs_func_ss.func, x = y, t = integration_time_ss) elif solver_options.solver_mode == 'switch': if solver_options.switch_probs is not None: switch_probs = solver_options.switch_probs else: switch_probs = [1./nsolvers for _ in range(nsolvers)] solver_id = solver_options.switch_solver_id z = solvers[solver_id].integrate(rhs_func_ss.func, x = y, t = integration_time_ss) elif solver_options.solver_mode == 'ensemble': coin_flip = solver_options.ensemble_coin_flip if coin_flip : if solver_options.ensemble_weights is not None: ensemble_weights = solver_options.ensemble_weights else: ensemble_weights = [1./nsolvers for _ in range(nsolvers)] for i, (wi, solver) in enumerate(zip(ensemble_weights, solvers)): if i == 0: z = wi * solver.integrate(rhs_func_ss.func, x = y, t = integration_time_ss) else: z += wi * solver.integrate(rhs_func_ss.func, x = y, t = integration_time_ss) else: z = solvers[0].integrate(rhs_func_ss.func, x = y, t = integration_time_ss) z = z[-1,:,:,:,:] - z0 z = torch.norm(z.reshape((z.shape[0], -1)), dim = 1) z = torch.mean(z) return z class MetaNODE(nn.Module): def __init__(self, downsampling_method = 'conv', is_odenet = True, activation_type = 'relu', in_channels = 1): super(MetaNODE, self).__init__() self.is_odenet = is_odenet self.downsampling_layers = nn.Sequential(*build_downsampling_layers(downsampling_method, in_channels)) self.fc_layers = nn.Sequential(*build_fc_layers()) if is_odenet: self.blocks = nn.ModuleList([MetaODEBlock(activation_type)]) else: self.blocks = nn.ModuleList([ResBlock(64, 64) for _ in range(6)]) def forward(self, x, solvers=None, solver_options=None, loss_options = None): self.ss_loss = 0 x = self.downsampling_layers(x) for block in self.blocks: if self.is_odenet: x = block(x, solvers, solver_options) if (loss_options is not None) and loss_options.ss_loss: z = block.ss_loss(x, solvers, solver_options) self.ss_loss += z else: x = block(x) x = self.fc_layers(x) return x def get_ss_loss(self): return self.ss_loss class ODEfunc(nn.Module): def __init__(self, dim, activation_type = 'relu'): super(ODEfunc, self).__init__() if activation_type == 'tanh': activation = nn.Tanh() elif activation_type == 'softplus': activation = nn.Softplus() elif activation_type == 'softsign': activation = nn.Softsign() elif activation_type == 'relu': activation = nn.ReLU() else: raise NotImplementedError('{} activation is not implemented'.format(activation_type)) self.norm1 = norm(dim) self.relu = nn.ReLU(inplace=True) self.conv1 = ConcatConv2d(dim, dim, 3, 1, 1) self.norm2 = norm(dim) self.conv2 = ConcatConv2d(dim, dim, 3, 1, 1) self.norm3 = norm(dim) self.nfe = 0 def forward(self, t, x, ss_loss = False): self.nfe += 1 out = self.norm1(x) out = self.relu(out) out = self.conv1(t, out) out = self.norm2(out) out = self.relu(out) out = self.conv2(t, out) out = self.norm3(out) if ss_loss: out = torch.abs(out) return out def build_downsampling_layers(downsampling_method = 'conv', in_channels = 1): if downsampling_method == 'conv': downsampling_layers = [ nn.Conv2d(in_channels, 64, 3, 1), norm(64), nn.ReLU(inplace=True), nn.Conv2d(64, 64, 4, 2, 1), norm(64), nn.ReLU(inplace=True), nn.Conv2d(64, 64, 4, 2, 1), ] elif downsampling_method == 'res': downsampling_layers = [ nn.Conv2d(in_channels, 64, 3, 1), ResBlock(64, 64, stride=2, downsample=conv1x1(64, 64, 2)), ResBlock(64, 64, stride=2, downsample=conv1x1(64, 64, 2)), ] return downsampling_layers def build_fc_layers(): fc_layers = [norm(64), nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d((1, 1)), Flatten(), nn.Linear(64, 10)] return fc_layers def conv3x3(in_planes, out_planes, stride=1): """3x3 convolution with padding""" return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) def conv1x1(in_planes, out_planes, stride=1): """1x1 convolution""" return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) def norm(dim): return nn.GroupNorm(min(32, dim), dim) class ResBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None): super(ResBlock, self).__init__() self.norm1 = norm(inplanes) self.relu = nn.ReLU(inplace=True) self.downsample = downsample self.conv1 = conv3x3(inplanes, planes, stride) self.norm2 = norm(planes) self.conv2 = conv3x3(planes, planes) def forward(self, x): shortcut = x out = self.relu(self.norm1(x)) if self.downsample is not None: shortcut = self.downsample(out) out = self.conv1(out) out = self.norm2(out) out = self.relu(out) out = self.conv2(out) return out + shortcut class ConcatConv2d(nn.Module): def __init__(self, dim_in, dim_out, ksize=3, stride=1, padding=0, dilation=1, groups=1, bias=True, transpose=False): super(ConcatConv2d, self).__init__() module = nn.ConvTranspose2d if transpose else nn.Conv2d self._layer = module( dim_in + 1, dim_out, kernel_size=ksize, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias ) def forward(self, t, x): tt = torch.ones_like(x[:, :1, :, :]) * t ttx = torch.cat([tt, x], 1) return self._layer(ttx) class Flatten(nn.Module): def __init__(self): super(Flatten, self).__init__() def forward(self, x): shape = torch.prod(torch.tensor(x.shape[1:])).item() return x.view(-1, shape)
9,094
python/ray/rllib/RL/BRL/policy_iter.py
christopher-hsu/ray
1
2171937
""" <Policy Iteration> Author: <NAME> (<EMAIL>) Affiliation: University of Pennsylvania """ import numpy as np import seeding def policy_iter(env, discount, threshold, T=5000): V = np.zeros(env.snum) policy = np.random.choice(env.anum, env.snum) np_random, _ = seeding.np_random(None) p_stable = False trans_dict = {} rew_dict = {} slip_prob = env.slip if env.stochastic_reward: slip_prob_r = env.slip_r for state in env.eff_states: for action in range(env.anum): transM = np.zeros(env.snum) rewM = np.zeros(env.snum) if env.stochastic: env.slip = 0.0 if env.stochastic_reward: env.slip_r = 0.0 r0, s_n0, _ = env.observe(state,action,np_random) transM[s_n0] = 1.0-slip_prob rewM[s_n0] = (1.0-slip_prob_r)*r0 env.slip_r = 1.0 r1, s_n1, _ = env.observe(state,action,np_random) rewM[s_n1] += slip_prob_r*r1 assert(s_n0 == s_n1) else: r0, s_n0, _ = env.observe(state,action,np_random) transM[s_n0] = 1.0-slip_prob rewM[s_n0] = r0 env.slip = 1.0 if env.stochastic_reward: env.slip_r = 0.0 r0, s_n0, _ = env.observe(state,action,np_random) transM[s_n0] = 1.0-slip_prob rewM[s_n0] = (1.0-slip_prob_r)*r0 env.slip_r = 1.0 r1, s_n1, _ = env.observe(state,action,np_random) rewM[s_n1] += slip_prob_r*r1 else: r1, s_n1, _ = env.observe(state,action,np_random) transM[s_n1] = slip_prob rewM[s_n1] = r1 else: if env.stochastic_reward: env.slip_r = 0.0 r0, s_n0, _ = env.observe(state,action,np_random) transM[s_n0] = 1.0 rewM[s_n0] = (1.0-slip_prob_r)*r0 env.slip_r = 1.0 r1, s_n1, _ = env.observe(state,action,np_random) if s_n1 != s_n0: print("Transition is stochastic!") rewM[s_n1] += slip_prob_r*r1 else: r0, s_n0, _ = env.observe(state,action,np_random) transM[s_n0] = 1.0 rewM[s_n0] = r0 trans_dict[(state,action)] = transM rew_dict[(state,action)] = rewM it = 0 env.slip = slip_prob if env.stochastic_reward: env.slip_r = slip_prob_r while(not p_stable): delta = 1.0 t = 0 while(delta > threshold and t < T): delta = 0 for s in env.eff_states: v_prev = V[s] V[s] = sum([ trans_dict[(s,policy[s])][s_next] * (rew_dict[(s,policy[s])][s_next] \ + int((s_next<env.goal[0]) or (s_next>=env.goal[1]))*discount*V[s_next]) \ for s_next in range(env.snum)]) delta = max(delta, abs(v_prev-V[s])) t += 1 p_stable = True for s in env.eff_states: u_old = policy[s] q_val = [sum([ trans_dict[(s,u)][s_next] * (rew_dict[(s,u)][s_next] \ + int((s_next<env.goal[0]) or (s_next>=env.goal[1]))*discount*V[s_next]) \ for s_next in range(env.snum)]) for u in range(env.anum)] if max(q_val) - min(q_val) < 0.001: policy[s] = 0 else: policy[s] = np.argmax(q_val) if not(u_old == policy[s]): p_stable = False it+=1 print("after %d iterations"%it) Q = np.zeros((env.snum,env.anum)) for s in env.eff_states: for a in range(env.anum): Q[s][a] = sum([ trans_dict[(s,a)][s_next] * (rew_dict[(s,a)][s_next] \ + int((s_next<env.goal[0]) or (s_next>=env.goal[1]))*discount*V[s_next]) \ for s_next in range(env.snum)]) return V, Q, policy def plot_V_pi(Vs, pis, env): for (V, pi) in zip(Vs, pis): plt.figure(figsize=(3, 3)) w = int(np.sqrt(V.shape[0])) plt.imshow(V.reshape(w, w), cmap='gray', interpolation='none', clim=(0, 1)) ax = plt.gca() ax.set_xticks(np.arange(w) - .5) ax.set_yticks(np.arange(w) - .5) ax.set_xticklabels([]) ax.set_yticklabels([]) Y, X = np.mgrid[0:w, 0:w] a2uv = {0: (-1, 0), 1: (0, -1), 2: (1, 0), 3: (-1, 0)} Pi = pi.reshape(w, w) for y in range(w): for x in range(w): a = Pi[y, x] u, v = a2uv[a] plt.arrow(x, y, u * .3, -v * .3, color='m', head_width=0.1, head_length=0.1) plt.text(x, y, str(env.desc[y, x].item().decode()), color='g', size=12, verticalalignment='center', horizontalalignment='center', fontweight='bold') plt.grid(color='b', lw=2, ls='-')
4,282
setup.py
unicef/unicef-rest-export
0
2170554
#!/usr/bin/env python import ast import codecs import os.path import re import subprocess import sys from codecs import open from distutils import log from distutils.errors import DistutilsError from setuptools import find_packages, setup from setuptools.command.install import install from setuptools.command.sdist import sdist as BaseSDistCommand ROOT = os.path.realpath(os.path.dirname(__file__)) init = os.path.join(ROOT, 'src', 'unicef_rest_export', '__init__.py') _version_re = re.compile(r'__version__\s+=\s+(.*)') _name_re = re.compile(r'NAME\s+=\s+(.*)') sys.path.insert(0, os.path.join(ROOT, 'src')) with open(init, 'rb') as f: content = f.read().decode('utf-8') VERSION = str(ast.literal_eval(_version_re.search(content).group(1))) NAME = str(ast.literal_eval(_name_re.search(content).group(1))) setup( name=NAME, version=VERSION, url='https://github.com/unicef/unicef-rest-export', author='UNICEF', author_email='<EMAIL>', license="Apache 2 License", description='Django package that handles exporting of data', long_description=codecs.open('README.rst').read(), package_dir={'': 'src'}, packages=find_packages(where='src'), include_package_data=True, install_requires=( 'django', 'djangorestframework-csv', 'djangorestframework', 'lxml', 'python-docx', 'pytz', 'pyyaml', 'reportlab', 'tablib[html,xlsx,xls]', 'xlrd', 'xlwt', ), extras_require={ 'test': ( 'coverage', 'factory-boy', 'faker', 'flake8', 'isort', 'pytest-cov', 'pytest-django', 'pytest-echo', 'pytest-pythonpath', 'pytest', 'psycopg2', ), }, platforms=['any'], classifiers=[ 'Environment :: Web Environment', 'Programming Language :: Python :: 3.9', 'Framework :: Django', 'Framework :: Django :: 3.2', 'Framework :: Django :: 4.0', 'Intended Audience :: Developers'], scripts=[], )
2,133
tests/unit/raptiformica/utils/test_ensure_directory.py
vdloo/raptiformica
21
2171584
from raptiformica.utils import ensure_directory from tests.testcase import TestCase class TestEnsureDirectory(TestCase): def setUp(self): self.makedirs = self.set_up_patch('raptiformica.utils.makedirs') def test_ensure_directory_makes_dirs_if_path_does_not_exist(self): ensure_directory('/tmp/directory') self.makedirs.assert_called_once_with('/tmp/directory') def test_ensure_directory_does_not_raise_exception_if_dir_already_exists(self): self.makedirs.side_effect = FileExistsError ensure_directory('/tmp/directory')
578
abackend-env/lib/python3.5/site-packages/django_extensions/compat.py
mhotwagner/abackend
0
2172376
from __future__ import unicode_literals import sys import django from django.conf import settings # flake8: noqa # # Python compatibility # PY3 = sys.version_info[0] == 3 OLD_PY2 = sys.version_info[:2] < (2, 7) if PY3: # pragma: no cover from io import StringIO import importlib elif OLD_PY2: # pragma: no cover from cStringIO import StringIO from django.utils import importlib else: # pragma: no cover from cStringIO import StringIO import importlib # # Django compatibility # try: # Django 1.5 from django.contrib.auth import get_user_model except ImportError: # pragma: no cover assert django.VERSION < (1, 5) from django.contrib.auth.models import User User.USERNAME_FIELD = "username" User.get_username = lambda self: self.username def get_user_model(): return User def list_apps(): try: # django >= 1.7, to support AppConfig from django.apps import apps return [app.name for app in apps.get_app_configs()] except ImportError: # old way return settings.INSTALLED_APPS def get_apps(): try: # django >= 1.7, to support AppConfig from django.apps import apps return [app.models_module for app in apps.get_app_configs() if app.models_module] except ImportError: from django.db import models return models.get_apps() def get_app_models(app_labels=None): if app_labels is None: try: # django >= 1.7, to support AppConfig from django.apps import apps return apps.get_models(include_auto_created=True) except ImportError: from django.db import models return models.get_models(include_auto_created=True) if not isinstance(app_labels, (list, tuple, set)): app_labels = [app_labels] app_models = [] try: # django >= 1.7, to support AppConfig from django.apps import apps for app_label in app_labels: app_config = apps.get_app_config(app_label) app_models.extend(app_config.get_models(include_auto_created=True)) except ImportError: from django.db import models try: app_list = [models.get_app(app_label) for app_label in app_labels] except (models.ImproperlyConfigured, ImportError) as e: raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e) for app in app_list: app_models.extend(models.get_models(app, include_auto_created=True)) return app_models
2,576
ecg_arrythmia/ecg_classification.py
zabir-nabil/ecg-arrythmia
4
2171478
# coding: utf-8 # In[1]: from scipy import io, signal import matplotlib.pyplot as plt import dtcwt import numpy as np import itertools import pywt # In[35]: test_path = 'MLII/reformatted_dataset/normal/100m (0)_nsr.mat' ta = io.loadmat(test_path) # In[36]: print(ta['val']) # In[37]: print(ta) # In[38]: ta = ta['val'] # In[39]: print(type(ta)) # In[40]: ta.shape # In[41]: print(ta) # In[42]: import numpy as np ta = np.array(ta) # In[43]: ta = np.reshape(ta, (3600,)) # In[44]: import matplotlib.pyplot as plt plt.plot(ta) plt.show() # In[53]: def plot_ecg(path, tit): ta = io.loadmat(path) ta = ta['val'] ta = np.array(ta) ta = np.reshape(ta, (ta.shape[1],)) plt.plot(ta) plt.title(tit) plt.show() # In[79]: def get_ecg(path): ta = io.loadmat(path) ta = ta['val'] ta = np.array(ta) ta = np.reshape(ta, (ta.shape[1],)) return ta # In[54]: plot_ecg('MLII/reformatted_dataset/normal/100m (0)_nsr.mat', 'Normal Sinus Rhythm') # In[55]: plot_ecg('MLII/reformatted_dataset/normal/107m (5)_pr.mat', 'Pacemaker Rhythm') # In[56]: plot_ecg('MLII/reformatted_dataset/arythmia/100m (0)_apb.mat', 'Atrial Premature Beats') # In[57]: # arythmia detection # In[80]: x = get_ecg('MLII/reformatted_dataset/arythmia/100m (0)_apb.mat') # In[81]: from pywt import wavedec coeffs = wavedec(x, 'db4', level=2) cA2, cD2, cD1 = coeffs # In[82]: plt.plot(cA2) plt.show() plt.plot(cD2) plt.show() plt.plot(cD1) plt.show() # In[83]: # data process import glob nx = [] ax = [] for f in glob.glob('MLII/reformatted_dataset/normal/*.mat'): nx.append(get_ecg(f)) for f in glob.glob('MLII/reformatted_dataset/arythmia/*mat'): ax.append(get_ecg(f)) # In[85]: print(len(nx)) print(len(ax)) # In[77]: import pandas as pd from numpy.linalg import LinAlgError from statsmodels.tsa.stattools import adfuller #1 def AE(x): # Absolute Energy x = np.asarray(x) return sum(x * x) #2 def SM2(y): #t1 = time.time() f, Pxx_den = signal.welch(y) sm2 = 0 n = len(f) for i in range(0,n): sm2 += Pxx_den[i]*(f[i]**2) #t2 = time.time() #print('time: ', t2-t2) return sm2 #3 def LOG(y): n = len(y) return np.exp(np.sum(np.log(np.abs(y)))/n) #4 def WL(x): # WL in primary manuscript return np.sum(abs(np.diff(x))) #6 def AC(x, lag=5): # autocorrelation """ [1] https://en.wikipedia.org/wiki/Autocorrelation#Estimation """ # This is important: If a series is passed, the product below is calculated # based on the index, which corresponds to squaring the series. if type(x) is pd.Series: x = x.values if len(x) < lag: return np.nan # Slice the relevant subseries based on the lag y1 = x[:(len(x)-lag)] y2 = x[lag:] # Subtract the mean of the whole series x x_mean = np.mean(x) # The result is sometimes referred to as "covariation" sum_product = np.sum((y1-x_mean)*(y2-x_mean)) # Return the normalized unbiased covariance return sum_product / ((len(x) - lag) * np.var(x)) #7 def BE(x, max_bins=30): # binned entropy hist, bin_edges = np.histogram(x, bins=max_bins) probs = hist / len(x) return - np.sum(p * np.math.log(p) for p in probs if p != 0) #15 def SE(x): # sample entropy """ [1] http://en.wikipedia.org/wiki/Sample_Entropy [2] https://www.ncbi.nlm.nih.gov/pubmed/10843903?dopt=Abstract """ x = np.array(x) sample_length = 1 # number of sequential points of the time series tolerance = 0.2 * np.std(x) # 0.2 is a common value for r - why? n = len(x) prev = np.zeros(n) curr = np.zeros(n) A = np.zeros((1, 1)) # number of matches for m = [1,...,template_length - 1] B = np.zeros((1, 1)) # number of matches for m = [1,...,template_length] for i in range(n - 1): nj = n - i - 1 ts1 = x[i] for jj in range(nj): j = jj + i + 1 if abs(x[j] - ts1) < tolerance: # distance between two vectors curr[jj] = prev[jj] + 1 temp_ts_length = min(sample_length, curr[jj]) for m in range(int(temp_ts_length)): A[m] += 1 if j < n - 1: B[m] += 1 else: curr[jj] = 0 for j in range(nj): prev[j] = curr[j] N = n * (n - 1) / 2 B = np.vstack(([N], B[0])) # sample entropy = -1 * (log (A/B)) similarity_ratio = A / B se = -1 * np.log(similarity_ratio) se = np.reshape(se, -1) return se[0] #16 def TRAS(x, lag=5): # time reversal asymmetry statistic """ | [1] <NAME>., <NAME>. (2014). | Highly comparative feature-based time-series classification. | Knowledge and Data Engineering, IEEE Transactions on 26, 3026–3037. """ n = len(x) x = np.asarray(x) if 2 * lag >= n: return 0 else: return np.mean((np.roll(x, 2 * -lag) * np.roll(x, 2 * -lag) * np.roll(x, -lag) - np.roll(x, -lag) * x * x)[0:(n - 2 * lag)]) #17 def VAR(x): # variance return np.var(x) # In[89]: len(nx[0]) # In[90]: def get_A(x): coeffs = wavedec(x, 'db4', level=2) cA2, cD2, cD1 = coeffs return cA2 # In[91]: len(get_A(nx[0])) # In[92]: print(len(nx)) # In[97]: import time fx = [] y = [] t1 = time.time() for i in range(len(nx)): cf = [] cf.append(AE(get_A(nx[i]))) cf.append(SM2(get_A(nx[i]))) cf.append((LOG(get_A(nx[i])))) cf.append((WL(get_A(nx[i])))) cf.append((AC(get_A(nx[i])))) cf.append(((BE(get_A(nx[i]))))) cf.append((SE(get_A(nx[i])))) cf.append((TRAS(get_A(nx[i])))) cf.append((VAR(get_A(nx[i])))) fx.append(cf) y.append(1) print('.', end = '') t2 = time.time() print(t2-t1) # In[99]: print(len(fx)) print(len(y)) # In[100]: print(len(fx[0])) # In[101]: t1 = time.time() for i in range(len(ax)): cf = [] cf.append(AE(get_A(ax[i]))) cf.append(SM2(get_A(ax[i]))) cf.append((LOG(get_A(ax[i])))) cf.append((WL(get_A(ax[i])))) cf.append((AC(get_A(ax[i])))) cf.append(((BE(get_A(ax[i]))))) cf.append((SE(get_A(ax[i])))) cf.append((TRAS(get_A(ax[i])))) cf.append((VAR(get_A(ax[i])))) fx.append(cf) y.append(0) print('.', end = '') t2 = time.time() print(t2-t1) # In[102]: print(len(fx)) print(len(y)) # In[103]: import numpy as np fx = np.array(fx, dtype = 'float32') # In[104]: fx.shape # In[105]: from sklearn.preprocessing import StandardScaler scaler = StandardScaler() scaler.fit(fx) print(scaler.mean_) X_all = scaler.transform(fx) print(np.mean(X_all)) print(np.std(X_all)) # In[109]: from sklearn.model_selection import cross_val_score from sklearn import svm clf = svm.SVC(kernel='linear', C=1) scores = cross_val_score(clf, X_all, y, cv=5) print('Accuracy: ', scores.mean(), scores.std() * 2) # In[112]: from sklearn.neighbors import KNeighborsClassifier knn = KNeighborsClassifier(n_neighbors=100) scores = cross_val_score(knn, X_all, y, cv=5) print('Accuracy: ', scores.mean(), scores.std() * 2) # In[114]: from sklearn import tree clf = tree.DecisionTreeClassifier() scores = cross_val_score(clf, X_all, y, cv=5) print('Accuracy: ', scores.mean(), scores.std() * 2) # In[130]: from sklearn.ensemble import RandomForestClassifier clf = RandomForestClassifier(n_estimators=100, max_depth=None, min_samples_split=4, random_state=0) scores = cross_val_score(clf, X_all, y, cv=5) print('Accuracy: ', scores.mean(), scores.std() * 2) # In[125]: from sklearn.ensemble import AdaBoostClassifier clf = AdaBoostClassifier(n_estimators=10) scores = cross_val_score(clf, X_all, y, cv=5) print('Accuracy: ', scores.mean(), scores.std() * 2) # In[ ]:
7,851
app/motivator/motivator_bot/telegram_bot.py
SabaunT/bot-motivator
0
2170628
import logging from telegram import Bot from telegram.ext import Updater, ConversationHandler from app.settings import AppSettings from app.motivator.motivator_bot.handlers import ( register_habits_conv_handler_kwargs, delete_habits_conv_handler_kwargs, help_handler ) logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) class MotivatorBot: # todo logging def __init__(self): self.token = AppSettings.TOKEN self.updater = Updater(token=self.token, use_context=True) self.dispatcher = self.updater.dispatcher def setup(self): register_habits_conv_handler = ConversationHandler(**register_habits_conv_handler_kwargs) delete_habits_conv_handler = ConversationHandler(**delete_habits_conv_handler_kwargs) self.dispatcher.add_handler(help_handler) self.dispatcher.add_handler(register_habits_conv_handler) self.dispatcher.add_handler(delete_habits_conv_handler) def run(self): self.updater.start_polling() self.updater.idle() @property def bot(self): return Bot(token=self.token) motivator = MotivatorBot()
1,218
pyrustic/widget/pathentry.py
tutlane/pyrustic
0
2172316
import tkinter as tk from tkinter import filedialog from pyrustic import widget from pyrustic.tkmisc import merge_cnfs from pyrustic.view import View ENTRY = "entry" BUTTON = "button" DIALOG = "dialog" class Pathentry(widget.Frame): """ """ def __init__(self, master=None, browse="file", width=17, title=None, initialdir=None, cnfs=None): """ - master: widget parent. Example: an instance of tk.Frame """ self.__cnfs = merge_cnfs({ENTRY: {"width": width}}, cnfs, components=("body", ENTRY, BUTTON, DIALOG)) super().__init__(master=master, class_="Pathentry", cnf=self.__cnfs["body"], on_build=self.__on_build, on_display=self.__on_display, on_destroy=self.__on_destroy) self.__browse = browse self.__title = title self.__initialdir = initialdir self.__entry = None self.__button = None self.__components = {} self.__string_var = tk.StringVar(value="") # build self.__view = self.build() # ============================================== # PROPERTIES # ============================================== @property def components(self): """ """ return self.__components @property def string_var(self): return self.__string_var @property def path(self): return self.__path @path.setter def path(self, val): self.__path = val def __on_build(self): self.__entry = tk.Entry(self, textvariable=self.__string_var, cnf=self.__cnfs[ENTRY]) self.__entry.pack(side=tk.LEFT, pady=0, fill=tk.X, expand=1) self.__components["entry"] = self.__entry self.__button = tk.Button(self, text="...", command=self.__on_click_button, cnf=self.__cnfs[BUTTON]) self.__button.pack(side=tk.LEFT, padx=(2, 0), fill=tk.Y) self.__components["button"] = self.__button def __on_display(self): pass def __on_destroy(self): pass def __on_click_button(self): if self.__browse == "file": try: filename = filedialog.askopenfilename(initialdir=self.__initialdir, title=self.__title, **self.__cnfs[DIALOG]) except Exception as e: return path = None if not filename: pass elif isinstance(filename, str): path = filename else: path = ";".join(filename) if path: self.__string_var.set(path) else: try: filename = filedialog.askdirectory(initialdir=self.__initialdir, title=self.__title, **self.__cnfs[DIALOG]) except Exception as e: return path = None if not filename: pass elif isinstance(filename, str): path = filename else: path = ";".join(filename) if path: self.__string_var.set(path) self.__entry.icursor("end") if __name__ == "__main__": root = tk.Tk() pathentry_test = Pathentry(root, browse="dir", extra_options={"dialog": {"initialdir": "/home/alex", "title": "Hello"}}) pathentry_test.pack(fill=tk.BOTH, expand=1) root.mainloop()
4,023
third_party/xcb_proto/protos.bzl
TokTok/toktok-stack
12
2172156
"""List of all the XCB protocols.""" xcb_protos = [ "bigreq", "composite", "damage", "dpms", "dri2", "dri3", "ge", "glx", "present", "randr", "record", "render", "res", "screensaver", "shape", "shm", "sync", "xc_misc", "xevie", "xf86dri", "xf86vidmode", "xfixes", "xinerama", "xinput", "xkb", "xprint", "xproto", "xselinux", "xtest", "xv", "xvmc", ]
475
worker/__main__.py
Antonio32A/blurplefier
3
2172308
# -*- coding: utf-8 -*- from . import Worker from common import setup_logging with setup_logging(): Worker.with_config().run()
134