\n \n

{% blocktrans %}Index of {{ directory }}{% endblocktrans %}

\n
    \n {% ifnotequal directory \"/\" %}\n
  • ../
  • \n {% endifnotequal %}\n {% for f in file_list %}\n
  • {{ f }}
  • \n {% endfor %}\n
\n \n\n\"\"\"\ntemplate_translatable = ugettext_lazy(\"Index of %(directory)s\")\n\n\ndef directory_index(path, fullpath):\n try:\n t = loader.select_template([\n 'static/directory_index.html',\n 'static/directory_index',\n ])\n except TemplateDoesNotExist:\n t = Engine().from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)\n files = []\n for f in os.listdir(fullpath):\n if not f.startswith('.'):\n if os.path.isdir(os.path.join(fullpath, f)):\n f += '/'\n files.append(f)\n c = Context({\n 'directory': path + '/',\n 'file_list': files,\n })\n return HttpResponse(t.render(c))\n\n\ndef was_modified_since(header=None, mtime=0, size=0):\n \"\"\"\n Was something modified since the user last downloaded it?\n\n header\n This is the value of the If-Modified-Since header. If this is None,\n I'll just return True.\n\n mtime\n This is the modification time of the item we're talking about.\n\n size\n This is the size of the item we're talking about.\n \"\"\"\n try:\n if header is None:\n raise ValueError\n matches = re.match(r\"^([^;]+)(; length=([0-9]+))?$\", header,\n re.IGNORECASE)\n header_mtime = parse_http_date(matches.group(1))\n header_len = matches.group(3)\n if header_len and int(header_len) != size:\n raise ValueError\n if int(mtime) > header_mtime:\n raise ValueError\n except (AttributeError, ValueError, OverflowError):\n return True\n return False\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-7987543726296927000,"string":"-7,987,543,726,296,927,000"},"line_mean":{"kind":"number","value":33.0529801325,"string":"33.05298"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.6248541424,"string":"0.624854"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.9311926605504586,"string":"3.931193"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":46,"cells":{"repo_name":{"kind":"string","value":"dionbosschieter/NetworkMonitor"},"path":{"kind":"string","value":"src/Client/InfoContainer.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1892"},"content":{"kind":"string","value":"import terminal\nimport curses\nimport time\nfrom curses import panel\n\nclass InfoContainer(object):\n\n def __init__(self, stdscreen, title, debug_console):\n self.debug_console = debug_console\n self.height = int(terminal.height/2)\n self.width = terminal.width - 2\n self.title = title\n\n self.window = stdscreen.subwin(self.height,self.width,1,1)\n self.window.border(0)\n self.window.addstr(0,1,title)\n self.panel = panel.new_panel(self.window)\n self.panel.hide()\n panel.update_panels()\n # Add the Border\n self.second = time.time()\n self.writebuffer = []\n\n def display(self):\n self.panel.top()\n self.panel.show()\n #self.window.clear()\n\n def hide(self):\n self.window.clear()\n self.panel.hide()\n panel.update_panels()\n curses.doupdate()\n\n def refresh(self):\n self.window.clear()\n self.window.border(0)\n self.window.addstr(0,1,self.title)\n #draw the last 20 log items\n #foreach i from 0 till (self.height-2) \n #draw string on i place\n #self.writebuffer[-(self.height-2):]\n \n maxlength = (self.height-3)\n lengthofbuffer = len(self.writebuffer)\n if(lengthofbuffer>maxlength):\n startindex = (lengthofbuffer-1)-maxlength\n else:\n startindex = 0\n maxlength = lengthofbuffer\n\n for i in range(0, maxlength):\n #self.window.addstr(i,1,str(i))\n self.window.addstr(i+1,1,self.writebuffer[i+startindex])\n\n self.window.refresh()\n curses.doupdate()\n\n def addPacket(self, packet):\n #1 refresh per second// or 2?\n if(time.time() - self.second >= 1):\n self.writebuffer.append(packet)\n self.second = time.time()\n else:\n self.writebuffer.append(packet)\n\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":8029939048508223000,"string":"8,029,939,048,508,223,000"},"line_mean":{"kind":"number","value":27.6666666667,"string":"27.666667"},"line_max":{"kind":"number","value":68,"string":"68"},"alpha_frac":{"kind":"number","value":0.5819238901,"string":"0.581924"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.753968253968254,"string":"3.753968"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":47,"cells":{"repo_name":{"kind":"string","value":"eriol/circuits"},"path":{"kind":"string","value":"examples/node/nodeserver.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"2429"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n\n\"\"\"Node Server Example\n\nThis example demonstrates how to create a very simple node server\nthat supports bi-diractional messaging between server and connected\nclients forming a cluster of nodes.\n\"\"\"\n\n\nfrom __future__ import print_function\n\nfrom os import getpid\nfrom optparse import OptionParser\n\n\nfrom circuits.node import Node\nfrom circuits import Component, Debugger\n\n\n__version__ = \"0.0.1\"\n\nUSAGE = \"%prog [options]\"\nVERSION = \"%prog v\" + __version__\n\n\ndef parse_options():\n parser = OptionParser(usage=USAGE, version=VERSION)\n\n parser.add_option(\n \"-b\", \"--bind\",\n action=\"store\", type=\"string\",\n default=\"0.0.0.0:8000\", dest=\"bind\",\n help=\"Bind to address:[port]\"\n )\n\n parser.add_option(\n \"-d\", \"--debug\",\n action=\"store_true\",\n default=False, dest=\"debug\",\n help=\"Enable debug mode\"\n )\n\n opts, args = parser.parse_args()\n\n return opts, args\n\n\nclass NodeServer(Component):\n\n def init(self, args, opts):\n \"\"\"Initialize our ``ChatServer`` Component.\n\n This uses the convenience ``init`` method which is called after the\n component is proeprly constructed and initialized and passed the\n same args and kwargs that were passed during construction.\n \"\"\"\n\n self.args = args\n self.opts = opts\n\n self.clients = {}\n\n if opts.debug:\n Debugger().register(self)\n\n if \":\" in opts.bind:\n address, port = opts.bind.split(\":\")\n port = int(port)\n else:\n address, port = opts.bind, 8000\n\n Node(port=port, server_ip=address).register(self)\n\n def connect(self, sock, host, port):\n \"\"\"Connect Event -- Triggered for new connecting clients\"\"\"\n\n self.clients[sock] = {\n \"host\": sock,\n \"port\": port,\n }\n\n def disconnect(self, sock):\n \"\"\"Disconnect Event -- Triggered for disconnecting clients\"\"\"\n\n if sock not in self.clients:\n return\n\n del self.clients[sock]\n\n def ready(self, server, bind):\n print(\"Ready! Listening on {}:{}\".format(*bind))\n print(\"Waiting for remote events...\")\n\n def hello(self):\n return \"Hello World! ({0:d})\".format(getpid())\n\n\ndef main():\n opts, args = parse_options()\n\n # Configure and \"run\" the System.\n NodeServer(args, opts).run()\n\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-6181088414534044000,"string":"-6,181,088,414,534,044,000"},"line_mean":{"kind":"number","value":21.4907407407,"string":"21.490741"},"line_max":{"kind":"number","value":75,"string":"75"},"alpha_frac":{"kind":"number","value":0.5961300947,"string":"0.59613"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.075503355704698,"string":"4.075503"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":48,"cells":{"repo_name":{"kind":"string","value":"krieger-od/nwjs_chromium.src"},"path":{"kind":"string","value":"chrome/common/extensions/docs/server2/build_server.py"},"copies":{"kind":"string","value":"80"},"size":{"kind":"string","value":"3340"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n# This script is used to copy all dependencies into the local directory.\n# The package of files can then be uploaded to App Engine.\nimport os\nimport shutil\nimport stat\nimport sys\n\nSRC_DIR = os.path.join(sys.path[0], os.pardir, os.pardir, os.pardir, os.pardir,\n os.pardir)\nTHIRD_PARTY_DIR = os.path.join(SRC_DIR, 'third_party')\nLOCAL_THIRD_PARTY_DIR = os.path.join(sys.path[0], 'third_party')\nTOOLS_DIR = os.path.join(SRC_DIR, 'tools')\nSCHEMA_COMPILER_FILES = ['memoize.py',\n 'model.py',\n 'idl_schema.py',\n 'schema_util.py',\n 'json_parse.py',\n 'json_schema.py']\n\ndef MakeInit(path):\n path = os.path.join(path, '__init__.py')\n with open(os.path.join(path), 'w') as f:\n os.utime(os.path.join(path), None)\n\ndef OnError(function, path, excinfo):\n os.chmod(path, stat.S_IWUSR)\n function(path)\n\ndef CopyThirdParty(src, dest, files=None, make_init=True):\n dest_path = os.path.join(LOCAL_THIRD_PARTY_DIR, dest)\n if not files:\n shutil.copytree(src, dest_path)\n if make_init:\n MakeInit(dest_path)\n return\n try:\n os.makedirs(dest_path)\n except Exception:\n pass\n if make_init:\n MakeInit(dest_path)\n for filename in files:\n shutil.copy(os.path.join(src, filename), os.path.join(dest_path, filename))\n\ndef main():\n if os.path.isdir(LOCAL_THIRD_PARTY_DIR):\n try:\n shutil.rmtree(LOCAL_THIRD_PARTY_DIR, False, OnError)\n except OSError:\n print('*-------------------------------------------------------------*\\n'\n '| If you are receiving an upload error, try removing |\\n'\n '| chrome/common/extensions/docs/server2/third_party manually. |\\n'\n '*-------------------------------------------------------------*\\n')\n\n\n CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'motemplate'), 'motemplate')\n CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'markdown'), 'markdown',\n make_init=False)\n CopyThirdParty(os.path.join(SRC_DIR, 'ppapi', 'generators'),\n 'json_schema_compiler')\n CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'ply'),\n os.path.join('json_schema_compiler', 'ply'))\n CopyThirdParty(os.path.join(TOOLS_DIR, 'json_schema_compiler'),\n 'json_schema_compiler',\n SCHEMA_COMPILER_FILES)\n CopyThirdParty(os.path.join(TOOLS_DIR, 'json_comment_eater'),\n 'json_schema_compiler',\n ['json_comment_eater.py'])\n CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'simplejson'),\n os.path.join('json_schema_compiler', 'simplejson'),\n make_init=False)\n MakeInit(LOCAL_THIRD_PARTY_DIR)\n\n CopyThirdParty(os.path.join(THIRD_PARTY_DIR, 'google_appengine_cloudstorage',\n 'cloudstorage'), 'cloudstorage')\n\n # To be able to use the Motemplate class we need this import in __init__.py.\n with open(os.path.join(LOCAL_THIRD_PARTY_DIR,\n 'motemplate',\n '__init__.py'), 'a') as f:\n f.write('from motemplate import Motemplate\\n')\n\nif __name__ == '__main__':\n main()\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-4363357893083128000,"string":"-4,363,357,893,083,128,000"},"line_mean":{"kind":"number","value":36.5280898876,"string":"36.52809"},"line_max":{"kind":"number","value":80,"string":"80"},"alpha_frac":{"kind":"number","value":0.5946107784,"string":"0.594611"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.380566801619433,"string":"3.380567"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":49,"cells":{"repo_name":{"kind":"string","value":"bijanfallah/OI_CCLM"},"path":{"kind":"string","value":"src/RMSE_MAPS_INGO.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"2007"},"content":{"kind":"string","value":"# Program to show the maps of RMSE averaged over time\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_squared_error\nimport os\nfrom netCDF4 import Dataset as NetCDFFile\nimport numpy as np\nfrom CCLM_OUTS import Plot_CCLM\n# option == 1 -> shift 4 with default cclm domain and nboundlines = 3\n# option == 2 -> shift 4 with smaller cclm domain and nboundlines = 3\n# option == 3 -> shift 4 with smaller cclm domain and nboundlines = 6\n# option == 4 -> shift 4 with corrected smaller cclm domain and nboundlines = 3\n# option == 5 -> shift 4 with corrected smaller cclm domain and nboundlines = 4\n# option == 6 -> shift 4 with corrected smaller cclm domain and nboundlines = 6\n# option == 7 -> shift 4 with corrected smaller cclm domain and nboundlines = 9\n# option == 8 -> shift 4 with corrected bigger cclm domain and nboundlines = 3\nfrom CCLM_OUTS import Plot_CCLM\n#def f(x):\n# if x==-9999:\n# return float('NaN')\n# else:\n# return x\ndef read_data_from_mistral(dir='/work/bb1029/b324045/work1/work/member/post/',name='member_T_2M_ts_seasmean.nc',var='T_2M'):\n # type: (object, object, object) -> object\n #a function to read the data from mistral work\n\n \"\"\"\n\n :rtype: object\n \"\"\"\n #CMD = 'scp $mistral:' + dir + name + ' ./'\n CMD = 'wget users.met.fu-berlin.de/~BijanFallah/' + dir + name\n os.system(CMD)\n nc = NetCDFFile(name)\n# for name2, variable in nc.variables.items():\n# for attrname in variable.ncattrs():\n# print(name2, variable, '-----------------',attrname)\n# #print(\"{} -- {}\".format(attrname, getattr(variable, attrname)))\n os.remove(name)\n lats = nc.variables['lat'][:]\n lons = nc.variables['lon'][:]\n t = nc.variables[var][:].squeeze()\n rlats = nc.variables['rlat'][:] # extract/copy the data\n rlons = nc.variables['rlon'][:]\n #f2 = np.vectorize(f)\n #t= f2(t)\n #t=t.data\n t=t.squeeze()\n #print()\n nc.close()\n\n return(t, lats, lons, rlats, rlons)\n\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":6172872896229471000,"string":"6,172,872,896,229,471,000"},"line_mean":{"kind":"number","value":37.5961538462,"string":"37.596154"},"line_max":{"kind":"number","value":124,"string":"124"},"alpha_frac":{"kind":"number","value":0.6372695566,"string":"0.63727"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.1606299212598423,"string":"3.16063"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":50,"cells":{"repo_name":{"kind":"string","value":"steven-hadfield/dpxdt"},"path":{"kind":"string","value":"deployment/appengine/appengine_config.py"},"copies":{"kind":"string","value":"4"},"size":{"kind":"string","value":"2989"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright 2013 Brett Slatkin\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"App Engine configuration file.\n\nSee:\n https://developers.google.com/appengine/docs/python/tools/appengineconfig\n\"\"\"\n\nimport os\nimport logging\nimport os\nimport sys\n\n\n# Log to disk for managed VMs:\n# https://cloud.google.com/appengine/docs/managed-vms/custom-runtimes#logging\nif os.environ.get('LOG_TO_DISK'):\n log_dir = '/var/log/app_engine/custom_logs'\n try:\n os.makedirs(log_dir)\n except OSError:\n pass # Directory already exists\n\n log_path = os.path.join(log_dir, 'app.log')\n handler = logging.FileHandler(log_path)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(logging.Formatter(\n '%(levelname)s %(filename)s:%(lineno)s] %(message)s'))\n logging.getLogger().addHandler(handler)\n\n\n# Load up our app and all its dependencies. Make the environment sane.\nfrom dpxdt.tools import run_server\n\n\n# Initialize flags from flags file or enviornment.\nimport gflags\ngflags.FLAGS(['dpxdt_server', '--flagfile=flags.cfg'])\nlogging.info('BEGIN Flags')\nfor key, flag in gflags.FLAGS.FlagDict().iteritems():\n logging.info('%s = %s', key, flag.value)\nlogging.info('END Flags')\n\n\n# When in production use precompiled templates. Sometimes templates break\n# in production. To debug templates there, comment this out entirely.\nif os.environ.get('SERVER_SOFTWARE', '').startswith('Google App Engine'):\n import jinja2\n from dpxdt.server import app\n app.jinja_env.auto_reload = False\n app.jinja_env.loader = jinja2.ModuleLoader('templates_compiled.zip')\n\n\n# Install dpxdt.server override hooks.\nfrom dpxdt.server import api\nimport hooks\n\napi._artifact_created = hooks._artifact_created\napi._get_artifact_response = hooks._get_artifact_response\n\n\n# Don't log when appstats is active.\nappstats_DUMP_LEVEL = -1\n\n# SQLAlchemy stacks are really deep.\nappstats_MAX_STACK = 20\n\n# Use very shallow local variable reprs to reduce noise.\nappstats_MAX_DEPTH = 2\n\n# Enable the remote shell, since the old admin interactive console doesn't\n# work with managed VMs.\nappstats_SHELL_OK = True\n\n\n# These are only used if gae_mini_profiler was properly installed\ndef gae_mini_profiler_should_profile_production():\n from google.appengine.api import users\n return users.is_current_user_admin()\n\n\ndef gae_mini_profiler_should_profile_development():\n return True\n\n\n# Fix the appstats module's formatting helper function.\nimport appstats_monkey_patch\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":6358295125254556000,"string":"6,358,295,125,254,556,000"},"line_mean":{"kind":"number","value":28.89,"string":"28.89"},"line_max":{"kind":"number","value":77,"string":"77"},"alpha_frac":{"kind":"number","value":0.7423887588,"string":"0.742389"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.6946847960444993,"string":"3.694685"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":51,"cells":{"repo_name":{"kind":"string","value":"gangadharkadam/contributionerp"},"path":{"kind":"string","value":"erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py"},"copies":{"kind":"string","value":"46"},"size":{"kind":"string","value":"3173"},"content":{"kind":"string","value":"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. See license.txt\nfrom __future__ import unicode_literals\n\nimport frappe\nfrom frappe import _\n\ndef execute(filters=None):\n\tcolumns = get_columns()\n\tproj_details = get_project_details()\n\tpr_item_map = get_purchased_items_cost()\n\tse_item_map = get_issued_items_cost()\n\tdn_item_map = get_delivered_items_cost()\n\n\tdata = []\n\tfor project in proj_details:\n\t\tdata.append([project.name, pr_item_map.get(project.name, 0),\n\t\t\tse_item_map.get(project.name, 0), dn_item_map.get(project.name, 0),\n\t\t\tproject.project_name, project.status, project.company,\n\t\t\tproject.customer, project.estimated_costing, project.expected_start_date,\n\t\t\tproject.expected_end_date])\n\n\treturn columns, data\n\ndef get_columns():\n\treturn [_(\"Project Id\") + \":Link/Project:140\", _(\"Cost of Purchased Items\") + \":Currency:160\",\n\t\t_(\"Cost of Issued Items\") + \":Currency:160\", _(\"Cost of Delivered Items\") + \":Currency:160\",\n\t\t_(\"Project Name\") + \"::120\", _(\"Project Status\") + \"::120\", _(\"Company\") + \":Link/Company:100\",\n\t\t_(\"Customer\") + \":Link/Customer:140\", _(\"Project Value\") + \":Currency:120\",\n\t\t_(\"Project Start Date\") + \":Date:120\", _(\"Completion Date\") + \":Date:120\"]\n\ndef get_project_details():\n\treturn frappe.db.sql(\"\"\" select name, project_name, status, company, customer, estimated_costing,\n\t\texpected_start_date, expected_end_date from tabProject where docstatus < 2\"\"\", as_dict=1)\n\ndef get_purchased_items_cost():\n\tpr_items = frappe.db.sql(\"\"\"select project_name, sum(base_net_amount) as amount\n\t\tfrom `tabPurchase Receipt Item` where ifnull(project_name, '') != ''\n\t\tand docstatus = 1 group by project_name\"\"\", as_dict=1)\n\n\tpr_item_map = {}\n\tfor item in pr_items:\n\t\tpr_item_map.setdefault(item.project_name, item.amount)\n\n\treturn pr_item_map\n\ndef get_issued_items_cost():\n\tse_items = frappe.db.sql(\"\"\"select se.project_name, sum(se_item.amount) as amount\n\t\tfrom `tabStock Entry` se, `tabStock Entry Detail` se_item\n\t\twhere se.name = se_item.parent and se.docstatus = 1 and ifnull(se_item.t_warehouse, '') = ''\n\t\tand ifnull(se.project_name, '') != '' group by se.project_name\"\"\", as_dict=1)\n\n\tse_item_map = {}\n\tfor item in se_items:\n\t\tse_item_map.setdefault(item.project_name, item.amount)\n\n\treturn se_item_map\n\ndef get_delivered_items_cost():\n\tdn_items = frappe.db.sql(\"\"\"select dn.project_name, sum(dn_item.base_net_amount) as amount\n\t\tfrom `tabDelivery Note` dn, `tabDelivery Note Item` dn_item\n\t\twhere dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project_name, '') != ''\n\t\tgroup by dn.project_name\"\"\", as_dict=1)\n\n\tsi_items = frappe.db.sql(\"\"\"select si.project_name, sum(si_item.base_net_amount) as amount\n\t\tfrom `tabSales Invoice` si, `tabSales Invoice Item` si_item\n\t\twhere si.name = si_item.parent and si.docstatus = 1 and ifnull(si.update_stock, 0) = 1\n\t\tand ifnull(si.is_pos, 0) = 1 and ifnull(si.project_name, '') != ''\n\t\tgroup by si.project_name\"\"\", as_dict=1)\n\n\n\tdn_item_map = {}\n\tfor item in dn_items:\n\t\tdn_item_map.setdefault(item.project_name, item.amount)\n\n\tfor item in si_items:\n\t\tdn_item_map.setdefault(item.project_name, item.amount)\n\n\treturn dn_item_map\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":-2526490087109348000,"string":"-2,526,490,087,109,348,000"},"line_mean":{"kind":"number","value":39.164556962,"string":"39.164557"},"line_max":{"kind":"number","value":98,"string":"98"},"alpha_frac":{"kind":"number","value":0.6920895052,"string":"0.69209"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":2.987758945386064,"string":"2.987759"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":52,"cells":{"repo_name":{"kind":"string","value":"imsparsh/python-for-android"},"path":{"kind":"string","value":"python3-alpha/extra_modules/gdata/books/__init__.py"},"copies":{"kind":"string","value":"124"},"size":{"kind":"string","value":"18532"},"content":{"kind":"string","value":"#!/usr/bin/python\n\n\"\"\"\n Data Models for books.service\n\n All classes can be instantiated from an xml string using their FromString\n class method.\n\n Notes:\n * Book.title displays the first dc:title because the returned XML\n repeats that datum as atom:title.\n There is an undocumented gbs:openAccess element that is not parsed.\n\"\"\"\n\n__author__ = \"James Sams \"\n__copyright__ = \"Apache License v2.0\"\n\nimport atom\nimport gdata\n\n\nBOOK_SEARCH_NAMESPACE = 'http://schemas.google.com/books/2008'\nDC_NAMESPACE = 'http://purl.org/dc/terms' \nANNOTATION_REL = \"http://schemas.google.com/books/2008/annotation\"\nINFO_REL = \"http://schemas.google.com/books/2008/info\"\nLABEL_SCHEME = \"http://schemas.google.com/books/2008/labels\"\nPREVIEW_REL = \"http://schemas.google.com/books/2008/preview\"\nTHUMBNAIL_REL = \"http://schemas.google.com/books/2008/thumbnail\"\nFULL_VIEW = \"http://schemas.google.com/books/2008#view_all_pages\"\nPARTIAL_VIEW = \"http://schemas.google.com/books/2008#view_partial\"\nNO_VIEW = \"http://schemas.google.com/books/2008#view_no_pages\"\nUNKNOWN_VIEW = \"http://schemas.google.com/books/2008#view_unknown\"\nEMBEDDABLE = \"http://schemas.google.com/books/2008#embeddable\"\nNOT_EMBEDDABLE = \"http://schemas.google.com/books/2008#not_embeddable\"\n\n\n\nclass _AtomFromString(atom.AtomBase):\n\n #@classmethod\n def FromString(cls, s):\n return atom.CreateClassFromXMLString(cls, s)\n\n FromString = classmethod(FromString)\n\n\nclass Creator(_AtomFromString):\n \"\"\"\n The element identifies an author-or more generally, an entity\n responsible for creating the volume in question. Examples of a creator\n include a person, an organization, or a service. In the case of \n anthologies, proceedings, or other edited works, this field may be used to \n indicate editors or other entities responsible for collecting the volume's \n contents.\n \n This element appears as a child of . If there are multiple authors or\n contributors to the book, there may be multiple elements in the\n volume entry (one for each creator or contributor).\n \"\"\"\n\n _tag = 'creator'\n _namespace = DC_NAMESPACE\n\n\nclass Date(_AtomFromString): #iso 8601 / W3CDTF profile\n \"\"\"\n The element indicates the publication date of the specific volume\n in question. If the book is a reprint, this is the reprint date, not the \n original publication date. The date is encoded according to the ISO-8601 \n standard (and more specifically, the W3CDTF profile).\n\n The element can appear only as a child of .\n \n Usually only the year or the year and the month are given.\n\n YYYY-MM-DDThh:mm:ssTZD TZD = -hh:mm or +hh:mm\n \"\"\"\n \n _tag = 'date' \n _namespace = DC_NAMESPACE\n \n\nclass Description(_AtomFromString):\n \"\"\"\n The element includes text that describes a book or book \n result. In a search result feed, this may be a search result \"snippet\" that\n contains the words around the user's search term. For a single volume feed,\n this element may contain a synopsis of the book.\n\n The element can appear only as a child of \n \"\"\"\n\n _tag = 'description'\n _namespace = DC_NAMESPACE\n\n\nclass Format(_AtomFromString):\n \"\"\"\n The element describes the physical properties of the volume. \n Currently, it indicates the number of pages in the book, but more \n information may be added to this field in the future.\n\n This element can appear only as a child of .\n \"\"\"\n\n _tag = 'format'\n _namespace = DC_NAMESPACE\n\n\nclass Identifier(_AtomFromString):\n \"\"\"\n The element provides an unambiguous reference to a \n particular book.\n * Every contains at least one child.\n * The first identifier is always the unique string Book Search has assigned\n to the volume (such as s1gVAAAAYAAJ). This is the ID that appears in the \n book's URL in the Book Search GUI, as well as in the URL of that book's \n single item feed.\n * Many books contain additional elements. These provide \n alternate, external identifiers to the volume. Such identifiers may \n include the ISBNs, ISSNs, Library of Congress Control Numbers (LCCNs), \n and OCLC numbers; they are prepended with a corresponding namespace \n prefix (such as \"ISBN:\").\n * Any can be passed to the Dynamic Links, used to \n instantiate an Embedded Viewer, or even used to construct static links to\n Book Search.\n The element can appear only as a child of .\n \"\"\"\n\n _tag = 'identifier'\n _namespace = DC_NAMESPACE\n\n\nclass Publisher(_AtomFromString):\n \"\"\"\n The element contains the name of the entity responsible for \n producing and distributing the volume (usually the specific edition of this\n book). Examples of a publisher include a person, an organization, or a \n service.\n\n This element can appear only as a child of . If there is more than \n one publisher, multiple elements may appear.\n \"\"\"\n\n _tag = 'publisher'\n _namespace = DC_NAMESPACE\n\n\nclass Subject(_AtomFromString):\n \"\"\"\n The element identifies the topic of the book. Usually this is \n a Library of Congress Subject Heading (LCSH) or Book Industry Standards \n and Communications Subject Heading (BISAC).\n\n The element can appear only as a child of . There may \n be multiple elements per entry.\n \"\"\"\n\n _tag = 'subject'\n _namespace = DC_NAMESPACE\n\n\nclass Title(_AtomFromString):\n \"\"\"\n The element contains the title of a book as it was published. If\n a book has a subtitle, it appears as a second element in the book\n result's .\n \"\"\"\n\n _tag = 'title'\n _namespace = DC_NAMESPACE\n\n\nclass Viewability(_AtomFromString):\n \"\"\"\n Google Book Search respects the user's local copyright restrictions. As a \n result, previews or full views of some books are not available in all \n locations. The element indicates whether a book is fully \n viewable, can be previewed, or only has \"about the book\" information. These\n three \"viewability modes\" are the same ones returned by the Dynamic Links \n API.\n\n The element can appear only as a child of .\n\n The value attribute will take the form of the following URIs to represent\n the relevant viewing capability:\n\n Full View: http://schemas.google.com/books/2008#view_all_pages\n Limited Preview: http://schemas.google.com/books/2008#view_partial\n Snippet View/No Preview: http://schemas.google.com/books/2008#view_no_pages\n Unknown view: http://schemas.google.com/books/2008#view_unknown\n \"\"\"\n\n _tag = 'viewability'\n _namespace = BOOK_SEARCH_NAMESPACE\n _attributes = atom.AtomBase._attributes.copy()\n _attributes['value'] = 'value'\n\n def __init__(self, value=None, text=None, \n extension_elements=None, extension_attributes=None):\n self.value = value\n _AtomFromString.__init__(self, extension_elements=extension_elements,\n extension_attributes=extension_attributes, text=text)\n\n\nclass Embeddability(_AtomFromString):\n \"\"\"\n Many of the books found on Google Book Search can be embedded on third-party\n sites using the Embedded Viewer. The element indicates \n whether a particular book result is available for embedding. By definition,\n a book that cannot be previewed on Book Search cannot be embedded on third-\n party sites.\n\n The element can appear only as a child of .\n\n The value attribute will take on one of the following URIs:\n embeddable: http://schemas.google.com/books/2008#embeddable\n not embeddable: http://schemas.google.com/books/2008#not_embeddable\n \"\"\"\n\n _tag = 'embeddability'\n _namespace = BOOK_SEARCH_NAMESPACE\n _attributes = atom.AtomBase._attributes.copy()\n _attributes['value'] = 'value'\n\n def __init__(self, value=None, text=None, extension_elements=None, \n extension_attributes=None):\n self.value = value\n _AtomFromString.__init__(self, extension_elements=extension_elements,\n extension_attributes=extension_attributes, text=text)\n\n\nclass Review(_AtomFromString):\n \"\"\"\n When present, the element contains a user-generated review for\n a given book. This element currently appears only in the user library and \n user annotation feeds, as a child of .\n\n type: text, html, xhtml\n xml:lang: id of the language, a guess, (always two letters?)\n \"\"\"\n\n _tag = 'review'\n _namespace = BOOK_SEARCH_NAMESPACE\n _attributes = atom.AtomBase._attributes.copy()\n _attributes['type'] = 'type'\n _attributes['{http://www.w3.org/XML/1998/namespace}lang'] = 'lang'\n \n def __init__(self, type=None, lang=None, text=None, \n extension_elements=None, extension_attributes=None):\n self.type = type\n self.lang = lang\n _AtomFromString.__init__(self, extension_elements=extension_elements,\n extension_attributes=extension_attributes, text=text)\n\n\nclass Rating(_AtomFromString):\n \"\"\"All attributes must take an integral string between 1 and 5.\n The min, max, and average attributes represent 'community' ratings. The\n value attribute is the user's (of the feed from which the item is fetched,\n not necessarily the authenticated user) rating of the book.\n \"\"\"\n\n _tag = 'rating'\n _namespace = gdata.GDATA_NAMESPACE\n _attributes = atom.AtomBase._attributes.copy()\n _attributes['min'] = 'min'\n _attributes['max'] = 'max'\n _attributes['average'] = 'average'\n _attributes['value'] = 'value'\n\n def __init__(self, min=None, max=None, average=None, value=None, text=None,\n extension_elements=None, extension_attributes=None):\n self.min = min \n self.max = max \n self.average = average\n self.value = value\n _AtomFromString.__init__(self, extension_elements=extension_elements,\n extension_attributes=extension_attributes, text=text)\n\n\nclass Book(_AtomFromString, gdata.GDataEntry):\n \"\"\"\n Represents an from either a search, annotation, library, or single\n item feed. Note that dc_title attribute is the proper title of the volume,\n title is an atom element and may not represent the full title.\n \"\"\"\n\n _tag = 'entry'\n _namespace = atom.ATOM_NAMESPACE\n _children = gdata.GDataEntry._children.copy()\n for i in (Creator, Identifier, Publisher, Subject,):\n _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, [i])\n for i in (Date, Description, Format, Viewability, Embeddability, \n Review, Rating): # Review, Rating maybe only in anno/lib entrys\n _children['{%s}%s' % (i._namespace, i._tag)] = (i._tag, i)\n # there is an atom title as well, should we clobber that?\n del(i)\n _children['{%s}%s' % (Title._namespace, Title._tag)] = ('dc_title', [Title])\n\n def to_dict(self):\n \"\"\"Returns a dictionary of the book's available metadata. If the data\n cannot be discovered, it is not included as a key in the returned dict.\n The possible keys are: authors, embeddability, date, description, \n format, identifiers, publishers, rating, review, subjects, title, and\n viewability.\n\n Notes:\n * Plural keys will be lists\n * Singular keys will be strings\n * Title, despite usually being a list, joins the title and subtitle\n with a space as a single string.\n * embeddability and viewability only return the portion of the URI \n after #\n * identifiers is a list of tuples, where the first item of each tuple\n is the type of identifier and the second item is the identifying\n string. Note that while doing dict() on this tuple may be possible,\n some items may have multiple of the same identifier and converting\n to a dict may resulted in collisions/dropped data.\n * Rating returns only the user's rating. See Rating class for precise\n definition.\n \"\"\"\n d = {}\n if self.GetAnnotationLink():\n d['annotation'] = self.GetAnnotationLink().href\n if self.creator:\n d['authors'] = [x.text for x in self.creator]\n if self.embeddability:\n d['embeddability'] = self.embeddability.value.split('#')[-1]\n if self.date:\n d['date'] = self.date.text\n if self.description:\n d['description'] = self.description.text\n if self.format:\n d['format'] = self.format.text\n if self.identifier:\n d['identifiers'] = [('google_id', self.identifier[0].text)]\n for x in self.identifier[1:]:\n l = x.text.split(':') # should we lower the case of the ids?\n d['identifiers'].append((l[0], ':'.join(l[1:])))\n if self.GetInfoLink():\n d['info'] = self.GetInfoLink().href\n if self.GetPreviewLink():\n d['preview'] = self.GetPreviewLink().href\n if self.publisher:\n d['publishers'] = [x.text for x in self.publisher]\n if self.rating:\n d['rating'] = self.rating.value\n if self.review:\n d['review'] = self.review.text\n if self.subject:\n d['subjects'] = [x.text for x in self.subject]\n if self.GetThumbnailLink():\n d['thumbnail'] = self.GetThumbnailLink().href\n if self.dc_title:\n d['title'] = ' '.join([x.text for x in self.dc_title])\n if self.viewability:\n d['viewability'] = self.viewability.value.split('#')[-1]\n return d\n\n def __init__(self, creator=None, date=None, \n description=None, format=None, author=None, identifier=None, \n publisher=None, subject=None, dc_title=None, viewability=None, \n embeddability=None, review=None, rating=None, category=None, \n content=None, contributor=None, atom_id=None, link=None,\n published=None, rights=None, source=None, summary=None, \n title=None, control=None, updated=None, text=None, \n extension_elements=None, extension_attributes=None):\n self.creator = creator\n self.date = date\n self.description = description\n self.format = format\n self.identifier = identifier\n self.publisher = publisher\n self.subject = subject\n self.dc_title = dc_title or []\n self.viewability = viewability\n self.embeddability = embeddability\n self.review = review\n self.rating = rating\n gdata.GDataEntry.__init__(self, author=author, category=category, \n content=content, contributor=contributor, atom_id=atom_id,\n link=link, published=published, rights=rights, source=source,\n summary=summary, title=title, control=control, updated=updated, \n text=text, extension_elements=extension_elements, \n extension_attributes=extension_attributes)\n \n def GetThumbnailLink(self):\n \"\"\"Returns the atom.Link object representing the thumbnail URI.\"\"\"\n for i in self.link:\n if i.rel == THUMBNAIL_REL:\n return i\n \n def GetInfoLink(self):\n \"\"\"\n Returns the atom.Link object representing the human-readable info URI.\n \"\"\"\n for i in self.link:\n if i.rel == INFO_REL:\n return i\n \n def GetPreviewLink(self):\n \"\"\"Returns the atom.Link object representing the preview URI.\"\"\"\n for i in self.link:\n if i.rel == PREVIEW_REL:\n return i\n \n def GetAnnotationLink(self):\n \"\"\"\n Returns the atom.Link object representing the Annotation URI.\n Note that the use of www.books in the href of this link seems to make\n this information useless. Using books.service.ANNOTATION_FEED and \n BOOK_SERVER to construct your URI seems to work better.\n \"\"\"\n for i in self.link:\n if i.rel == ANNOTATION_REL:\n return i\n \n def set_rating(self, value):\n \"\"\"Set user's rating. Must be an integral string between 1 nad 5\"\"\"\n assert (value in ('1','2','3','4','5'))\n if not isinstance(self.rating, Rating):\n self.rating = Rating()\n self.rating.value = value\n \n def set_review(self, text, type='text', lang='en'):\n \"\"\"Set user's review text\"\"\"\n self.review = Review(text=text, type=type, lang=lang)\n \n def get_label(self):\n \"\"\"Get users label for the item as a string\"\"\"\n for i in self.category:\n if i.scheme == LABEL_SCHEME:\n return i.term\n \n def set_label(self, term):\n \"\"\"Clear pre-existing label for the item and set term as the label.\"\"\"\n self.remove_label()\n self.category.append(atom.Category(term=term, scheme=LABEL_SCHEME))\n \n def remove_label(self):\n \"\"\"Clear the user's label for the item\"\"\"\n ln = len(self.category)\n for i, j in enumerate(self.category[::-1]):\n if j.scheme == LABEL_SCHEME:\n del(self.category[ln-1-i])\n\n def clean_annotations(self):\n \"\"\"Clear all annotations from an item. Useful for taking an item from\n another user's library/annotation feed and adding it to the \n authenticated user's library without adopting annotations.\"\"\"\n self.remove_label()\n self.review = None\n self.rating = None\n\n \n def get_google_id(self):\n \"\"\"Get Google's ID of the item.\"\"\"\n return self.id.text.split('/')[-1]\n\n\nclass BookFeed(_AtomFromString, gdata.GDataFeed):\n \"\"\"Represents a feed of entries from a search.\"\"\"\n\n _tag = 'feed'\n _namespace = atom.ATOM_NAMESPACE\n _children = gdata.GDataFeed._children.copy()\n _children['{%s}%s' % (Book._namespace, Book._tag)] = (Book._tag, [Book])\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testfile('datamodels.txt')\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":7942596538569523000,"string":"7,942,596,538,569,523,000"},"line_mean":{"kind":"number","value":38.1797040169,"string":"38.179704"},"line_max":{"kind":"number","value":80,"string":"80"},"alpha_frac":{"kind":"number","value":0.6424023311,"string":"0.642402"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.106359406159982,"string":"4.106359"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":53,"cells":{"repo_name":{"kind":"string","value":"jjmleiro/hue"},"path":{"kind":"string","value":"desktop/core/ext-py/Django-1.6.10/tests/save_delete_hooks/models.py"},"copies":{"kind":"string","value":"130"},"size":{"kind":"string","value":"1035"},"content":{"kind":"string","value":"\"\"\"\n13. Adding hooks before/after saving and deleting\n\nTo execute arbitrary code around ``save()`` and ``delete()``, just subclass\nthe methods.\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\n\n\n@python_2_unicode_compatible\nclass Person(models.Model):\n first_name = models.CharField(max_length=20)\n last_name = models.CharField(max_length=20)\n\n def __init__(self, *args, **kwargs):\n super(Person, self).__init__(*args, **kwargs)\n self.data = []\n\n def __str__(self):\n return \"%s %s\" % (self.first_name, self.last_name)\n\n def save(self, *args, **kwargs):\n self.data.append(\"Before save\")\n # Call the \"real\" save() method\n super(Person, self).save(*args, **kwargs)\n self.data.append(\"After save\")\n\n def delete(self):\n self.data.append(\"Before deletion\")\n # Call the \"real\" delete() method\n super(Person, self).delete()\n self.data.append(\"After deletion\")\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":5388654497079564000,"string":"5,388,654,497,079,564,000"},"line_mean":{"kind":"number","value":28.5714285714,"string":"28.571429"},"line_max":{"kind":"number","value":75,"string":"75"},"alpha_frac":{"kind":"number","value":0.6357487923,"string":"0.635749"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.7364620938628157,"string":"3.736462"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":54,"cells":{"repo_name":{"kind":"string","value":"kasioumis/invenio"},"path":{"kind":"string","value":"invenio/modules/documentation/views.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3579"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# This file is part of Invenio.\n# Copyright (C) 2013, 2014 CERN.\n#\n# Invenio is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the\n# License, or (at your option) any later version.\n#\n# Invenio is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Invenio; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n\"\"\"Documentation Flask Blueprint.\"\"\"\n\nimport os\n\nfrom flask import render_template, current_app, abort, url_for, Blueprint\nfrom flask.helpers import send_from_directory\nfrom werkzeug.utils import cached_property, import_string\n\nfrom sphinx.websupport import WebSupport\nfrom sphinx.websupport.errors import DocumentNotFoundError\n\nfrom invenio.base.globals import cfg\nfrom invenio.base.i18n import _\nfrom flask.ext.breadcrumbs import (default_breadcrumb_root,\n register_breadcrumb,\n current_breadcrumbs)\nfrom flask.ext.menu import register_menu\n\n\nclass DocsBlueprint(Blueprint):\n\n \"\"\"Wrap blueprint with Sphinx ``WebSupport``.\"\"\"\n\n @cached_property\n def documentation_package(self):\n \"\"\"Return documentation package.\"\"\"\n try:\n invenio_docs = import_string(cfg['DOCUMENTATION_PACKAGE'])\n except ImportError:\n import docs as invenio_docs\n return invenio_docs\n\n @cached_property\n def support(self):\n \"\"\"Return an instance of Sphinx ``WebSupport``.\"\"\"\n builddir = os.path.abspath(os.path.join(\n current_app.instance_path, 'docs'))\n return WebSupport(\n srcdir=self.documentation_package.__path__[0],\n builddir=builddir,\n staticroot=os.path.join(blueprint.url_prefix, 'static'),\n docroot=blueprint.url_prefix\n )\n\n def send_static_file(self, filename):\n \"\"\"Return static file.\"\"\"\n try:\n return super(self.__class__, self).send_static_file(filename)\n except:\n cache_timeout = self.get_send_file_max_age(filename)\n return send_from_directory(\n os.path.join(current_app.instance_path, \"docs\", \"static\"),\n filename,\n cache_timeout=cache_timeout)\n\n\nblueprint = DocsBlueprint('documentation', __name__,\n url_prefix=\"/documentation\",\n template_folder='templates', static_folder='static')\n\ndefault_breadcrumb_root(blueprint, '.documentation')\n\n\n@blueprint.route('/', strict_slashes=True)\n@blueprint.route('/')\n@register_menu(blueprint, 'main.documentation', _('Help'), order=99)\n@register_breadcrumb(blueprint, '.', _('Help'))\ndef index(docname=None):\n \"\"\"Render documentation page.\"\"\"\n try:\n document = blueprint.support.get_document(\n docname or cfg[\"DOCUMENTATION_INDEX\"])\n except DocumentNotFoundError:\n abort(404)\n additional_breadcrumbs = [{'text': document['title'],\n 'url': url_for('.index', docname=docname)}]\n return render_template(\n 'documentation/index.html', document=document,\n breadcrumbs=current_breadcrumbs + additional_breadcrumbs)\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":9194547294458207000,"string":"9,194,547,294,458,207,000"},"line_mean":{"kind":"number","value":35.5204081633,"string":"35.520408"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.6577256217,"string":"0.657726"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.260714285714286,"string":"4.260714"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":55,"cells":{"repo_name":{"kind":"string","value":"crosswalk-project/blink-crosswalk-efl"},"path":{"kind":"string","value":"Source/devtools/scripts/concatenate_module_scripts.py"},"copies":{"kind":"string","value":"11"},"size":{"kind":"string","value":"2413"},"content":{"kind":"string","value":"#!/usr/bin/env python\n#\n# Copyright 2014 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"\nConcatenates module scripts based on the module.json descriptor.\nOptionally, minifies the result using rjsmin.\n\"\"\"\n\nfrom cStringIO import StringIO\nfrom os import path\nimport os\nimport re\nimport sys\n\ntry:\n import simplejson as json\nexcept ImportError:\n import json\n\nrjsmin_path = path.abspath(path.join(\n path.dirname(__file__),\n '..',\n '..',\n 'build',\n 'scripts'))\nsys.path.append(rjsmin_path)\nimport rjsmin\n\n\ndef read_file(filename):\n with open(path.normpath(filename), 'rt') as file:\n return file.read()\n\n\ndef write_file(filename, content):\n # This is here to avoid overwriting source tree files due to hard links.\n if path.exists(filename):\n os.remove(filename)\n with open(filename, 'wt') as file:\n file.write(content)\n\n\ndef concatenate_scripts(file_names, module_dir, output_dir, output):\n for file_name in file_names:\n output.write('/* %s */\\n' % file_name)\n file_path = path.join(module_dir, file_name)\n\n # This allows to also concatenate generated files found in output_dir.\n if not path.isfile(file_path):\n file_path = path.join(output_dir, path.basename(module_dir), file_name)\n output.write(read_file(file_path))\n output.write(';')\n\n\ndef main(argv):\n if len(argv) < 3:\n print('Usage: %s module_json output_file no_minify' % argv[0])\n return 1\n\n module_json_file_name = argv[1]\n output_file_name = argv[2]\n no_minify = len(argv) > 3 and argv[3]\n module_dir = path.dirname(module_json_file_name)\n\n output = StringIO()\n descriptor = None\n try:\n descriptor = json.loads(read_file(module_json_file_name))\n except:\n print 'ERROR: Failed to load JSON from ' + module_json_file_name\n raise\n\n # pylint: disable=E1103\n scripts = descriptor.get('scripts')\n assert(scripts)\n output_root_dir = path.join(path.dirname(output_file_name), '..')\n concatenate_scripts(scripts, module_dir, output_root_dir, output)\n\n output_script = output.getvalue()\n output.close()\n write_file(output_file_name, output_script if no_minify else rjsmin.jsmin(output_script))\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv))\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-1551874067306360800,"string":"-1,551,874,067,306,360,800"},"line_mean":{"kind":"number","value":26.7356321839,"string":"26.735632"},"line_max":{"kind":"number","value":93,"string":"93"},"alpha_frac":{"kind":"number","value":0.6547865727,"string":"0.654787"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.5485294117647057,"string":"3.548529"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":56,"cells":{"repo_name":{"kind":"string","value":"aman-iitj/scipy"},"path":{"kind":"string","value":"scipy/ndimage/__init__.py"},"copies":{"kind":"string","value":"46"},"size":{"kind":"string","value":"5436"},"content":{"kind":"string","value":"\"\"\"\n=========================================================\nMulti-dimensional image processing (:mod:`scipy.ndimage`)\n=========================================================\n\n.. currentmodule:: scipy.ndimage\n\nThis package contains various functions for multi-dimensional image\nprocessing.\n\n\nFilters :mod:`scipy.ndimage.filters`\n====================================\n\n.. module:: scipy.ndimage.filters\n\n.. autosummary::\n :toctree: generated/\n\n convolve - Multi-dimensional convolution\n convolve1d - 1-D convolution along the given axis\n correlate - Multi-dimensional correlation\n correlate1d - 1-D correlation along the given axis\n gaussian_filter\n gaussian_filter1d\n gaussian_gradient_magnitude\n gaussian_laplace\n generic_filter - Multi-dimensional filter using a given function\n generic_filter1d - 1-D generic filter along the given axis\n generic_gradient_magnitude\n generic_laplace\n laplace - n-D Laplace filter based on approximate second derivatives\n maximum_filter\n maximum_filter1d\n median_filter - Calculates a multi-dimensional median filter\n minimum_filter\n minimum_filter1d\n percentile_filter - Calculates a multi-dimensional percentile filter\n prewitt\n rank_filter - Calculates a multi-dimensional rank filter\n sobel\n uniform_filter - Multi-dimensional uniform filter\n uniform_filter1d - 1-D uniform filter along the given axis\n\nFourier filters :mod:`scipy.ndimage.fourier`\n============================================\n\n.. module:: scipy.ndimage.fourier\n\n.. autosummary::\n :toctree: generated/\n\n fourier_ellipsoid\n fourier_gaussian\n fourier_shift\n fourier_uniform\n\nInterpolation :mod:`scipy.ndimage.interpolation`\n================================================\n\n.. module:: scipy.ndimage.interpolation\n\n.. autosummary::\n :toctree: generated/\n\n affine_transform - Apply an affine transformation\n geometric_transform - Apply an arbritrary geometric transform\n map_coordinates - Map input array to new coordinates by interpolation\n rotate - Rotate an array\n shift - Shift an array\n spline_filter\n spline_filter1d\n zoom - Zoom an array\n\nMeasurements :mod:`scipy.ndimage.measurements`\n==============================================\n\n.. module:: scipy.ndimage.measurements\n\n.. autosummary::\n :toctree: generated/\n\n center_of_mass - The center of mass of the values of an array at labels\n extrema - Min's and max's of an array at labels, with their positions\n find_objects - Find objects in a labeled array\n histogram - Histogram of the values of an array, optionally at labels\n label - Label features in an array\n labeled_comprehension\n maximum\n maximum_position\n mean - Mean of the values of an array at labels\n minimum\n minimum_position\n standard_deviation - Standard deviation of an n-D image array\n sum - Sum of the values of the array\n variance - Variance of the values of an n-D image array\n watershed_ift\n\nMorphology :mod:`scipy.ndimage.morphology`\n==========================================\n\n.. module:: scipy.ndimage.morphology\n\n.. autosummary::\n :toctree: generated/\n\n binary_closing\n binary_dilation\n binary_erosion\n binary_fill_holes\n binary_hit_or_miss\n binary_opening\n binary_propagation\n black_tophat\n distance_transform_bf\n distance_transform_cdt\n distance_transform_edt\n generate_binary_structure\n grey_closing\n grey_dilation\n grey_erosion\n grey_opening\n iterate_structure\n morphological_gradient\n morphological_laplace\n white_tophat\n\nUtility\n=======\n\n.. currentmodule:: scipy.ndimage\n\n.. autosummary::\n :toctree: generated/\n\n imread - Load an image from a file\n\n\"\"\"\n\n# Copyright (C) 2003-2005 Peter J. Verveer\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. The name of the author may not be used to endorse or promote\n# products derived from this software without specific prior\n# written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS\n# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom .filters import *\nfrom .fourier import *\nfrom .interpolation import *\nfrom .measurements import *\nfrom .morphology import *\nfrom .io import *\n\n__version__ = '2.0'\n\n__all__ = [s for s in dir() if not s.startswith('_')]\nfrom numpy.testing import Tester\ntest = Tester().test\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-2567934379819761000,"string":"-2,567,934,379,819,761,000"},"line_mean":{"kind":"number","value":28.7049180328,"string":"28.704918"},"line_max":{"kind":"number","value":74,"string":"74"},"alpha_frac":{"kind":"number","value":0.7023546726,"string":"0.702355"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.260188087774295,"string":"4.260188"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":57,"cells":{"repo_name":{"kind":"string","value":"JackDandy/SickGear"},"path":{"kind":"string","value":"lib/guessit/transfo/guess_weak_episodes_rexps.py"},"copies":{"kind":"string","value":"21"},"size":{"kind":"string","value":"2127"},"content":{"kind":"string","value":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n#\n# GuessIt - A library for guessing information from filenames\n# Copyright (c) 2012 Nicolas Wack \n#\n# GuessIt is free software; you can redistribute it and/or modify it under\n# the terms of the Lesser GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# GuessIt is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# Lesser GNU General Public License for more details.\n#\n# You should have received a copy of the Lesser GNU General Public License\n# along with this program. If not, see .\n#\n\nfrom __future__ import unicode_literals\nfrom guessit import Guess\nfrom guessit.transfo import SingleNodeGuesser\nfrom guessit.patterns import weak_episode_rexps\nimport re\nimport logging\n\nlog = logging.getLogger(__name__)\n\n\ndef guess_weak_episodes_rexps(string, node):\n if 'episodeNumber' in node.root.info:\n return None, None\n\n for rexp, span_adjust in weak_episode_rexps:\n match = re.search(rexp, string, re.IGNORECASE)\n if match:\n metadata = match.groupdict()\n span = (match.start() + span_adjust[0],\n match.end() + span_adjust[1])\n\n epnum = int(metadata['episodeNumber'])\n if epnum > 100:\n season, epnum = epnum // 100, epnum % 100\n # episodes which have a season > 25 are most likely errors\n # (Simpsons is at 23!)\n if season > 25:\n continue\n return Guess({ 'season': season,\n 'episodeNumber': epnum },\n confidence=0.6), span\n else:\n return Guess(metadata, confidence=0.3), span\n\n return None, None\n\n\nguess_weak_episodes_rexps.use_node = True\n\n\ndef process(mtree):\n SingleNodeGuesser(guess_weak_episodes_rexps, 0.6, log).process(mtree)\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":7319974327149637000,"string":"7,319,974,327,149,637,000"},"line_mean":{"kind":"number","value":33.3064516129,"string":"33.306452"},"line_max":{"kind":"number","value":74,"string":"74"},"alpha_frac":{"kind":"number","value":0.6431593794,"string":"0.643159"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.9981203007518795,"string":"3.99812"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":58,"cells":{"repo_name":{"kind":"string","value":"mrknow/filmkodi"},"path":{"kind":"string","value":"plugin.video.fanfilm/resources/lib/resolvers/putstream.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"1131"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\n'''\n FanFilm Add-on\n Copyright (C) 2015 lambda\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see .\n'''\n\n\nimport re\nfrom resources.lib.libraries import client\n\n\ndef resolve(url):\n try:\n url = url.replace('/embed-', '/')\n url = re.compile('//.+?/([\\w]+)').findall(url)[0]\n url = 'http://putstream.com/embed-%s.html' % url\n\n result = client.request(url)\n\n url = re.compile('file *: *\"(http.+?)\"').findall(result)[-1]\n return url\n except:\n return\n\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":8110821232460552000,"string":"8,110,821,232,460,552,000"},"line_mean":{"kind":"number","value":28.7631578947,"string":"28.763158"},"line_max":{"kind":"number","value":73,"string":"73"},"alpha_frac":{"kind":"number","value":0.6542882405,"string":"0.654288"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.127737226277373,"string":"4.127737"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":59,"cells":{"repo_name":{"kind":"string","value":"joewashear007/ScrappyDoo"},"path":{"kind":"string","value":"copy.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"3382"},"content":{"kind":"string","value":"import os\nimport shutil\nimport zipfile\nimport fnmatch\nimport uuid\n\ndef main():\n kits = findAll(\".\")\n for kit in kits:\n print(\"* \", kit, \" -> \", kits[kit])\n print()\n print()\n print(\"Starting extraction:\")\n print(\"------------------------------------------\")\n extractKits(kits)\n\ndef findAll(dir):\n print()\n print(\"All zip files:\")\n print(\"---------------------------\")\n\n kits = {}\n files = os.listdir(\".\")\n for file in files:\n if file.endswith(\".zip\"):\n kits[file] = getType(file)\n return kits\n\ndef getType(file):\n if \"-pp\" in file:\n return \"paper\"\n if \"-ap\" in file:\n return \"alpha\"\n if \"-ep\" in file:\n return \"embellishment\"\n\n options = {1: \"embellishment\", 2: \"alpha\", 3: \"paper\", 4:\"other\"}\n #DEBUG:\n return options[1];\n goodInput = False\n while not goodInput:\n print()\n print(\"File: \", file)\n print(\" 1) Embellishment\")\n print(\" 2) Alpha\")\n print(\" 3) Paper\")\n print(\" 4) Other\")\n action = input(\"Please Enter the Number (default = 1):\")\n if action is \"\":\n return options[1];\n if action.isdigit():\n actionNum = int(action)\n if actionNum > 0 and actionNum < len(options)+1:\n return options[actionNum]\n\n\ndef extractKits(kits):\n tmpDir = \"./tmp\";\n kitNames = {}\n x = 0\n for kit in kits:\n # kit = next(iter(kits.keys()))\n x = x + 1\n print()\n print()\n print()\n print(\"Extracting: \", kit, \" ( \", x, \" of \", len(kits), \")\")\n kitStr = kit.rsplit(\"-\", 1)[0]\n print(\"Kit Name: \", kitStr)\n if kitStr in kitNames:\n name = input(\"Please Enter Kit Name (default = \"+kitNames[kitStr]+\"): \")\n name = name or kitNames[kitStr]\n else:\n name = input(\"Please Enter Kit Name: \")\n kitNames[kitStr] =name\n\n if os.path.exists(tmpDir):\n shutil.rmtree(tmpDir)\n else:\n os.makedirs(tmpDir)\n\n if not os.path.exists(\"./\" + name):\n os.makedirs(\"./\" + name)\n kitzip = zipfile.ZipFile(\"./\" + kit)\n kitzip.extractall(tmpDir)\n images = copyExtractedFiles(\"./\" + name +\"/\")\n createManifest(kit, name, images, kits[kit])\n\ndef copyExtractedFiles(dest):\n matches = []\n filenames = [\".png\", \".jpg\"]\n for rootpath, subdirs, files in os.walk(\"./tmp\"):\n for filename in files:\n if os.path.splitext(filename)[1].lower() in filenames:\n # print(os.path.join(rootpath, filename).replace('\\\\','/'))\n shutil.move(os.path.join(rootpath, filename).replace('\\\\','/'), dest+filename)\n matches.append(dest + filename)\n return matches\n\n\ndef createManifest(kit, name, images, type):\n manifest = []\n manifest.append('')\n manifest.append('')\n manifest.append('')\n for image in images:\n manifest.append('')\n manifest.append('')\n manifest.append('')\n\n with open('./'+name+'/package.manifestx', 'w') as f:\n for line in manifest:\n f.write(line + os.linesep)\n\n\nif __name__ == \"__main__\":\n main()\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":7577268462791354000,"string":"7,577,268,462,791,354,000"},"line_mean":{"kind":"number","value":27.905982906,"string":"27.905983"},"line_max":{"kind":"number","value":102,"string":"102"},"alpha_frac":{"kind":"number","value":0.5277942046,"string":"0.527794"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.7042716319824756,"string":"3.704272"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":60,"cells":{"repo_name":{"kind":"string","value":"sasukeh/neutron"},"path":{"kind":"string","value":"neutron/api/rpc/callbacks/resource_manager.py"},"copies":{"kind":"string","value":"32"},"size":{"kind":"string","value":"4710"},"content":{"kind":"string","value":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport abc\nimport collections\n\nfrom oslo_log import log as logging\nimport six\n\nfrom neutron.api.rpc.callbacks import exceptions as rpc_exc\nfrom neutron.api.rpc.callbacks import resources\nfrom neutron.callbacks import exceptions\n\nLOG = logging.getLogger(__name__)\n\n# TODO(QoS): split the registry/resources_rpc modules into two separate things:\n# one for pull and one for push APIs\n\n\ndef _validate_resource_type(resource_type):\n if not resources.is_valid_resource_type(resource_type):\n raise exceptions.Invalid(element='resource', value=resource_type)\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass ResourceCallbacksManager(object):\n \"\"\"A callback system that allows information providers in a loose manner.\n \"\"\"\n\n # This hook is to allow tests to get new objects for the class\n _singleton = True\n\n def __new__(cls, *args, **kwargs):\n if not cls._singleton:\n return super(ResourceCallbacksManager, cls).__new__(cls)\n\n if not hasattr(cls, '_instance'):\n cls._instance = super(ResourceCallbacksManager, cls).__new__(cls)\n return cls._instance\n\n @abc.abstractmethod\n def _add_callback(self, callback, resource_type):\n pass\n\n @abc.abstractmethod\n def _delete_callback(self, callback, resource_type):\n pass\n\n def register(self, callback, resource_type):\n \"\"\"Register a callback for a resource type.\n\n :param callback: the callback. It must raise or return NeutronObject.\n :param resource_type: must be a valid resource type.\n \"\"\"\n LOG.debug(\"Registering callback for %s\", resource_type)\n _validate_resource_type(resource_type)\n self._add_callback(callback, resource_type)\n\n def unregister(self, callback, resource_type):\n \"\"\"Unregister callback from the registry.\n\n :param callback: the callback.\n :param resource_type: must be a valid resource type.\n \"\"\"\n LOG.debug(\"Unregistering callback for %s\", resource_type)\n _validate_resource_type(resource_type)\n self._delete_callback(callback, resource_type)\n\n @abc.abstractmethod\n def clear(self):\n \"\"\"Brings the manager to a clean state.\"\"\"\n\n def get_subscribed_types(self):\n return list(self._callbacks.keys())\n\n\nclass ProducerResourceCallbacksManager(ResourceCallbacksManager):\n\n _callbacks = dict()\n\n def _add_callback(self, callback, resource_type):\n if resource_type in self._callbacks:\n raise rpc_exc.CallbacksMaxLimitReached(resource_type=resource_type)\n self._callbacks[resource_type] = callback\n\n def _delete_callback(self, callback, resource_type):\n try:\n del self._callbacks[resource_type]\n except KeyError:\n raise rpc_exc.CallbackNotFound(resource_type=resource_type)\n\n def clear(self):\n self._callbacks = dict()\n\n def get_callback(self, resource_type):\n _validate_resource_type(resource_type)\n try:\n return self._callbacks[resource_type]\n except KeyError:\n raise rpc_exc.CallbackNotFound(resource_type=resource_type)\n\n\nclass ConsumerResourceCallbacksManager(ResourceCallbacksManager):\n\n _callbacks = collections.defaultdict(set)\n\n def _add_callback(self, callback, resource_type):\n self._callbacks[resource_type].add(callback)\n\n def _delete_callback(self, callback, resource_type):\n try:\n self._callbacks[resource_type].remove(callback)\n if not self._callbacks[resource_type]:\n del self._callbacks[resource_type]\n except KeyError:\n raise rpc_exc.CallbackNotFound(resource_type=resource_type)\n\n def clear(self):\n self._callbacks = collections.defaultdict(set)\n\n def get_callbacks(self, resource_type):\n \"\"\"Return the callback if found, None otherwise.\n\n :param resource_type: must be a valid resource type.\n \"\"\"\n _validate_resource_type(resource_type)\n callbacks = self._callbacks[resource_type]\n if not callbacks:\n raise rpc_exc.CallbackNotFound(resource_type=resource_type)\n return callbacks\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-3525136190414913000,"string":"-3,525,136,190,414,913,000"},"line_mean":{"kind":"number","value":32.8848920863,"string":"32.884892"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.6791932059,"string":"0.679193"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.313186813186813,"string":"4.313187"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":61,"cells":{"repo_name":{"kind":"string","value":"listamilton/supermilton.repository"},"path":{"kind":"string","value":"script.module.youtube.dl/lib/youtube_dl/extractor/moniker.py"},"copies":{"kind":"string","value":"66"},"size":{"kind":"string","value":"3951"},"content":{"kind":"string","value":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport os.path\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n ExtractorError,\n remove_start,\n sanitized_Request,\n urlencode_postdata,\n)\n\n\nclass MonikerIE(InfoExtractor):\n IE_DESC = 'allmyvideos.net and vidspot.net'\n _VALID_URL = r'https?://(?:www\\.)?(?:allmyvideos|vidspot)\\.net/(?:(?:2|v)/v-)?(?P[a-zA-Z0-9_-]+)'\n\n _TESTS = [{\n 'url': 'http://allmyvideos.net/jih3nce3x6wn',\n 'md5': '710883dee1bfc370ecf9fa6a89307c88',\n 'info_dict': {\n 'id': 'jih3nce3x6wn',\n 'ext': 'mp4',\n 'title': 'youtube-dl test video',\n },\n }, {\n 'url': 'http://allmyvideos.net/embed-jih3nce3x6wn',\n 'md5': '710883dee1bfc370ecf9fa6a89307c88',\n 'info_dict': {\n 'id': 'jih3nce3x6wn',\n 'ext': 'mp4',\n 'title': 'youtube-dl test video',\n },\n }, {\n 'url': 'http://vidspot.net/l2ngsmhs8ci5',\n 'md5': '710883dee1bfc370ecf9fa6a89307c88',\n 'info_dict': {\n 'id': 'l2ngsmhs8ci5',\n 'ext': 'mp4',\n 'title': 'youtube-dl test video',\n },\n }, {\n 'url': 'https://www.vidspot.net/l2ngsmhs8ci5',\n 'only_matching': True,\n }, {\n 'url': 'http://vidspot.net/2/v-ywDf99',\n 'md5': '5f8254ce12df30479428b0152fb8e7ba',\n 'info_dict': {\n 'id': 'ywDf99',\n 'ext': 'mp4',\n 'title': 'IL FAIT LE MALIN EN PORSHE CAYENNE ( mais pas pour longtemps)',\n 'description': 'IL FAIT LE MALIN EN PORSHE CAYENNE.',\n },\n }, {\n 'url': 'http://allmyvideos.net/v/v-HXZm5t',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n orig_video_id = self._match_id(url)\n video_id = remove_start(orig_video_id, 'embed-')\n url = url.replace(orig_video_id, video_id)\n assert re.match(self._VALID_URL, url) is not None\n orig_webpage = self._download_webpage(url, video_id)\n\n if '>File Not Found<' in orig_webpage:\n raise ExtractorError('Video %s does not exist' % video_id, expected=True)\n\n error = self._search_regex(\n r'class=\"err\">([^<]+)<', orig_webpage, 'error', default=None)\n if error:\n raise ExtractorError(\n '%s returned error: %s' % (self.IE_NAME, error), expected=True)\n\n builtin_url = self._search_regex(\n r']+src=([\"\\'])(?P.+?/builtin-.+?)\\1',\n orig_webpage, 'builtin URL', default=None, group='url')\n\n if builtin_url:\n req = sanitized_Request(builtin_url)\n req.add_header('Referer', url)\n webpage = self._download_webpage(req, video_id, 'Downloading builtin page')\n title = self._og_search_title(orig_webpage).strip()\n description = self._og_search_description(orig_webpage).strip()\n else:\n fields = re.findall(r'type=\"hidden\" name=\"(.+?)\"\\s* value=\"?(.+?)\">', orig_webpage)\n data = dict(fields)\n\n post = urlencode_postdata(data)\n headers = {\n b'Content-Type': b'application/x-www-form-urlencoded',\n }\n req = sanitized_Request(url, post, headers)\n webpage = self._download_webpage(\n req, video_id, note='Downloading video page ...')\n\n title = os.path.splitext(data['fname'])[0]\n description = None\n\n # Could be several links with different quality\n links = re.findall(r'\"file\" : \"?(.+?)\",', webpage)\n # Assume the links are ordered in quality\n formats = [{\n 'url': l,\n 'quality': i,\n } for i, l in enumerate(links)]\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'formats': formats,\n }\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":2729320049983951400,"string":"2,729,320,049,983,951,400"},"line_mean":{"kind":"number","value":33.0603448276,"string":"33.060345"},"line_max":{"kind":"number","value":105,"string":"105"},"alpha_frac":{"kind":"number","value":0.529486206,"string":"0.529486"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.3943298969072164,"string":"3.39433"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":62,"cells":{"repo_name":{"kind":"string","value":"gohin/django"},"path":{"kind":"string","value":"django/contrib/auth/management/commands/createsuperuser.py"},"copies":{"kind":"string","value":"65"},"size":{"kind":"string","value":"7695"},"content":{"kind":"string","value":"\"\"\"\nManagement utility to create superusers.\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport getpass\nimport sys\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.management import get_default_username\nfrom django.core import exceptions\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db import DEFAULT_DB_ALIAS\nfrom django.utils.encoding import force_str\nfrom django.utils.six.moves import input\nfrom django.utils.text import capfirst\n\n\nclass NotRunningInTTYException(Exception):\n pass\n\n\nclass Command(BaseCommand):\n help = 'Used to create a superuser.'\n\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.UserModel = get_user_model()\n self.username_field = self.UserModel._meta.get_field(self.UserModel.USERNAME_FIELD)\n\n def add_arguments(self, parser):\n parser.add_argument('--%s' % self.UserModel.USERNAME_FIELD,\n dest=self.UserModel.USERNAME_FIELD, default=None,\n help='Specifies the login for the superuser.')\n parser.add_argument('--noinput', action='store_false', dest='interactive', default=True,\n help=('Tells Django to NOT prompt the user for input of any kind. '\n 'You must use --%s with --noinput, along with an option for '\n 'any other required field. Superusers created with --noinput will '\n ' not be able to log in until they\\'re given a valid password.' %\n self.UserModel.USERNAME_FIELD))\n parser.add_argument('--database', action='store', dest='database',\n default=DEFAULT_DB_ALIAS,\n help='Specifies the database to use. Default is \"default\".')\n for field in self.UserModel.REQUIRED_FIELDS:\n parser.add_argument('--%s' % field, dest=field, default=None,\n help='Specifies the %s for the superuser.' % field)\n\n def execute(self, *args, **options):\n self.stdin = options.get('stdin', sys.stdin) # Used for testing\n return super(Command, self).execute(*args, **options)\n\n def handle(self, *args, **options):\n username = options.get(self.UserModel.USERNAME_FIELD)\n database = options.get('database')\n\n # If not provided, create the user with an unusable password\n password = None\n user_data = {}\n\n # Do quick and dirty validation if --noinput\n if not options['interactive']:\n try:\n if not username:\n raise CommandError(\"You must use --%s with --noinput.\" %\n self.UserModel.USERNAME_FIELD)\n username = self.username_field.clean(username, None)\n\n for field_name in self.UserModel.REQUIRED_FIELDS:\n if options.get(field_name):\n field = self.UserModel._meta.get_field(field_name)\n user_data[field_name] = field.clean(options[field_name], None)\n else:\n raise CommandError(\"You must use --%s with --noinput.\" % field_name)\n except exceptions.ValidationError as e:\n raise CommandError('; '.join(e.messages))\n\n else:\n # Prompt for username/password, and any other required fields.\n # Enclose this whole thing in a try/except to catch\n # KeyboardInterrupt and exit gracefully.\n default_username = get_default_username()\n try:\n\n if hasattr(self.stdin, 'isatty') and not self.stdin.isatty():\n raise NotRunningInTTYException(\"Not running in a TTY\")\n\n # Get a username\n verbose_field_name = self.username_field.verbose_name\n while username is None:\n input_msg = capfirst(verbose_field_name)\n if default_username:\n input_msg += \" (leave blank to use '%s')\" % default_username\n username_rel = self.username_field.remote_field\n input_msg = force_str('%s%s: ' % (\n input_msg,\n ' (%s.%s)' % (\n username_rel.model._meta.object_name,\n username_rel.field_name\n ) if username_rel else '')\n )\n username = self.get_input_data(self.username_field, input_msg, default_username)\n if not username:\n continue\n if self.username_field.unique:\n try:\n self.UserModel._default_manager.db_manager(database).get_by_natural_key(username)\n except self.UserModel.DoesNotExist:\n pass\n else:\n self.stderr.write(\"Error: That %s is already taken.\" % verbose_field_name)\n username = None\n\n for field_name in self.UserModel.REQUIRED_FIELDS:\n field = self.UserModel._meta.get_field(field_name)\n user_data[field_name] = options.get(field_name)\n while user_data[field_name] is None:\n message = force_str('%s%s: ' % (\n capfirst(field.verbose_name),\n ' (%s.%s)' % (\n field.remote_field.model._meta.object_name,\n field.remote_field.field_name,\n ) if field.remote_field else '',\n ))\n user_data[field_name] = self.get_input_data(field, message)\n\n # Get a password\n while password is None:\n if not password:\n password = getpass.getpass()\n password2 = getpass.getpass(force_str('Password (again): '))\n if password != password2:\n self.stderr.write(\"Error: Your passwords didn't match.\")\n password = None\n continue\n if password.strip() == '':\n self.stderr.write(\"Error: Blank passwords aren't allowed.\")\n password = None\n continue\n\n except KeyboardInterrupt:\n self.stderr.write(\"\\nOperation cancelled.\")\n sys.exit(1)\n\n except NotRunningInTTYException:\n self.stdout.write(\n \"Superuser creation skipped due to not running in a TTY. \"\n \"You can run `manage.py createsuperuser` in your project \"\n \"to create one manually.\"\n )\n\n if username:\n user_data[self.UserModel.USERNAME_FIELD] = username\n user_data['password'] = password\n self.UserModel._default_manager.db_manager(database).create_superuser(**user_data)\n if options['verbosity'] >= 1:\n self.stdout.write(\"Superuser created successfully.\")\n\n def get_input_data(self, field, message, default=None):\n \"\"\"\n Override this method if you want to customize data inputs or\n validation exceptions.\n \"\"\"\n raw_value = input(message)\n if default and raw_value == '':\n raw_value = default\n try:\n val = field.clean(raw_value, None)\n except exceptions.ValidationError as e:\n self.stderr.write(\"Error: %s\" % '; '.join(e.messages))\n val = None\n\n return val\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":6320757852442747000,"string":"6,320,757,852,442,747,000"},"line_mean":{"kind":"number","value":43.738372093,"string":"43.738372"},"line_max":{"kind":"number","value":109,"string":"109"},"alpha_frac":{"kind":"number","value":0.538531514,"string":"0.538532"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.764705882352941,"string":"4.764706"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":63,"cells":{"repo_name":{"kind":"string","value":"wwright2/dcim3-angstrom1"},"path":{"kind":"string","value":"sources/openembedded-core/scripts/pybootchartgui/pybootchartgui/samples.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"5537"},"content":{"kind":"string","value":"# This file is part of pybootchartgui.\n\n# pybootchartgui is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# pybootchartgui is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with pybootchartgui. If not, see .\n\n\nclass DiskStatSample:\n def __init__(self, time):\n self.time = time\n self.diskdata = [0, 0, 0]\n def add_diskdata(self, new_diskdata):\n self.diskdata = [ a + b for a, b in zip(self.diskdata, new_diskdata) ]\n\nclass CPUSample:\n def __init__(self, time, user, sys, io = 0.0, swap = 0.0):\n self.time = time\n self.user = user\n self.sys = sys\n self.io = io\n self.swap = swap\n\n @property\n def cpu(self):\n return self.user + self.sys\n\n def __str__(self):\n return str(self.time) + \"\\t\" + str(self.user) + \"\\t\" + \\\n str(self.sys) + \"\\t\" + str(self.io) + \"\\t\" + str (self.swap)\n\nclass MemSample:\n used_values = ('MemTotal', 'MemFree', 'Buffers', 'Cached', 'SwapTotal', 'SwapFree',)\n\n def __init__(self, time):\n self.time = time\n self.records = {}\n\n def add_value(self, name, value):\n if name in MemSample.used_values:\n self.records[name] = value\n\n def valid(self):\n keys = self.records.keys()\n # discard incomplete samples\n return [v for v in MemSample.used_values if v not in keys] == []\n\nclass ProcessSample:\n def __init__(self, time, state, cpu_sample):\n self.time = time\n self.state = state\n self.cpu_sample = cpu_sample\n\n def __str__(self):\n return str(self.time) + \"\\t\" + str(self.state) + \"\\t\" + str(self.cpu_sample)\n\nclass ProcessStats:\n def __init__(self, writer, process_map, sample_count, sample_period, start_time, end_time):\n self.process_map = process_map\n self.sample_count = sample_count\n self.sample_period = sample_period\n self.start_time = start_time\n self.end_time = end_time\n writer.info (\"%d samples, avg. sample length %f\" % (self.sample_count, self.sample_period))\n writer.info (\"process list size: %d\" % len (self.process_map.values()))\n\nclass Process:\n def __init__(self, writer, pid, cmd, ppid, start_time):\n self.writer = writer\n self.pid = pid\n self.cmd = cmd\n self.exe = cmd\n self.args = []\n self.ppid = ppid\n self.start_time = start_time\n self.duration = 0\n self.samples = []\n self.parent = None\n self.child_list = []\n\n self.active = None\n self.last_user_cpu_time = None\n self.last_sys_cpu_time = None\n\n self.last_cpu_ns = 0\n self.last_blkio_delay_ns = 0\n self.last_swapin_delay_ns = 0\n\n # split this process' run - triggered by a name change\n def split(self, writer, pid, cmd, ppid, start_time):\n split = Process (writer, pid, cmd, ppid, start_time)\n\n split.last_cpu_ns = self.last_cpu_ns\n split.last_blkio_delay_ns = self.last_blkio_delay_ns\n split.last_swapin_delay_ns = self.last_swapin_delay_ns\n\n return split\n\n def __str__(self):\n return \" \".join([str(self.pid), self.cmd, str(self.ppid), '[ ' + str(len(self.samples)) + ' samples ]' ])\n\n def calc_stats(self, samplePeriod):\n if self.samples:\n firstSample = self.samples[0]\n lastSample = self.samples[-1]\n self.start_time = min(firstSample.time, self.start_time)\n self.duration = lastSample.time - self.start_time + samplePeriod\n\n activeCount = sum( [1 for sample in self.samples if sample.cpu_sample and sample.cpu_sample.sys + sample.cpu_sample.user + sample.cpu_sample.io > 0.0] )\n activeCount = activeCount + sum( [1 for sample in self.samples if sample.state == 'D'] )\n self.active = (activeCount>2)\n\n def calc_load(self, userCpu, sysCpu, interval):\n userCpuLoad = float(userCpu - self.last_user_cpu_time) / interval\n sysCpuLoad = float(sysCpu - self.last_sys_cpu_time) / interval\n cpuLoad = userCpuLoad + sysCpuLoad\n # normalize\n if cpuLoad > 1.0:\n userCpuLoad = userCpuLoad / cpuLoad\n sysCpuLoad = sysCpuLoad / cpuLoad\n return (userCpuLoad, sysCpuLoad)\n\n def set_parent(self, processMap):\n if self.ppid != None:\n self.parent = processMap.get (self.ppid)\n if self.parent == None and self.pid // 1000 > 1 and \\\n not (self.ppid == 2000 or self.pid == 2000): # kernel threads: ppid=2\n self.writer.warn(\"Missing CONFIG_PROC_EVENTS: no parent for pid '%i' ('%s') with ppid '%i'\" \\\n % (self.pid,self.cmd,self.ppid))\n\n def get_end_time(self):\n return self.start_time + self.duration\n\nclass DiskSample:\n def __init__(self, time, read, write, util):\n self.time = time\n self.read = read\n self.write = write\n self.util = util\n self.tput = read + write\n\n def __str__(self):\n return \"\\t\".join([str(self.time), str(self.read), str(self.write), str(self.util)])\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":4583296552943260700,"string":"4,583,296,552,943,260,700"},"line_mean":{"kind":"number","value":35.6688741722,"string":"35.668874"},"line_max":{"kind":"number","value":160,"string":"160"},"alpha_frac":{"kind":"number","value":0.6014087051,"string":"0.601409"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.5044303797468355,"string":"3.50443"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":64,"cells":{"repo_name":{"kind":"string","value":"rickerc/neutron_audit"},"path":{"kind":"string","value":"neutron/plugins/nec/ofc_manager.py"},"copies":{"kind":"string","value":"9"},"size":{"kind":"string","value":"9461"},"content":{"kind":"string","value":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n#\n# Copyright 2012 NEC Corporation. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n# @author: Ryota MIBU\n# @author: Akihiro MOTOKI\n\nimport netaddr\n\nfrom neutron.common import utils\nfrom neutron.openstack.common import log as logging\nfrom neutron.plugins.nec.common import config\nfrom neutron.plugins.nec.common import exceptions as nexc\nfrom neutron.plugins.nec.db import api as ndb\nfrom neutron.plugins.nec import drivers\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass OFCManager(object):\n \"\"\"This class manages an OpenFlow Controller and map resources.\n\n This class manage an OpenFlow Controller (OFC) with a driver specified in\n a configuration of this plugin. This keeps mappings between IDs on Neutron\n and OFC for various entities such as Tenant, Network and Filter. A Port on\n OFC is identified by a switch ID 'datapath_id' and a port number 'port_no'\n of the switch. An ID named as 'ofc_*' is used to identify resource on OFC.\n \"\"\"\n\n def __init__(self):\n self.driver = drivers.get_driver(config.OFC.driver)(config.OFC)\n\n def _get_ofc_id(self, context, resource, neutron_id):\n return ndb.get_ofc_id_lookup_both(context.session,\n resource, neutron_id)\n\n def _exists_ofc_item(self, context, resource, neutron_id):\n return ndb.exists_ofc_item_lookup_both(context.session,\n resource, neutron_id)\n\n def _add_ofc_item(self, context, resource, neutron_id, ofc_id):\n # Ensure a new item is added to the new mapping table\n ndb.add_ofc_item(context.session, resource, neutron_id, ofc_id)\n\n def _del_ofc_item(self, context, resource, neutron_id):\n ndb.del_ofc_item_lookup_both(context.session, resource, neutron_id)\n\n def ensure_ofc_tenant(self, context, tenant_id):\n if not self.exists_ofc_tenant(context, tenant_id):\n self.create_ofc_tenant(context, tenant_id)\n\n def create_ofc_tenant(self, context, tenant_id):\n desc = \"ID=%s at OpenStack.\" % tenant_id\n ofc_tenant_id = self.driver.create_tenant(desc, tenant_id)\n self._add_ofc_item(context, \"ofc_tenant\", tenant_id, ofc_tenant_id)\n\n def exists_ofc_tenant(self, context, tenant_id):\n return self._exists_ofc_item(context, \"ofc_tenant\", tenant_id)\n\n def delete_ofc_tenant(self, context, tenant_id):\n ofc_tenant_id = self._get_ofc_id(context, \"ofc_tenant\", tenant_id)\n ofc_tenant_id = self.driver.convert_ofc_tenant_id(\n context, ofc_tenant_id)\n\n self.driver.delete_tenant(ofc_tenant_id)\n self._del_ofc_item(context, \"ofc_tenant\", tenant_id)\n\n def create_ofc_network(self, context, tenant_id, network_id,\n network_name=None):\n ofc_tenant_id = self._get_ofc_id(context, \"ofc_tenant\", tenant_id)\n ofc_tenant_id = self.driver.convert_ofc_tenant_id(\n context, ofc_tenant_id)\n\n desc = \"ID=%s Name=%s at Neutron.\" % (network_id, network_name)\n ofc_net_id = self.driver.create_network(ofc_tenant_id, desc,\n network_id)\n self._add_ofc_item(context, \"ofc_network\", network_id, ofc_net_id)\n\n def exists_ofc_network(self, context, network_id):\n return self._exists_ofc_item(context, \"ofc_network\", network_id)\n\n def delete_ofc_network(self, context, network_id, network):\n ofc_net_id = self._get_ofc_id(context, \"ofc_network\", network_id)\n ofc_net_id = self.driver.convert_ofc_network_id(\n context, ofc_net_id, network['tenant_id'])\n self.driver.delete_network(ofc_net_id)\n self._del_ofc_item(context, \"ofc_network\", network_id)\n\n def create_ofc_port(self, context, port_id, port):\n ofc_net_id = self._get_ofc_id(context, \"ofc_network\",\n port['network_id'])\n ofc_net_id = self.driver.convert_ofc_network_id(\n context, ofc_net_id, port['tenant_id'])\n portinfo = ndb.get_portinfo(context.session, port_id)\n if not portinfo:\n raise nexc.PortInfoNotFound(id=port_id)\n\n ofc_port_id = self.driver.create_port(ofc_net_id, portinfo, port_id)\n self._add_ofc_item(context, \"ofc_port\", port_id, ofc_port_id)\n\n def exists_ofc_port(self, context, port_id):\n return self._exists_ofc_item(context, \"ofc_port\", port_id)\n\n def delete_ofc_port(self, context, port_id, port):\n ofc_port_id = self._get_ofc_id(context, \"ofc_port\", port_id)\n ofc_port_id = self.driver.convert_ofc_port_id(\n context, ofc_port_id, port['tenant_id'], port['network_id'])\n self.driver.delete_port(ofc_port_id)\n self._del_ofc_item(context, \"ofc_port\", port_id)\n\n def create_ofc_packet_filter(self, context, filter_id, filter_dict):\n ofc_net_id = self._get_ofc_id(context, \"ofc_network\",\n filter_dict['network_id'])\n ofc_net_id = self.driver.convert_ofc_network_id(\n context, ofc_net_id, filter_dict['tenant_id'])\n in_port_id = filter_dict.get('in_port')\n portinfo = None\n if in_port_id:\n portinfo = ndb.get_portinfo(context.session, in_port_id)\n if not portinfo:\n raise nexc.PortInfoNotFound(id=in_port_id)\n\n ofc_pf_id = self.driver.create_filter(ofc_net_id,\n filter_dict, portinfo, filter_id)\n self._add_ofc_item(context, \"ofc_packet_filter\", filter_id, ofc_pf_id)\n\n def exists_ofc_packet_filter(self, context, filter_id):\n return self._exists_ofc_item(context, \"ofc_packet_filter\", filter_id)\n\n def delete_ofc_packet_filter(self, context, filter_id):\n ofc_pf_id = self._get_ofc_id(context, \"ofc_packet_filter\", filter_id)\n ofc_pf_id = self.driver.convert_ofc_filter_id(context, ofc_pf_id)\n\n self.driver.delete_filter(ofc_pf_id)\n self._del_ofc_item(context, \"ofc_packet_filter\", filter_id)\n\n def create_ofc_router(self, context, tenant_id, router_id, name=None):\n ofc_tenant_id = self._get_ofc_id(context, \"ofc_tenant\", tenant_id)\n ofc_tenant_id = self.driver.convert_ofc_tenant_id(\n context, ofc_tenant_id)\n\n desc = \"ID=%s Name=%s at Neutron.\" % (router_id, name)\n ofc_router_id = self.driver.create_router(ofc_tenant_id, router_id,\n desc)\n self._add_ofc_item(context, \"ofc_router\", router_id, ofc_router_id)\n\n def exists_ofc_router(self, context, router_id):\n return self._exists_ofc_item(context, \"ofc_router\", router_id)\n\n def delete_ofc_router(self, context, router_id, router):\n ofc_router_id = self._get_ofc_id(context, \"ofc_router\", router_id)\n self.driver.delete_router(ofc_router_id)\n self._del_ofc_item(context, \"ofc_router\", router_id)\n\n def add_ofc_router_interface(self, context, router_id, port_id, port):\n # port must have the following fields:\n # network_id, cidr, ip_address, mac_address\n ofc_router_id = self._get_ofc_id(context, \"ofc_router\", router_id)\n ofc_net_id = self._get_ofc_id(context, \"ofc_network\",\n port['network_id'])\n ip_address = '%s/%s' % (port['ip_address'],\n netaddr.IPNetwork(port['cidr']).prefixlen)\n mac_address = port['mac_address']\n ofc_inf_id = self.driver.add_router_interface(\n ofc_router_id, ofc_net_id, ip_address, mac_address)\n # Use port mapping table to maintain an interface of OFC router\n self._add_ofc_item(context, \"ofc_port\", port_id, ofc_inf_id)\n\n def delete_ofc_router_interface(self, context, router_id, port_id):\n # Use port mapping table to maintain an interface of OFC router\n ofc_inf_id = self._get_ofc_id(context, \"ofc_port\", port_id)\n self.driver.delete_router_interface(ofc_inf_id)\n self._del_ofc_item(context, \"ofc_port\", port_id)\n\n def update_ofc_router_route(self, context, router_id, new_routes):\n ofc_router_id = self._get_ofc_id(context, \"ofc_router\", router_id)\n ofc_routes = self.driver.list_router_routes(ofc_router_id)\n route_dict = {}\n cur_routes = []\n for r in ofc_routes:\n key = ','.join((r['destination'], r['nexthop']))\n route_dict[key] = r['id']\n del r['id']\n cur_routes.append(r)\n added, removed = utils.diff_list_of_dict(cur_routes, new_routes)\n for r in removed:\n key = ','.join((r['destination'], r['nexthop']))\n route_id = route_dict[key]\n self.driver.delete_router_route(route_id)\n for r in added:\n self.driver.add_router_route(ofc_router_id, r['destination'],\n r['nexthop'])\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-5019626671642997000,"string":"-5,019,626,671,642,997,000"},"line_mean":{"kind":"number","value":45.3774509804,"string":"45.377451"},"line_max":{"kind":"number","value":79,"string":"79"},"alpha_frac":{"kind":"number","value":0.6214987845,"string":"0.621499"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.4216998191681736,"string":"3.4217"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":65,"cells":{"repo_name":{"kind":"string","value":"vmobi-gogh/android_kernel_samsung_gogh"},"path":{"kind":"string","value":"Documentation/networking/cxacru-cf.py"},"copies":{"kind":"string","value":"14668"},"size":{"kind":"string","value":"1626"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright 2009 Simon Arlott\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the Free\n# Software Foundation; either version 2 of the License, or (at your option)\n# any later version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or\n# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for\n# more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 59\n# Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n# Usage: cxacru-cf.py < cxacru-cf.bin\n# Output: values string suitable for the sysfs adsl_config attribute\n#\n# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110\n# contains mis-aligned values which will stop the modem from being able\n# to make a connection. If the first and last two bytes are removed then\n# the values become valid, but the modulation will be forced to ANSI\n# T1.413 only which may not be appropriate.\n#\n# The original binary format is a packed list of le32 values.\n\nimport sys\nimport struct\n\ni = 0\nwhile True:\n\tbuf = sys.stdin.read(4)\n\n\tif len(buf) == 0:\n\t\tbreak\n\telif len(buf) != 4:\n\t\tsys.stdout.write(\"\\n\")\n\t\tsys.stderr.write(\"Error: read {0} not 4 bytes\\n\".format(len(buf)))\n\t\tsys.exit(1)\n\n\tif i > 0:\n\t\tsys.stdout.write(\" \")\n\tsys.stdout.write(\"{0:x}={1}\".format(i, struct.unpack(\" self.count:\n if self.first == self.last:\n self.first = None\n self.last = None\n return\n a = self.first\n a.next.prev = None\n self.first = a.next\n a.next = None\n del self.d[a.me[0]]\n del a\n\n @synchronized()\n def __delitem__(self, obj):\n nobj = self.d[obj]\n if nobj.prev:\n nobj.prev.next = nobj.next\n else:\n self.first = nobj.next\n if nobj.next:\n nobj.next.prev = nobj.prev\n else:\n self.last = nobj.prev\n del self.d[obj]\n\n @synchronized()\n def __iter__(self):\n cur = self.first\n while cur is not None:\n cur2 = cur.next\n yield cur.me[1]\n cur = cur2\n\n @synchronized()\n def __len__(self):\n return len(self.d)\n\n @synchronized()\n def iteritems(self):\n cur = self.first\n while cur is not None:\n cur2 = cur.next\n yield cur.me\n cur = cur2\n\n @synchronized()\n def iterkeys(self):\n return iter(self.d)\n\n @synchronized()\n def itervalues(self):\n for i,j in self.iteritems():\n yield j\n\n @synchronized()\n def keys(self):\n return self.d.keys()\n\n @synchronized()\n def pop(self,key):\n v=self[key]\n del self[key]\n return v\n\n @synchronized()\n def clear(self):\n self.d = {}\n self.first = None\n self.last = None\n\n @synchronized()\n def clear_prefix(self, prefix):\n \"\"\" Remove from `self` all the items with the given `prefix`. \"\"\"\n n = len(prefix)\n for key in self.keys():\n if key[:n] == prefix:\n del self[key]\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":7655114437631426000,"string":"7,655,114,437,631,426,000"},"line_mean":{"kind":"number","value":23.5923076923,"string":"23.592308"},"line_max":{"kind":"number","value":99,"string":"99"},"alpha_frac":{"kind":"number","value":0.5101657804,"string":"0.510166"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.674712643678161,"string":"3.674713"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":68,"cells":{"repo_name":{"kind":"string","value":"Ircam-Web/mezzanine-organization"},"path":{"kind":"string","value":"organization/projects/migrations/0085_auto_20190619_2023.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1116"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n# Generated by Django 1.10.8 on 2019-06-19 18:23\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('organization-projects', '0084_auto_20190304_2221'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='projectpage',\n options={'permissions': (('user_edit', 'Mezzo - User can edit its own content'), ('user_delete', 'Mezzo - User can delete its own content'), ('team_edit', \"Mezzo - User can edit his team's content\"), ('team_delete', \"Mezzo - User can delete his team's content\"))},\n ),\n migrations.AddField(\n model_name='projectpage',\n name='user',\n field=models.ForeignKey(default=4, on_delete=django.db.models.deletion.CASCADE, related_name='projectpages', to=settings.AUTH_USER_MODEL, verbose_name='Author'),\n preserve_default=False,\n ),\n ]\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":4549997135370149400,"string":"4,549,997,135,370,149,400"},"line_mean":{"kind":"number","value":38.8571428571,"string":"38.857143"},"line_max":{"kind":"number","value":276,"string":"276"},"alpha_frac":{"kind":"number","value":0.6505376344,"string":"0.650538"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.943462897526502,"string":"3.943463"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":69,"cells":{"repo_name":{"kind":"string","value":"sdague/home-assistant"},"path":{"kind":"string","value":"homeassistant/components/dyson/sensor.py"},"copies":{"kind":"string","value":"5"},"size":{"kind":"string","value":"6457"},"content":{"kind":"string","value":"\"\"\"Support for Dyson Pure Cool Link Sensors.\"\"\"\nimport logging\n\nfrom libpurecool.dyson_pure_cool import DysonPureCool\nfrom libpurecool.dyson_pure_cool_link import DysonPureCoolLink\n\nfrom homeassistant.const import PERCENTAGE, STATE_OFF, TEMP_CELSIUS, TIME_HOURS\nfrom homeassistant.helpers.entity import Entity\n\nfrom . import DYSON_DEVICES\n\nSENSOR_UNITS = {\n \"air_quality\": None,\n \"dust\": None,\n \"filter_life\": TIME_HOURS,\n \"humidity\": PERCENTAGE,\n}\n\nSENSOR_ICONS = {\n \"air_quality\": \"mdi:fan\",\n \"dust\": \"mdi:cloud\",\n \"filter_life\": \"mdi:filter-outline\",\n \"humidity\": \"mdi:water-percent\",\n \"temperature\": \"mdi:thermometer\",\n}\n\nDYSON_SENSOR_DEVICES = \"dyson_sensor_devices\"\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up the Dyson Sensors.\"\"\"\n\n if discovery_info is None:\n return\n\n hass.data.setdefault(DYSON_SENSOR_DEVICES, [])\n unit = hass.config.units.temperature_unit\n devices = hass.data[DYSON_SENSOR_DEVICES]\n\n # Get Dyson Devices from parent component\n device_ids = [device.unique_id for device in hass.data[DYSON_SENSOR_DEVICES]]\n new_entities = []\n for device in hass.data[DYSON_DEVICES]:\n if isinstance(device, DysonPureCool):\n if f\"{device.serial}-temperature\" not in device_ids:\n new_entities.append(DysonTemperatureSensor(device, unit))\n if f\"{device.serial}-humidity\" not in device_ids:\n new_entities.append(DysonHumiditySensor(device))\n elif isinstance(device, DysonPureCoolLink):\n new_entities.append(DysonFilterLifeSensor(device))\n new_entities.append(DysonDustSensor(device))\n new_entities.append(DysonHumiditySensor(device))\n new_entities.append(DysonTemperatureSensor(device, unit))\n new_entities.append(DysonAirQualitySensor(device))\n\n if not new_entities:\n return\n\n devices.extend(new_entities)\n add_entities(devices)\n\n\nclass DysonSensor(Entity):\n \"\"\"Representation of a generic Dyson sensor.\"\"\"\n\n def __init__(self, device, sensor_type):\n \"\"\"Create a new generic Dyson sensor.\"\"\"\n self._device = device\n self._old_value = None\n self._name = None\n self._sensor_type = sensor_type\n\n async def async_added_to_hass(self):\n \"\"\"Call when entity is added to hass.\"\"\"\n self._device.add_message_listener(self.on_message)\n\n def on_message(self, message):\n \"\"\"Handle new messages which are received from the fan.\"\"\"\n # Prevent refreshing if not needed\n if self._old_value is None or self._old_value != self.state:\n _LOGGER.debug(\"Message received for %s device: %s\", self.name, message)\n self._old_value = self.state\n self.schedule_update_ha_state()\n\n @property\n def should_poll(self):\n \"\"\"No polling needed.\"\"\"\n return False\n\n @property\n def name(self):\n \"\"\"Return the name of the Dyson sensor name.\"\"\"\n return self._name\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit the value is expressed in.\"\"\"\n return SENSOR_UNITS[self._sensor_type]\n\n @property\n def icon(self):\n \"\"\"Return the icon for this sensor.\"\"\"\n return SENSOR_ICONS[self._sensor_type]\n\n @property\n def unique_id(self):\n \"\"\"Return the sensor's unique id.\"\"\"\n return f\"{self._device.serial}-{self._sensor_type}\"\n\n\nclass DysonFilterLifeSensor(DysonSensor):\n \"\"\"Representation of Dyson Filter Life sensor (in hours).\"\"\"\n\n def __init__(self, device):\n \"\"\"Create a new Dyson Filter Life sensor.\"\"\"\n super().__init__(device, \"filter_life\")\n self._name = f\"{self._device.name} Filter Life\"\n\n @property\n def state(self):\n \"\"\"Return filter life in hours.\"\"\"\n if self._device.state:\n return int(self._device.state.filter_life)\n return None\n\n\nclass DysonDustSensor(DysonSensor):\n \"\"\"Representation of Dyson Dust sensor (lower is better).\"\"\"\n\n def __init__(self, device):\n \"\"\"Create a new Dyson Dust sensor.\"\"\"\n super().__init__(device, \"dust\")\n self._name = f\"{self._device.name} Dust\"\n\n @property\n def state(self):\n \"\"\"Return Dust value.\"\"\"\n if self._device.environmental_state:\n return self._device.environmental_state.dust\n return None\n\n\nclass DysonHumiditySensor(DysonSensor):\n \"\"\"Representation of Dyson Humidity sensor.\"\"\"\n\n def __init__(self, device):\n \"\"\"Create a new Dyson Humidity sensor.\"\"\"\n super().__init__(device, \"humidity\")\n self._name = f\"{self._device.name} Humidity\"\n\n @property\n def state(self):\n \"\"\"Return Humidity value.\"\"\"\n if self._device.environmental_state:\n if self._device.environmental_state.humidity == 0:\n return STATE_OFF\n return self._device.environmental_state.humidity\n return None\n\n\nclass DysonTemperatureSensor(DysonSensor):\n \"\"\"Representation of Dyson Temperature sensor.\"\"\"\n\n def __init__(self, device, unit):\n \"\"\"Create a new Dyson Temperature sensor.\"\"\"\n super().__init__(device, \"temperature\")\n self._name = f\"{self._device.name} Temperature\"\n self._unit = unit\n\n @property\n def state(self):\n \"\"\"Return Temperature value.\"\"\"\n if self._device.environmental_state:\n temperature_kelvin = self._device.environmental_state.temperature\n if temperature_kelvin == 0:\n return STATE_OFF\n if self._unit == TEMP_CELSIUS:\n return float(f\"{(temperature_kelvin - 273.15):.1f}\")\n return float(f\"{(temperature_kelvin * 9 / 5 - 459.67):.1f}\")\n return None\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit the value is expressed in.\"\"\"\n return self._unit\n\n\nclass DysonAirQualitySensor(DysonSensor):\n \"\"\"Representation of Dyson Air Quality sensor (lower is better).\"\"\"\n\n def __init__(self, device):\n \"\"\"Create a new Dyson Air Quality sensor.\"\"\"\n super().__init__(device, \"air_quality\")\n self._name = f\"{self._device.name} AQI\"\n\n @property\n def state(self):\n \"\"\"Return Air Quality value.\"\"\"\n if self._device.environmental_state:\n return int(self._device.environmental_state.volatil_organic_compounds)\n return None\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":5239885266624850000,"string":"5,239,885,266,624,850,000"},"line_mean":{"kind":"number","value":30.8078817734,"string":"30.807882"},"line_max":{"kind":"number","value":83,"string":"83"},"alpha_frac":{"kind":"number","value":0.6275360074,"string":"0.627536"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.8275044457617073,"string":"3.827504"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":70,"cells":{"repo_name":{"kind":"string","value":"suninsky/ReceiptOCR"},"path":{"kind":"string","value":"Python/server/lib/python2.7/site-packages/markupsafe/__init__.py"},"copies":{"kind":"string","value":"144"},"size":{"kind":"string","value":"10697"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\n markupsafe\n ~~~~~~~~~~\n\n Implements a Markup string.\n\n :copyright: (c) 2010 by Armin Ronacher.\n :license: BSD, see LICENSE for more details.\n\"\"\"\nimport re\nimport string\nfrom collections import Mapping\nfrom markupsafe._compat import text_type, string_types, int_types, \\\n unichr, iteritems, PY2\n\n__version__ = \"1.0\"\n\n__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']\n\n\n_striptags_re = re.compile(r'(|<[^>]*>)')\n_entity_re = re.compile(r'&([^& ;]+);')\n\n\nclass Markup(text_type):\n r\"\"\"Marks a string as being safe for inclusion in HTML/XML output without\n needing to be escaped. This implements the `__html__` interface a couple\n of frameworks and web applications use. :class:`Markup` is a direct\n subclass of `unicode` and provides all the methods of `unicode` just that\n it escapes arguments passed and always returns `Markup`.\n\n The `escape` function returns markup objects so that double escaping can't\n happen.\n\n The constructor of the :class:`Markup` class can be used for three\n different things: When passed an unicode object it's assumed to be safe,\n when passed an object with an HTML representation (has an `__html__`\n method) that representation is used, otherwise the object passed is\n converted into a unicode string and then assumed to be safe:\n\n >>> Markup(\"Hello World!\")\n Markup(u'Hello World!')\n >>> class Foo(object):\n ... def __html__(self):\n ... return 'foo'\n ...\n >>> Markup(Foo())\n Markup(u'foo')\n\n If you want object passed being always treated as unsafe you can use the\n :meth:`escape` classmethod to create a :class:`Markup` object:\n\n >>> Markup.escape(\"Hello World!\")\n Markup(u'Hello &lt;em&gt;World&lt;/em&gt;!')\n\n Operations on a markup string are markup aware which means that all\n arguments are passed through the :func:`escape` function:\n\n >>> em = Markup(\"%s\")\n >>> em % \"foo & bar\"\n Markup(u'foo &amp; bar')\n >>> strong = Markup(\"%(text)s\")\n >>> strong % {'text': 'hacker here'}\n Markup(u'&lt;blink&gt;hacker here&lt;/blink&gt;')\n >>> Markup(\"Hello \") + \"\"\n Markup(u'Hello &lt;foo&gt;')\n \"\"\"\n __slots__ = ()\n\n def __new__(cls, base=u'', encoding=None, errors='strict'):\n if hasattr(base, '__html__'):\n base = base.__html__()\n if encoding is None:\n return text_type.__new__(cls, base)\n return text_type.__new__(cls, base, encoding, errors)\n\n def __html__(self):\n return self\n\n def __add__(self, other):\n if isinstance(other, string_types) or hasattr(other, '__html__'):\n return self.__class__(super(Markup, self).__add__(self.escape(other)))\n return NotImplemented\n\n def __radd__(self, other):\n if hasattr(other, '__html__') or isinstance(other, string_types):\n return self.escape(other).__add__(self)\n return NotImplemented\n\n def __mul__(self, num):\n if isinstance(num, int_types):\n return self.__class__(text_type.__mul__(self, num))\n return NotImplemented\n __rmul__ = __mul__\n\n def __mod__(self, arg):\n if isinstance(arg, tuple):\n arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)\n else:\n arg = _MarkupEscapeHelper(arg, self.escape)\n return self.__class__(text_type.__mod__(self, arg))\n\n def __repr__(self):\n return '%s(%s)' % (\n self.__class__.__name__,\n text_type.__repr__(self)\n )\n\n def join(self, seq):\n return self.__class__(text_type.join(self, map(self.escape, seq)))\n join.__doc__ = text_type.join.__doc__\n\n def split(self, *args, **kwargs):\n return list(map(self.__class__, text_type.split(self, *args, **kwargs)))\n split.__doc__ = text_type.split.__doc__\n\n def rsplit(self, *args, **kwargs):\n return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))\n rsplit.__doc__ = text_type.rsplit.__doc__\n\n def splitlines(self, *args, **kwargs):\n return list(map(self.__class__, text_type.splitlines(\n self, *args, **kwargs)))\n splitlines.__doc__ = text_type.splitlines.__doc__\n\n def unescape(self):\n r\"\"\"Unescape markup again into an text_type string. This also resolves\n known HTML4 and XHTML entities:\n\n >>> Markup(\"Main &raquo; About\").unescape()\n u'Main \\xbb About'\n \"\"\"\n from markupsafe._constants import HTML_ENTITIES\n def handle_match(m):\n name = m.group(1)\n if name in HTML_ENTITIES:\n return unichr(HTML_ENTITIES[name])\n try:\n if name[:2] in ('#x', '#X'):\n return unichr(int(name[2:], 16))\n elif name.startswith('#'):\n return unichr(int(name[1:]))\n except ValueError:\n pass\n # Don't modify unexpected input.\n return m.group()\n return _entity_re.sub(handle_match, text_type(self))\n\n def striptags(self):\n r\"\"\"Unescape markup into an text_type string and strip all tags. This\n also resolves known HTML4 and XHTML entities. Whitespace is\n normalized to one:\n\n >>> Markup(\"Main &raquo; About\").striptags()\n u'Main \\xbb About'\n \"\"\"\n stripped = u' '.join(_striptags_re.sub('', self).split())\n return Markup(stripped).unescape()\n\n @classmethod\n def escape(cls, s):\n \"\"\"Escape the string. Works like :func:`escape` with the difference\n that for subclasses of :class:`Markup` this function would return the\n correct subclass.\n \"\"\"\n rv = escape(s)\n if rv.__class__ is not cls:\n return cls(rv)\n return rv\n\n def make_simple_escaping_wrapper(name):\n orig = getattr(text_type, name)\n def func(self, *args, **kwargs):\n args = _escape_argspec(list(args), enumerate(args), self.escape)\n _escape_argspec(kwargs, iteritems(kwargs), self.escape)\n return self.__class__(orig(self, *args, **kwargs))\n func.__name__ = orig.__name__\n func.__doc__ = orig.__doc__\n return func\n\n for method in '__getitem__', 'capitalize', \\\n 'title', 'lower', 'upper', 'replace', 'ljust', \\\n 'rjust', 'lstrip', 'rstrip', 'center', 'strip', \\\n 'translate', 'expandtabs', 'swapcase', 'zfill':\n locals()[method] = make_simple_escaping_wrapper(method)\n\n # new in python 2.5\n if hasattr(text_type, 'partition'):\n def partition(self, sep):\n return tuple(map(self.__class__,\n text_type.partition(self, self.escape(sep))))\n def rpartition(self, sep):\n return tuple(map(self.__class__,\n text_type.rpartition(self, self.escape(sep))))\n\n # new in python 2.6\n if hasattr(text_type, 'format'):\n def format(*args, **kwargs):\n self, args = args[0], args[1:]\n formatter = EscapeFormatter(self.escape)\n kwargs = _MagicFormatMapping(args, kwargs)\n return self.__class__(formatter.vformat(self, args, kwargs))\n\n def __html_format__(self, format_spec):\n if format_spec:\n raise ValueError('Unsupported format specification '\n 'for Markup.')\n return self\n\n # not in python 3\n if hasattr(text_type, '__getslice__'):\n __getslice__ = make_simple_escaping_wrapper('__getslice__')\n\n del method, make_simple_escaping_wrapper\n\n\nclass _MagicFormatMapping(Mapping):\n \"\"\"This class implements a dummy wrapper to fix a bug in the Python\n standard library for string formatting.\n\n See http://bugs.python.org/issue13598 for information about why\n this is necessary.\n \"\"\"\n\n def __init__(self, args, kwargs):\n self._args = args\n self._kwargs = kwargs\n self._last_index = 0\n\n def __getitem__(self, key):\n if key == '':\n idx = self._last_index\n self._last_index += 1\n try:\n return self._args[idx]\n except LookupError:\n pass\n key = str(idx)\n return self._kwargs[key]\n\n def __iter__(self):\n return iter(self._kwargs)\n\n def __len__(self):\n return len(self._kwargs)\n\n\nif hasattr(text_type, 'format'):\n class EscapeFormatter(string.Formatter):\n\n def __init__(self, escape):\n self.escape = escape\n\n def format_field(self, value, format_spec):\n if hasattr(value, '__html_format__'):\n rv = value.__html_format__(format_spec)\n elif hasattr(value, '__html__'):\n if format_spec:\n raise ValueError('No format specification allowed '\n 'when formatting an object with '\n 'its __html__ method.')\n rv = value.__html__()\n else:\n # We need to make sure the format spec is unicode here as\n # otherwise the wrong callback methods are invoked. For\n # instance a byte string there would invoke __str__ and\n # not __unicode__.\n rv = string.Formatter.format_field(\n self, value, text_type(format_spec))\n return text_type(self.escape(rv))\n\n\ndef _escape_argspec(obj, iterable, escape):\n \"\"\"Helper for various string-wrapped functions.\"\"\"\n for key, value in iterable:\n if hasattr(value, '__html__') or isinstance(value, string_types):\n obj[key] = escape(value)\n return obj\n\n\nclass _MarkupEscapeHelper(object):\n \"\"\"Helper for Markup.__mod__\"\"\"\n\n def __init__(self, obj, escape):\n self.obj = obj\n self.escape = escape\n\n __getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)\n __unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))\n __repr__ = lambda s: str(s.escape(repr(s.obj)))\n __int__ = lambda s: int(s.obj)\n __float__ = lambda s: float(s.obj)\n\n\n# we have to import it down here as the speedups and native\n# modules imports the markup type which is define above.\ntry:\n from markupsafe._speedups import escape, escape_silent, soft_unicode\nexcept ImportError:\n from markupsafe._native import escape, escape_silent, soft_unicode\n\nif not PY2:\n soft_str = soft_unicode\n __all__.append('soft_str')\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":2225617646636672500,"string":"2,225,617,646,636,672,500"},"line_mean":{"kind":"number","value":34.0721311475,"string":"34.072131"},"line_max":{"kind":"number","value":82,"string":"82"},"alpha_frac":{"kind":"number","value":0.5673553333,"string":"0.567355"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.029001883239172,"string":"4.029002"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":71,"cells":{"repo_name":{"kind":"string","value":"ahmadio/edx-platform"},"path":{"kind":"string","value":"lms/djangoapps/mobile_api/social_facebook/groups/views.py"},"copies":{"kind":"string","value":"86"},"size":{"kind":"string","value":"4938"},"content":{"kind":"string","value":"\"\"\"\nViews for groups info API\n\"\"\"\n\nfrom rest_framework import generics, status, mixins\nfrom rest_framework.response import Response\nfrom django.conf import settings\nimport facebook\n\nfrom ...utils import mobile_view\nfrom . import serializers\n\n\n@mobile_view()\nclass Groups(generics.CreateAPIView, mixins.DestroyModelMixin):\n \"\"\"\n **Use Case**\n\n An API to Create or Delete course groups.\n\n Note: The Delete is not invoked from the current version of the app\n and is used only for testing with facebook dependencies.\n\n **Creation Example request**:\n\n POST /api/mobile/v0.5/social/facebook/groups/\n\n Parameters: name : string,\n description : string,\n privacy : open/closed\n\n **Creation Response Values**\n\n {\"id\": group_id}\n\n **Deletion Example request**:\n\n DELETE /api/mobile/v0.5/social/facebook/groups/\n\n **Deletion Response Values**\n\n {\"success\" : \"true\"}\n\n \"\"\"\n serializer_class = serializers.GroupSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.DATA, files=request.FILES)\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n try:\n app_groups_response = facebook_graph_api().request(\n settings.FACEBOOK_API_VERSION + '/' + settings.FACEBOOK_APP_ID + \"/groups\",\n post_args=request.POST.dict()\n )\n return Response(app_groups_response)\n except facebook.GraphAPIError, ex:\n return Response({'error': ex.result['error']['message']}, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, *args, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Deletes the course group.\n \"\"\"\n try:\n return Response(\n facebook_graph_api().request(\n settings.FACEBOOK_API_VERSION + '/' + settings.FACEBOOK_APP_ID + \"/groups/\" + kwargs['group_id'],\n post_args={'method': 'delete'}\n )\n )\n except facebook.GraphAPIError, ex:\n return Response({'error': ex.result['error']['message']}, status=status.HTTP_400_BAD_REQUEST)\n\n\n@mobile_view()\nclass GroupsMembers(generics.CreateAPIView, mixins.DestroyModelMixin):\n \"\"\"\n **Use Case**\n\n An API to Invite and Remove members to a group\n\n Note: The Remove is not invoked from the current version\n of the app and is used only for testing with facebook dependencies.\n\n **Invite Example request**:\n\n POST /api/mobile/v0.5/social/facebook/groups//member/\n\n Parameters: members : int,int,int...\n\n\n **Invite Response Values**\n\n {\"member_id\" : success/error_message}\n A response with each member_id and whether or not the member was added successfully.\n If the member was not added successfully the Facebook error message is provided.\n\n **Remove Example request**:\n\n DELETE /api/mobile/v0.5/social/facebook/groups//member/\n\n **Remove Response Values**\n\n {\"success\" : \"true\"}\n \"\"\"\n serializer_class = serializers.GroupsMembersSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.DATA, files=request.FILES)\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n graph = facebook_graph_api()\n url = settings.FACEBOOK_API_VERSION + '/' + kwargs['group_id'] + \"/members\"\n member_ids = serializer.object['member_ids'].split(',')\n response = {}\n for member_id in member_ids:\n try:\n if 'success' in graph.request(url, post_args={'member': member_id}):\n response[member_id] = 'success'\n except facebook.GraphAPIError, ex:\n response[member_id] = ex.result['error']['message']\n return Response(response, status=status.HTTP_200_OK)\n\n def delete(self, request, *args, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Deletes the member from the course group.\n \"\"\"\n try:\n return Response(\n facebook_graph_api().request(\n settings.FACEBOOK_API_VERSION + '/' + kwargs['group_id'] + \"/members\",\n post_args={'method': 'delete', 'member': kwargs['member_id']}\n )\n )\n except facebook.GraphAPIError, ex:\n return Response({'error': ex.result['error']['message']}, status=status.HTTP_400_BAD_REQUEST)\n\n\ndef facebook_graph_api():\n \"\"\"\n Returns the result from calling Facebook's Graph API with the app's access token.\n \"\"\"\n return facebook.GraphAPI(facebook.get_app_access_token(settings.FACEBOOK_APP_ID, settings.FACEBOOK_APP_SECRET))\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":-5134061850296026000,"string":"-5,134,061,850,296,026,000"},"line_mean":{"kind":"number","value":33.5314685315,"string":"33.531469"},"line_max":{"kind":"number","value":117,"string":"117"},"alpha_frac":{"kind":"number","value":0.6148238153,"string":"0.614824"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.216908625106746,"string":"4.216909"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":72,"cells":{"repo_name":{"kind":"string","value":"skidzen/grit-i18n"},"path":{"kind":"string","value":"grit/tool/build.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"19603"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# Copyright (c) 2012 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n'''The 'grit build' tool along with integration for this tool with the\nSCons build system.\n'''\n\nimport filecmp\nimport getopt\nimport os\nimport shutil\nimport sys\n\nfrom grit import grd_reader\nfrom grit import util\nfrom grit.tool import interface\nfrom grit import shortcuts\n\n\n# It would be cleaner to have each module register itself, but that would\n# require importing all of them on every run of GRIT.\n'''Map from node types to modules under grit.format.'''\n_format_modules = {\n 'android': 'android_xml',\n 'c_format': 'c_format',\n 'chrome_messages_json': 'chrome_messages_json',\n 'data_package': 'data_pack',\n 'js_map_format': 'js_map_format',\n 'rc_all': 'rc',\n 'rc_translateable': 'rc',\n 'rc_nontranslateable': 'rc',\n 'rc_header': 'rc_header',\n 'resource_map_header': 'resource_map',\n 'resource_map_source': 'resource_map',\n 'resource_file_map_source': 'resource_map',\n}\n_format_modules.update(\n (type, 'policy_templates.template_formatter') for type in\n [ 'adm', 'admx', 'adml', 'reg', 'doc', 'json',\n 'plist', 'plist_strings', 'ios_plist', 'android_policy' ])\n\n\ndef GetFormatter(type):\n modulename = 'grit.format.' + _format_modules[type]\n __import__(modulename)\n module = sys.modules[modulename]\n try:\n return module.Format\n except AttributeError:\n return module.GetFormatter(type)\n\n\nclass RcBuilder(interface.Tool):\n '''A tool that builds RC files and resource header files for compilation.\n\nUsage: grit build [-o OUTPUTDIR] [-D NAME[=VAL]]*\n\nAll output options for this tool are specified in the input file (see\n'grit help' for details on how to specify the input file - it is a global\noption).\n\nOptions:\n\n -a FILE Assert that the given file is an output. There can be\n multiple \"-a\" flags listed for multiple outputs. If a \"-a\"\n or \"--assert-file-list\" argument is present, then the list\n of asserted files must match the output files or the tool\n will fail. The use-case is for the build system to maintain\n separate lists of output files and to catch errors if the\n build system's list and the grit list are out-of-sync.\n\n --assert-file-list Provide a file listing multiple asserted output files.\n There is one file name per line. This acts like specifying\n each file with \"-a\" on the command line, but without the\n possibility of running into OS line-length limits for very\n long lists.\n\n -o OUTPUTDIR Specify what directory output paths are relative to.\n Defaults to the current directory.\n\n -D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional\n value VAL (defaults to 1) which will be used to control\n conditional inclusion of resources.\n\n -E NAME=VALUE Set environment variable NAME to VALUE (within grit).\n\n -f FIRSTIDSFILE Path to a python file that specifies the first id of\n value to use for resources. A non-empty value here will\n override the value specified in the node's\n first_ids_file.\n\n -w WHITELISTFILE Path to a file containing the string names of the\n resources to include. Anything not listed is dropped.\n\n -t PLATFORM Specifies the platform the build is targeting; defaults\n to the value of sys.platform. The value provided via this\n flag should match what sys.platform would report for your\n target platform; see grit.node.base.EvaluateCondition.\n\n -h HEADERFORMAT Custom format string to use for generating rc header files.\n The string should have two placeholders: {textual_id}\n and {numeric_id}. E.g. \"#define {textual_id} {numeric_id}\"\n Otherwise it will use the default \"#define SYMBOL 1234\"\n\n --output-all-resource-defines\n --no-output-all-resource-defines If specified, overrides the value of the\n output_all_resource_defines attribute of the root \n element of the input .grd file.\n\n --write-only-new flag\n If flag is non-0, write output files to a temporary file\n first, and copy it to the real output only if the new file\n is different from the old file. This allows some build\n systems to realize that dependent build steps might be\n unnecessary, at the cost of comparing the output data at\n grit time.\n\n --depend-on-stamp\n If specified along with --depfile and --depdir, the depfile\n generated will depend on a stampfile instead of the first\n output in the input .grd file.\n\nConditional inclusion of resources only affects the output of files which\ncontrol which resources get linked into a binary, e.g. it affects .rc files\nmeant for compilation but it does not affect resource header files (that define\nIDs). This helps ensure that values of IDs stay the same, that all messages\nare exported to translation interchange files (e.g. XMB files), etc.\n'''\n\n def ShortDescription(self):\n return 'A tool that builds RC files for compilation.'\n\n def Run(self, opts, args):\n self.output_directory = '.'\n first_ids_file = None\n whitelist_filenames = []\n assert_output_files = []\n target_platform = None\n depfile = None\n depdir = None\n rc_header_format = None\n output_all_resource_defines = None\n write_only_new = False\n depend_on_stamp = False\n (own_opts, args) = getopt.getopt(args, 'a:o:D:E:f:w:t:h:',\n ('depdir=','depfile=','assert-file-list=',\n 'output-all-resource-defines',\n 'no-output-all-resource-defines',\n 'depend-on-stamp',\n 'write-only-new='))\n for (key, val) in own_opts:\n if key == '-a':\n assert_output_files.append(val)\n elif key == '--assert-file-list':\n with open(val) as f:\n assert_output_files += f.read().splitlines()\n elif key == '-o':\n self.output_directory = val\n elif key == '-D':\n name, val = util.ParseDefine(val)\n self.defines[name] = val\n elif key == '-E':\n (env_name, env_value) = val.split('=', 1)\n os.environ[env_name] = env_value\n elif key == '-f':\n # TODO(joi@chromium.org): Remove this override once change\n # lands in WebKit.grd to specify the first_ids_file in the\n # .grd itself.\n first_ids_file = val\n elif key == '-w':\n whitelist_filenames.append(val)\n elif key == '--output-all-resource-defines':\n output_all_resource_defines = True\n elif key == '--no-output-all-resource-defines':\n output_all_resource_defines = False\n elif key == '-t':\n target_platform = val\n elif key == '-h':\n rc_header_format = val\n elif key == '--depdir':\n depdir = val\n elif key == '--depfile':\n depfile = val\n elif key == '--write-only-new':\n write_only_new = val != '0'\n elif key == '--depend-on-stamp':\n depend_on_stamp = True\n\n if len(args):\n print 'This tool takes no tool-specific arguments.'\n return 2\n self.SetOptions(opts)\n if self.scons_targets:\n self.VerboseOut('Using SCons targets to identify files to output.\\n')\n else:\n self.VerboseOut('Output directory: %s (absolute path: %s)\\n' %\n (self.output_directory,\n os.path.abspath(self.output_directory)))\n\n if whitelist_filenames:\n self.whitelist_names = set()\n for whitelist_filename in whitelist_filenames:\n self.VerboseOut('Using whitelist: %s\\n' % whitelist_filename);\n whitelist_contents = util.ReadFile(whitelist_filename, util.RAW_TEXT)\n self.whitelist_names.update(whitelist_contents.strip().split('\\n'))\n\n self.write_only_new = write_only_new\n\n self.res = grd_reader.Parse(opts.input,\n debug=opts.extra_verbose,\n first_ids_file=first_ids_file,\n defines=self.defines,\n target_platform=target_platform)\n\n # If the output_all_resource_defines option is specified, override the value\n # found in the grd file.\n if output_all_resource_defines is not None:\n self.res.SetShouldOutputAllResourceDefines(output_all_resource_defines)\n\n # Set an output context so that conditionals can use defines during the\n # gathering stage; we use a dummy language here since we are not outputting\n # a specific language.\n self.res.SetOutputLanguage('en')\n if rc_header_format:\n self.res.AssignRcHeaderFormat(rc_header_format)\n self.res.RunGatherers()\n self.Process()\n\n if assert_output_files:\n if not self.CheckAssertedOutputFiles(assert_output_files):\n return 2\n\n if depfile and depdir:\n self.GenerateDepfile(depfile, depdir, first_ids_file, depend_on_stamp)\n\n return 0\n\n def __init__(self, defines=None):\n # Default file-creation function is built-in open(). Only done to allow\n # overriding by unit test.\n self.fo_create = open\n\n # key/value pairs of C-preprocessor like defines that are used for\n # conditional output of resources\n self.defines = defines or {}\n\n # self.res is a fully-populated resource tree if Run()\n # has been called, otherwise None.\n self.res = None\n\n # Set to a list of filenames for the output nodes that are relative\n # to the current working directory. They are in the same order as the\n # output nodes in the file.\n self.scons_targets = None\n\n # The set of names that are whitelisted to actually be included in the\n # output.\n self.whitelist_names = None\n\n # Whether to compare outputs to their old contents before writing.\n self.write_only_new = False\n\n @staticmethod\n def AddWhitelistTags(start_node, whitelist_names):\n # Walk the tree of nodes added attributes for the nodes that shouldn't\n # be written into the target files (skip markers).\n from grit.node import include\n from grit.node import message\n from grit.node import structure\n for node in start_node:\n # Same trick data_pack.py uses to see what nodes actually result in\n # real items.\n if (isinstance(node, include.IncludeNode) or\n isinstance(node, message.MessageNode) or\n isinstance(node, structure.StructureNode)):\n text_ids = node.GetTextualIds()\n # Mark the item to be skipped if it wasn't in the whitelist.\n if text_ids and text_ids[0] not in whitelist_names:\n node.SetWhitelistMarkedAsSkip(True)\n\n @staticmethod\n def ProcessNode(node, output_node, outfile):\n '''Processes a node in-order, calling its formatter before and after\n recursing to its children.\n\n Args:\n node: grit.node.base.Node subclass\n output_node: grit.node.io.OutputNode\n outfile: open filehandle\n '''\n base_dir = util.dirname(output_node.GetOutputFilename())\n\n formatter = GetFormatter(output_node.GetType())\n formatted = formatter(node, output_node.GetLanguage(), output_dir=base_dir)\n outfile.writelines(formatted)\n\n\n def Process(self):\n # Update filenames with those provided by SCons if we're being invoked\n # from SCons. The list of SCons targets also includes all \n # node outputs, but it starts with our output files, in the order they\n # occur in the .grd\n if self.scons_targets:\n assert len(self.scons_targets) >= len(self.res.GetOutputFiles())\n outfiles = self.res.GetOutputFiles()\n for ix in range(len(outfiles)):\n outfiles[ix].output_filename = os.path.abspath(\n self.scons_targets[ix])\n else:\n for output in self.res.GetOutputFiles():\n output.output_filename = os.path.abspath(os.path.join(\n self.output_directory, output.GetFilename()))\n\n # If there are whitelisted names, tag the tree once up front, this way\n # while looping through the actual output, it is just an attribute check.\n if self.whitelist_names:\n self.AddWhitelistTags(self.res, self.whitelist_names)\n\n for output in self.res.GetOutputFiles():\n self.VerboseOut('Creating %s...' % output.GetFilename())\n\n # Microsoft's RC compiler can only deal with single-byte or double-byte\n # files (no UTF-8), so we make all RC files UTF-16 to support all\n # character sets.\n if output.GetType() in ('rc_header', 'resource_map_header',\n 'resource_map_source', 'resource_file_map_source'):\n encoding = 'cp1252'\n elif output.GetType() in ('android', 'c_format', 'js_map_format', 'plist',\n 'plist_strings', 'doc', 'json', 'android_policy'):\n encoding = 'utf_8'\n elif output.GetType() in ('chrome_messages_json'):\n # Chrome Web Store currently expects BOM for UTF-8 files :-(\n encoding = 'utf-8-sig'\n else:\n # TODO(gfeher) modify here to set utf-8 encoding for admx/adml\n encoding = 'utf_16'\n\n # Set the context, for conditional inclusion of resources\n self.res.SetOutputLanguage(output.GetLanguage())\n self.res.SetOutputContext(output.GetContext())\n self.res.SetFallbackToDefaultLayout(output.GetFallbackToDefaultLayout())\n self.res.SetDefines(self.defines)\n\n # Make the output directory if it doesn't exist.\n self.MakeDirectoriesTo(output.GetOutputFilename())\n\n # Write the results to a temporary file and only overwrite the original\n # if the file changed. This avoids unnecessary rebuilds.\n outfile = self.fo_create(output.GetOutputFilename() + '.tmp', 'wb')\n\n if output.GetType() != 'data_package':\n outfile = util.WrapOutputStream(outfile, encoding)\n\n # Iterate in-order through entire resource tree, calling formatters on\n # the entry into a node and on exit out of it.\n with outfile:\n self.ProcessNode(self.res, output, outfile)\n\n # Now copy from the temp file back to the real output, but on Windows,\n # only if the real output doesn't exist or the contents of the file\n # changed. This prevents identical headers from being written and .cc\n # files from recompiling (which is painful on Windows).\n if not os.path.exists(output.GetOutputFilename()):\n os.rename(output.GetOutputFilename() + '.tmp',\n output.GetOutputFilename())\n else:\n # CHROMIUM SPECIFIC CHANGE.\n # This clashes with gyp + vstudio, which expect the output timestamp\n # to change on a rebuild, even if nothing has changed, so only do\n # it when opted in.\n if not self.write_only_new:\n write_file = True\n else:\n files_match = filecmp.cmp(output.GetOutputFilename(),\n output.GetOutputFilename() + '.tmp')\n write_file = not files_match\n if write_file:\n shutil.copy2(output.GetOutputFilename() + '.tmp',\n output.GetOutputFilename())\n os.remove(output.GetOutputFilename() + '.tmp')\n\n self.VerboseOut(' done.\\n')\n\n # Print warnings if there are any duplicate shortcuts.\n warnings = shortcuts.GenerateDuplicateShortcutsWarnings(\n self.res.UberClique(), self.res.GetTcProject())\n if warnings:\n print '\\n'.join(warnings)\n\n # Print out any fallback warnings, and missing translation errors, and\n # exit with an error code if there are missing translations in a non-pseudo\n # and non-official build.\n warnings = (self.res.UberClique().MissingTranslationsReport().\n encode('ascii', 'replace'))\n if warnings:\n self.VerboseOut(warnings)\n if self.res.UberClique().HasMissingTranslations():\n print self.res.UberClique().missing_translations_\n sys.exit(-1)\n\n\n def CheckAssertedOutputFiles(self, assert_output_files):\n '''Checks that the asserted output files are specified in the given list.\n\n Returns true if the asserted files are present. If they are not, returns\n False and prints the failure.\n '''\n # Compare the absolute path names, sorted.\n asserted = sorted([os.path.abspath(i) for i in assert_output_files])\n actual = sorted([\n os.path.abspath(os.path.join(self.output_directory, i.GetFilename()))\n for i in self.res.GetOutputFiles()])\n\n if asserted != actual:\n missing = list(set(actual) - set(asserted))\n extra = list(set(asserted) - set(actual))\n error = '''Asserted file list does not match.\n\nExpected output files:\n%s\nActual output files:\n%s\nMissing output files:\n%s\nExtra output files:\n%s\n'''\n print error % ('\\n'.join(asserted), '\\n'.join(actual), '\\n'.join(missing),\n '\\n'.join(extra))\n return False\n return True\n\n\n def GenerateDepfile(self, depfile, depdir, first_ids_file, depend_on_stamp):\n '''Generate a depfile that contains the imlicit dependencies of the input\n grd. The depfile will be in the same format as a makefile, and will contain\n references to files relative to |depdir|. It will be put in |depfile|.\n\n For example, supposing we have three files in a directory src/\n\n src/\n blah.grd <- depends on input{1,2}.xtb\n input1.xtb\n input2.xtb\n\n and we run\n\n grit -i blah.grd -o ../out/gen --depdir ../out --depfile ../out/gen/blah.rd.d\n\n from the directory src/ we will generate a depfile ../out/gen/blah.grd.d\n that has the contents\n\n gen/blah.h: ../src/input1.xtb ../src/input2.xtb\n\n Where \"gen/blah.h\" is the first output (Ninja expects the .d file to list\n the first output in cases where there is more than one). If the flag\n --depend-on-stamp is specified, \"gen/blah.rd.d.stamp\" will be used that is\n 'touched' whenever a new depfile is generated.\n\n Note that all paths in the depfile are relative to ../out, the depdir.\n '''\n depfile = os.path.abspath(depfile)\n depdir = os.path.abspath(depdir)\n infiles = self.res.GetInputFiles()\n\n # We want to trigger a rebuild if the first ids change.\n if first_ids_file is not None:\n infiles.append(first_ids_file)\n\n if (depend_on_stamp):\n output_file = depfile + \".stamp\"\n # Touch the stamp file before generating the depfile.\n with open(output_file, 'a'):\n os.utime(output_file, None)\n else:\n # Get the first output file relative to the depdir.\n outputs = self.res.GetOutputFiles()\n output_file = os.path.join(self.output_directory,\n outputs[0].GetFilename())\n\n output_file = os.path.relpath(output_file, depdir)\n # The path prefix to prepend to dependencies in the depfile.\n prefix = os.path.relpath(os.getcwd(), depdir)\n deps_text = ' '.join([os.path.join(prefix, i) for i in infiles])\n\n depfile_contents = output_file + ': ' + deps_text\n self.MakeDirectoriesTo(depfile)\n outfile = self.fo_create(depfile, 'wb')\n outfile.writelines(depfile_contents)\n\n @staticmethod\n def MakeDirectoriesTo(file):\n '''Creates directories necessary to contain |file|.'''\n dir = os.path.split(file)[0]\n if not os.path.exists(dir):\n os.makedirs(dir)\n"},"license":{"kind":"string","value":"bsd-2-clause"},"hash":{"kind":"number","value":-7293954927933790000,"string":"-7,293,954,927,933,790,000"},"line_mean":{"kind":"number","value":38.3634538153,"string":"38.363454"},"line_max":{"kind":"number","value":83,"string":"83"},"alpha_frac":{"kind":"number","value":0.6444421772,"string":"0.644442"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.0285655569256065,"string":"4.028566"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":73,"cells":{"repo_name":{"kind":"string","value":"jonwright/ImageD11"},"path":{"kind":"string","value":"scripts/plotImageD11map.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"1829"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nfrom ImageD11.grain import read_grain_file\nimport sys, os\n\ngf = read_grain_file(sys.argv[1])\nmapfile=open(sys.argv[2],\"w\")\n\ndef dodot(xyz,k):\n mapfile.write(\"%f %f %f %d\\n\"%(xyz[0],xyz[1],xyz[2],k))\n\ndef getmedian(s):\n items=s.split()\n j = -1\n for i in range(len(items)):\n if items[i] == \"median\":\n j = i\n if j == -1:\n return 0\n return abs(float(items[j+2]))\n \ntry:\n outersf = float(sys.argv[3])\nexcept:\n outersf = 1.0\n\nprint(\"Scale factor is\",outersf)\nfor g in gf:\n #print g.translation, g.ubi\n mapfile.write(\"\\n\\n\")\n o = g.translation\n try:\n sf = pow(getmedian(g.intensity_info),0.3333)*outersf\n except:\n sf = outersf\n try:\n k = int(g.npks)\n except:\n k = 1\n for u in g.ubi:\n dodot(o,k)\n dodot(o+u*sf,int(g.npks))\n for u in g.ubi:\n dodot(o,k)\n dodot(o-u*sf,int(g.npks))\n# dodot(o,k)\n# dodot(o+sf*(-g.ubi[0]-g.ubi[1]),k)\n# dodot(o,k)\n# dodot(o+sf*(g.ubi[0]+g.ubi[1]),k)\n\nmapfile.close()\nterm = \" \"\nif \"linux\" in sys.platform:\n term = \"set term x11\"\nif \"win32\" in sys.platform:\n term = \"set term windows\"\n \nopen(\"gnuplot.in\",\"w\").write(\"\"\"\n%s\nset ticslevel 0\nset title \"Color proportional to number of peaks\"\nset palette model RGB\nset palette defined ( 0 \"violet\", 1 \"blue\", 2 \"green\", 3 \"yellow\", 4 \"orange\", 5 \"red\" )\nset view equal xyz\nset view 75,0,1,1\n#set terminal gif animate delay 10 loop 1 optimize size 1024,768\nset nokey\nset hidden3d\n#set output \"ImageD11map.gif\"\nsplot \"%s\" u 1:2:3:4 w l lw 2 lc pal z\n\"\"\"%(term, sys.argv[2])\n# \"\".join([\"set view 75,%d\\n replot\\n\"%(i) for i in range(1,360,1)])\n )\n\n\n \nos.system(\"gnuplot -background white gnuplot.in -\")\n\n \n\n"},"license":{"kind":"string","value":"gpl-2.0"},"hash":{"kind":"number","value":-5133550507565548000,"string":"-5,133,550,507,565,548,000"},"line_mean":{"kind":"number","value":21.3048780488,"string":"21.304878"},"line_max":{"kind":"number","value":88,"string":"88"},"alpha_frac":{"kind":"number","value":0.5746309459,"string":"0.574631"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":2.5796897038081807,"string":"2.57969"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":74,"cells":{"repo_name":{"kind":"string","value":"eicher31/compassion-modules"},"path":{"kind":"string","value":"child_compassion/mappings/household_mapping.py"},"copies":{"kind":"string","value":"3"},"size":{"kind":"string","value":"3536"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)\n# Releasing children from poverty in Jesus' name\n# @author: Emanuel Cino \n#\n# The licence is in the file __manifest__.py\n#\n##############################################################################\nfrom odoo.addons.message_center_compassion.mappings.base_mapping import \\\n OnrampMapping\n\n\nclass HouseHoldMapping(OnrampMapping):\n ODOO_MODEL = 'compassion.household'\n\n CONNECT_MAPPING = {\n \"BeneficiaryHouseholdMemberList\": ('member_ids',\n 'compassion.household.member'),\n \"BeneficiaryHouseholdMemberDetails\": ('member_ids',\n 'compassion.household.member'),\n \"FemaleGuardianEmploymentStatus\": 'female_guardian_job_type',\n \"FemaleGuardianOccupation\": 'female_guardian_job',\n \"Household_ID\": \"household_id\",\n \"Household_Name\": \"name\",\n \"IsNaturalFatherLivingWithChild\": 'father_living_with_child',\n \"IsNaturalMotherLivingWithChild\": 'mother_living_with_child',\n \"MaleGuardianEmploymentStatus\": 'male_guardian_job_type',\n \"MaleGuardianOccupation\": \"male_guardian_job\",\n \"NaturalFatherAlive\": \"father_alive\",\n \"NaturalMotherAlive\": \"mother_alive\",\n \"NumberOfSiblingBeneficiaries\": \"number_beneficiaries\",\n \"ParentsMaritalStatus\": \"marital_status\",\n \"ParentsTogether\": \"parents_together\",\n 'RevisedValues': 'revised_value_ids',\n\n # Not define\n \"SourceKitName\": None,\n }\n\n def _process_odoo_data(self, odoo_data):\n # Unlink old revised values and create new ones\n if isinstance(odoo_data.get('revised_value_ids'), list):\n household = self.env[self.ODOO_MODEL].search(\n [('household_id', '=', odoo_data['household_id'])])\n household.revised_value_ids.unlink()\n for value in odoo_data['revised_value_ids']:\n self.env['compassion.major.revision'].create({\n 'name': value,\n 'household_id': household.id,\n })\n del odoo_data['revised_value_ids']\n\n # Replace dict by a tuple for the ORM update/create\n if 'member_ids' in odoo_data:\n # Remove all members\n household = self.env[self.ODOO_MODEL].search(\n [('household_id', '=', odoo_data['household_id'])])\n household.member_ids.unlink()\n\n member_list = list()\n for member in odoo_data['member_ids']:\n orm_tuple = (0, 0, member)\n member_list.append(orm_tuple)\n odoo_data['member_ids'] = member_list or False\n\n for key in odoo_data.iterkeys():\n val = odoo_data[key]\n if isinstance(val, basestring) and val.lower() in (\n 'null', 'false', 'none', 'other', 'unknown'):\n odoo_data[key] = False\n\n\nclass HouseholdMemberMapping(OnrampMapping):\n ODOO_MODEL = 'compassion.household.member'\n\n CONNECT_MAPPING = {\n \"Beneficiary_GlobalID\": ('child_id.global_id', 'compassion.child'),\n \"Beneficiary_LocalID\": 'beneficiary_local_id',\n \"FullName\": None,\n \"HouseholdMemberRole\": 'role',\n \"HouseholdMember_Name\": 'name',\n \"IsCaregiver\": 'is_caregiver',\n \"IsPrimaryCaregiver\": 'is_primary_caregiver',\n }\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":8306735830094433000,"string":"8,306,735,830,094,433,000"},"line_mean":{"kind":"number","value":40.1162790698,"string":"40.116279"},"line_max":{"kind":"number","value":78,"string":"78"},"alpha_frac":{"kind":"number","value":0.5653280543,"string":"0.565328"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.694879832810867,"string":"3.69488"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":75,"cells":{"repo_name":{"kind":"string","value":"go-bears/nupic"},"path":{"kind":"string","value":"src/nupic/encoders/category.py"},"copies":{"kind":"string","value":"39"},"size":{"kind":"string","value":"7798"},"content":{"kind":"string","value":"# ----------------------------------------------------------------------\n# Numenta Platform for Intelligent Computing (NuPIC)\n# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement\n# with Numenta, Inc., for a separate license for this software code, the\n# following terms and conditions apply:\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU Affero Public License for more details.\n#\n# You should have received a copy of the GNU Affero Public License\n# along with this program. If not, see http://www.gnu.org/licenses.\n#\n# http://numenta.org/licenses/\n# ----------------------------------------------------------------------\n\nimport numpy\n\nfrom nupic.data.fieldmeta import FieldMetaType\nfrom nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA\nfrom nupic.encoders.base import Encoder, EncoderResult\nfrom nupic.encoders.scalar import ScalarEncoder\n\n\n\nUNKNOWN = \"\"\n\n\n\nclass CategoryEncoder(Encoder):\n \"\"\"Encodes a list of discrete categories (described by strings), that aren't\n related to each other, so we never emit a mixture of categories.\n\n The value of zero is reserved for \"unknown category\"\n\n Internally we use a ScalarEncoder with a radius of 1, but since we only encode\n integers, we never get mixture outputs.\n\n The SDRCategoryEncoder uses a different method to encode categories\"\"\"\n\n\n def __init__(self, w, categoryList, name=\"category\", verbosity=0, forced=False):\n \"\"\"params:\n forced (default False) : if True, skip checks for parameters' settings; see encoders/scalar.py for details\n \"\"\"\n\n self.encoders = None\n self.verbosity = verbosity\n\n # number of categories includes \"unknown\"\n self.ncategories = len(categoryList) + 1\n\n self.categoryToIndex = dict()\n self.indexToCategory = dict()\n self.indexToCategory[0] = UNKNOWN\n for i in xrange(len(categoryList)):\n self.categoryToIndex[categoryList[i]] = i+1\n self.indexToCategory[i+1] = categoryList[i]\n\n self.encoder = ScalarEncoder(w, minval=0, maxval=self.ncategories - 1,\n radius=1, periodic=False, forced=forced)\n self.width = w * self.ncategories\n assert self.encoder.getWidth() == self.width\n\n self.description = [(name, 0)]\n self.name = name\n\n # These are used to support the topDownCompute method\n self._topDownMappingM = None\n\n # This gets filled in by getBucketValues\n self._bucketValues = None\n\n\n def getDecoderOutputFieldTypes(self):\n \"\"\" [Encoder class virtual method override]\n \"\"\"\n # TODO: change back to string meta-type after the decoding logic is fixed\n # to output strings instead of internal index values.\n #return (FieldMetaType.string,)\n return (FieldMetaType.integer,)\n\n\n def getWidth(self):\n return self.width\n\n\n def getDescription(self):\n return self.description\n\n\n def getScalars(self, input):\n \"\"\" See method description in base.py \"\"\"\n if input == SENTINEL_VALUE_FOR_MISSING_DATA:\n return numpy.array([None])\n else:\n return numpy.array([self.categoryToIndex.get(input, 0)])\n\n\n def getBucketIndices(self, input):\n \"\"\" See method description in base.py \"\"\"\n\n # Get the bucket index from the underlying scalar encoder\n if input == SENTINEL_VALUE_FOR_MISSING_DATA:\n return [None]\n else:\n return self.encoder.getBucketIndices(self.categoryToIndex.get(input, 0))\n\n\n def encodeIntoArray(self, input, output):\n # if not found, we encode category 0\n if input == SENTINEL_VALUE_FOR_MISSING_DATA:\n output[0:] = 0\n val = \"\"\n else:\n val = self.categoryToIndex.get(input, 0)\n self.encoder.encodeIntoArray(val, output)\n\n if self.verbosity >= 2:\n print \"input:\", input, \"va:\", val, \"output:\", output\n print \"decoded:\", self.decodedToStr(self.decode(output))\n\n\n def decode(self, encoded, parentFieldName=''):\n \"\"\" See the function description in base.py\n \"\"\"\n\n # Get the scalar values from the underlying scalar encoder\n (fieldsDict, fieldNames) = self.encoder.decode(encoded)\n if len(fieldsDict) == 0:\n return (fieldsDict, fieldNames)\n\n # Expect only 1 field\n assert(len(fieldsDict) == 1)\n\n # Get the list of categories the scalar values correspond to and\n # generate the description from the category name(s).\n (inRanges, inDesc) = fieldsDict.values()[0]\n outRanges = []\n desc = \"\"\n for (minV, maxV) in inRanges:\n minV = int(round(minV))\n maxV = int(round(maxV))\n outRanges.append((minV, maxV))\n while minV <= maxV:\n if len(desc) > 0:\n desc += \", \"\n desc += self.indexToCategory[minV]\n minV += 1\n\n # Return result\n if parentFieldName != '':\n fieldName = \"%s.%s\" % (parentFieldName, self.name)\n else:\n fieldName = self.name\n return ({fieldName: (outRanges, desc)}, [fieldName])\n\n\n def closenessScores(self, expValues, actValues, fractional=True,):\n \"\"\" See the function description in base.py\n\n kwargs will have the keyword \"fractional\", which is ignored by this encoder\n \"\"\"\n\n expValue = expValues[0]\n actValue = actValues[0]\n\n if expValue == actValue:\n closeness = 1.0\n else:\n closeness = 0.0\n\n if not fractional:\n closeness = 1.0 - closeness\n\n return numpy.array([closeness])\n\n\n def getBucketValues(self):\n \"\"\" See the function description in base.py \"\"\"\n\n if self._bucketValues is None:\n numBuckets = len(self.encoder.getBucketValues())\n self._bucketValues = []\n for bucketIndex in range(numBuckets):\n self._bucketValues.append(self.getBucketInfo([bucketIndex])[0].value)\n\n return self._bucketValues\n\n\n def getBucketInfo(self, buckets):\n \"\"\" See the function description in base.py\n \"\"\"\n\n # For the category encoder, the bucket index is the category index\n bucketInfo = self.encoder.getBucketInfo(buckets)[0]\n\n categoryIndex = int(round(bucketInfo.value))\n category = self.indexToCategory[categoryIndex]\n\n return [EncoderResult(value=category, scalar=categoryIndex,\n encoding=bucketInfo.encoding)]\n\n\n def topDownCompute(self, encoded):\n \"\"\" See the function description in base.py\n \"\"\"\n\n encoderResult = self.encoder.topDownCompute(encoded)[0]\n value = encoderResult.value\n categoryIndex = int(round(value))\n category = self.indexToCategory[categoryIndex]\n\n return EncoderResult(value=category, scalar=categoryIndex,\n encoding=encoderResult.encoding)\n\n\n @classmethod\n def read(cls, proto):\n encoder = object.__new__(cls)\n\n encoder.verbosity = proto.verbosity\n encoder.encoder = ScalarEncoder.read(proto.encoder)\n encoder.width = proto.width\n encoder.description = [(proto.name, 0)]\n encoder.name = proto.name\n encoder.indexToCategory = {x.index: x.category\n for x in proto.indexToCategory}\n encoder.categoryToIndex = {category: index\n for index, category\n in encoder.indexToCategory.items()\n if category != UNKNOWN}\n encoder._topDownMappingM = None\n encoder._bucketValues = None\n\n return encoder\n\n\n def write(self, proto):\n proto.width = self.width\n proto.indexToCategory = [\n {\"index\": index, \"category\": category}\n for index, category in self.indexToCategory.items()\n ]\n proto.name = self.name\n proto.verbosity = self.verbosity\n self.encoder.write(proto.encoder)\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":-742178916835247100,"string":"-742,178,916,835,247,100"},"line_mean":{"kind":"number","value":29.9444444444,"string":"29.944444"},"line_max":{"kind":"number","value":113,"string":"113"},"alpha_frac":{"kind":"number","value":0.6623493203,"string":"0.662349"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.093438320209974,"string":"4.093438"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":76,"cells":{"repo_name":{"kind":"string","value":"dvliman/jaikuengine"},"path":{"kind":"string","value":".google_appengine/lib/django-1.5/tests/regressiontests/extra_regress/models.py"},"copies":{"kind":"string","value":"114"},"size":{"kind":"string","value":"1365"},"content":{"kind":"string","value":"from __future__ import unicode_literals\n\nimport copy\nimport datetime\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.encoding import python_2_unicode_compatible\n\n\n@python_2_unicode_compatible\nclass RevisionableModel(models.Model):\n base = models.ForeignKey('self', null=True)\n title = models.CharField(blank=True, max_length=255)\n when = models.DateTimeField(default=datetime.datetime.now)\n\n def __str__(self):\n return \"%s (%s, %s)\" % (self.title, self.id, self.base.id)\n\n def save(self, *args, **kwargs):\n super(RevisionableModel, self).save(*args, **kwargs)\n if not self.base:\n self.base = self\n kwargs.pop('force_insert', None)\n kwargs.pop('force_update', None)\n super(RevisionableModel, self).save(*args, **kwargs)\n\n def new_revision(self):\n new_revision = copy.copy(self)\n new_revision.pk = None\n return new_revision\n\nclass Order(models.Model):\n created_by = models.ForeignKey(User)\n text = models.TextField()\n\n@python_2_unicode_compatible\nclass TestObject(models.Model):\n first = models.CharField(max_length=20)\n second = models.CharField(max_length=20)\n third = models.CharField(max_length=20)\n\n def __str__(self):\n return 'TestObject: %s,%s,%s' % (self.first,self.second,self.third)\n\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":-6502126748451387000,"string":"-6,502,126,748,451,387,000"},"line_mean":{"kind":"number","value":29.3333333333,"string":"29.333333"},"line_max":{"kind":"number","value":75,"string":"75"},"alpha_frac":{"kind":"number","value":0.6673992674,"string":"0.667399"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.611111111111111,"string":"3.611111"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":77,"cells":{"repo_name":{"kind":"string","value":"goFrendiAsgard/kokoropy"},"path":{"kind":"string","value":"kokoropy/packages/sqlalchemy/dialects/mysql/pyodbc.py"},"copies":{"kind":"string","value":"32"},"size":{"kind":"string","value":"2640"},"content":{"kind":"string","value":"# mysql/pyodbc.py\n# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors\n# \n#\n# This module is part of SQLAlchemy and is released under\n# the MIT License: http://www.opensource.org/licenses/mit-license.php\n\n\"\"\"\n\n\n.. dialect:: mysql+pyodbc\n :name: PyODBC\n :dbapi: pyodbc\n :connectstring: mysql+pyodbc://:@\n :url: http://pypi.python.org/pypi/pyodbc/\n\n\nLimitations\n-----------\n\nThe mysql-pyodbc dialect is subject to unresolved character encoding issues\nwhich exist within the current ODBC drivers available.\n(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage\nof OurSQL, MySQLdb, or MySQL-connector/Python.\n\n\"\"\"\n\nfrom .base import MySQLDialect, MySQLExecutionContext\nfrom ...connectors.pyodbc import PyODBCConnector\nfrom ... import util\nimport re\n\n\nclass MySQLExecutionContext_pyodbc(MySQLExecutionContext):\n\n def get_lastrowid(self):\n cursor = self.create_cursor()\n cursor.execute(\"SELECT LAST_INSERT_ID()\")\n lastrowid = cursor.fetchone()[0]\n cursor.close()\n return lastrowid\n\n\nclass MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):\n supports_unicode_statements = False\n execution_ctx_cls = MySQLExecutionContext_pyodbc\n\n pyodbc_driver_name = \"MySQL\"\n\n def __init__(self, **kw):\n # deal with http://code.google.com/p/pyodbc/issues/detail?id=25\n kw.setdefault('convert_unicode', True)\n super(MySQLDialect_pyodbc, self).__init__(**kw)\n\n def _detect_charset(self, connection):\n \"\"\"Sniff out the character set in use for connection results.\"\"\"\n\n # Prefer 'character_set_results' for the current connection over the\n # value in the driver. SET NAMES or individual variable SETs will\n # change the charset without updating the driver's view of the world.\n #\n # If it's decided that issuing that sort of SQL leaves you SOL, then\n # this can prefer the driver value.\n rs = connection.execute(\"SHOW VARIABLES LIKE 'character_set%%'\")\n opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])\n for key in ('character_set_connection', 'character_set'):\n if opts.get(key, None):\n return opts[key]\n\n util.warn(\"Could not detect the connection character set. \"\n \"Assuming latin1.\")\n return 'latin1'\n\n def _extract_error_code(self, exception):\n m = re.compile(r\"\\((\\d+)\\)\").search(str(exception.args))\n c = m.group(1)\n if c:\n return int(c)\n else:\n return None\n\ndialect = MySQLDialect_pyodbc\n"},"license":{"kind":"string","value":"mit"},"hash":{"kind":"number","value":-6467777041147659000,"string":"-6,467,777,041,147,659,000"},"line_mean":{"kind":"number","value":31.1951219512,"string":"31.195122"},"line_max":{"kind":"number","value":77,"string":"77"},"alpha_frac":{"kind":"number","value":0.6606060606,"string":"0.660606"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.776824034334764,"string":"3.776824"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":78,"cells":{"repo_name":{"kind":"string","value":"chienlieu2017/it_management"},"path":{"kind":"string","value":"odoo/addons/website_event/controllers/main.py"},"copies":{"kind":"string","value":"7"},"size":{"kind":"string","value":"11571"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport babel.dates\nimport re\nimport werkzeug\nfrom datetime import datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\n\nfrom odoo import fields, http, _\nfrom odoo.addons.website.models.website import slug\nfrom odoo.http import request\n\n\nclass WebsiteEventController(http.Controller):\n\n @http.route(['/event', '/event/page/', '/events', '/events/page/'], type='http', auth=\"public\", website=True)\n def events(self, page=1, **searches):\n Event = request.env['event.event']\n EventType = request.env['event.type']\n\n searches.setdefault('date', 'all')\n searches.setdefault('type', 'all')\n searches.setdefault('country', 'all')\n\n domain_search = {}\n\n def sdn(date):\n return fields.Datetime.to_string(date.replace(hour=23, minute=59, second=59))\n\n def sd(date):\n return fields.Datetime.to_string(date)\n today = datetime.today()\n dates = [\n ['all', _('Next Events'), [(\"date_end\", \">\", sd(today))], 0],\n ['today', _('Today'), [\n (\"date_end\", \">\", sd(today)),\n (\"date_begin\", \"<\", sdn(today))],\n 0],\n ['week', _('This Week'), [\n (\"date_end\", \">=\", sd(today + relativedelta(days=-today.weekday()))),\n (\"date_begin\", \"<\", sdn(today + relativedelta(days=6-today.weekday())))],\n 0],\n ['nextweek', _('Next Week'), [\n (\"date_end\", \">=\", sd(today + relativedelta(days=7-today.weekday()))),\n (\"date_begin\", \"<\", sdn(today + relativedelta(days=13-today.weekday())))],\n 0],\n ['month', _('This month'), [\n (\"date_end\", \">=\", sd(today.replace(day=1))),\n (\"date_begin\", \"<\", (today.replace(day=1) + relativedelta(months=1)).strftime('%Y-%m-%d 00:00:00'))],\n 0],\n ['nextmonth', _('Next month'), [\n (\"date_end\", \">=\", sd(today.replace(day=1) + relativedelta(months=1))),\n (\"date_begin\", \"<\", (today.replace(day=1) + relativedelta(months=2)).strftime('%Y-%m-%d 00:00:00'))],\n 0],\n ['old', _('Old Events'), [\n (\"date_end\", \"<\", today.strftime('%Y-%m-%d 00:00:00'))],\n 0],\n ]\n\n # search domains\n # TDE note: WTF ???\n current_date = None\n current_type = None\n current_country = None\n for date in dates:\n if searches[\"date\"] == date[0]:\n domain_search[\"date\"] = date[2]\n if date[0] != 'all':\n current_date = date[1]\n if searches[\"type\"] != 'all':\n current_type = EventType.browse(int(searches['type']))\n domain_search[\"type\"] = [(\"event_type_id\", \"=\", int(searches[\"type\"]))]\n\n if searches[\"country\"] != 'all' and searches[\"country\"] != 'online':\n current_country = request.env['res.country'].browse(int(searches['country']))\n domain_search[\"country\"] = ['|', (\"country_id\", \"=\", int(searches[\"country\"])), (\"country_id\", \"=\", False)]\n elif searches[\"country\"] == 'online':\n domain_search[\"country\"] = [(\"country_id\", \"=\", False)]\n\n def dom_without(without):\n domain = [('state', \"in\", ['draft', 'confirm', 'done'])]\n for key, search in domain_search.items():\n if key != without:\n domain += search\n return domain\n\n # count by domains without self search\n for date in dates:\n if date[0] != 'old':\n date[3] = Event.search_count(dom_without('date') + date[2])\n\n domain = dom_without('type')\n types = Event.read_group(domain, [\"id\", \"event_type_id\"], groupby=[\"event_type_id\"], orderby=\"event_type_id\")\n types.insert(0, {\n 'event_type_id_count': sum([int(type['event_type_id_count']) for type in types]),\n 'event_type_id': (\"all\", _(\"All Categories\"))\n })\n\n domain = dom_without('country')\n countries = Event.read_group(domain, [\"id\", \"country_id\"], groupby=\"country_id\", orderby=\"country_id\")\n countries.insert(0, {\n 'country_id_count': sum([int(country['country_id_count']) for country in countries]),\n 'country_id': (\"all\", _(\"All Countries\"))\n })\n\n step = 10 # Number of events per page\n event_count = Event.search_count(dom_without(\"none\"))\n pager = request.website.pager(\n url=\"/event\",\n url_args={'date': searches.get('date'), 'type': searches.get('type'), 'country': searches.get('country')},\n total=event_count,\n page=page,\n step=step,\n scope=5)\n\n order = 'website_published desc, date_begin'\n if searches.get('date', 'all') == 'old':\n order = 'website_published desc, date_begin desc'\n events = Event.search(dom_without(\"none\"), limit=step, offset=pager['offset'], order=order)\n\n values = {\n 'current_date': current_date,\n 'current_country': current_country,\n 'current_type': current_type,\n 'event_ids': events, # event_ids used in website_event_track so we keep name as it is\n 'dates': dates,\n 'types': types,\n 'countries': countries,\n 'pager': pager,\n 'searches': searches,\n 'search_path': \"?%s\" % werkzeug.url_encode(searches),\n }\n\n return request.render(\"website_event.index\", values)\n\n @http.route(['/event//page/'], type='http', auth=\"public\", website=True)\n def event_page(self, event, page, **post):\n values = {\n 'event': event,\n 'main_object': event\n }\n\n if '.' not in page:\n page = 'website_event.%s' % page\n\n try:\n request.website.get_template(page)\n except ValueError:\n # page not found\n values['path'] = re.sub(r\"^website_event\\.\", '', page)\n values['from_template'] = 'website_event.default_page' # .strip('website_event.')\n page = 'website.page_404'\n\n return request.render(page, values)\n\n @http.route(['/event/'], type='http', auth=\"public\", website=True)\n def event(self, event, **post):\n if event.menu_id and event.menu_id.child_id:\n target_url = event.menu_id.child_id[0].url\n else:\n target_url = '/event/%s/register' % str(event.id)\n if post.get('enable_editor') == '1':\n target_url += '?enable_editor=1'\n return request.redirect(target_url)\n\n @http.route(['/event//register'], type='http', auth=\"public\", website=True)\n def event_register(self, event, **post):\n values = {\n 'event': event,\n 'main_object': event,\n 'range': range,\n }\n return request.render(\"website_event.event_description_full\", values)\n\n @http.route('/event/add_event', type='http', auth=\"user\", methods=['POST'], website=True)\n def add_event(self, event_name=\"New Event\", **kwargs):\n event = self._add_event(event_name, request.context)\n return request.redirect(\"/event/%s/register?enable_editor=1\" % slug(event))\n\n def _add_event(self, event_name=None, context=None, **kwargs):\n if not event_name:\n event_name = _(\"New Event\")\n date_begin = datetime.today() + timedelta(days=(14))\n vals = {\n 'name': event_name,\n 'date_begin': fields.Date.to_string(date_begin),\n 'date_end': fields.Date.to_string((date_begin + timedelta(days=(1)))),\n 'seats_available': 1000,\n }\n return request.env['event.event'].with_context(context or {}).create(vals)\n\n def get_formated_date(self, event):\n start_date = fields.Datetime.from_string(event.date_begin).date()\n end_date = fields.Datetime.from_string(event.date_end).date()\n month = babel.dates.get_month_names('abbreviated', locale=event.env.context.get('lang', 'en_US'))[start_date.month]\n return ('%s %s%s') % (month, start_date.strftime(\"%e\"), (end_date != start_date and (\"-\" + end_date.strftime(\"%e\")) or \"\"))\n\n @http.route('/event/get_country_event_list', type='http', auth='public', website=True)\n def get_country_events(self, **post):\n Event = request.env['event.event']\n country_code = request.session['geoip'].get('country_code')\n result = {'events': [], 'country': False}\n events = None\n if country_code:\n country = request.env['res.country'].search([('code', '=', country_code)], limit=1)\n events = Event.search(['|', ('address_id', '=', None), ('country_id.code', '=', country_code), ('date_begin', '>=', '%s 00:00:00' % fields.Date.today()), ('state', '=', 'confirm')], order=\"date_begin\")\n if not events:\n events = Event.search([('date_begin', '>=', '%s 00:00:00' % fields.Date.today()), ('state', '=', 'confirm')], order=\"date_begin\")\n for event in events:\n if country_code and event.country_id.code == country_code:\n result['country'] = country\n result['events'].append({\n \"date\": self.get_formated_date(event),\n \"event\": event,\n \"url\": event.website_url})\n return request.render(\"website_event.country_events_list\", result)\n\n def _process_tickets_details(self, data):\n nb_register = int(data.get('nb_register-0', 0))\n if nb_register:\n return [{'id': 0, 'name': 'Registration', 'quantity': nb_register, 'price': 0}]\n return []\n\n @http.route(['/event//registration/new'], type='json', auth=\"public\", methods=['POST'], website=True)\n def registration_new(self, event, **post):\n tickets = self._process_tickets_details(post)\n if not tickets:\n return request.redirect(\"/event/%s\" % slug(event))\n return request.env['ir.ui.view'].render_template(\"website_event.registration_attendee_details\", {'tickets': tickets, 'event': event})\n\n def _process_registration_details(self, details):\n ''' Process data posted from the attendee details form. '''\n registrations = {}\n global_values = {}\n for key, value in details.iteritems():\n counter, field_name = key.split('-', 1)\n if counter == '0':\n global_values[field_name] = value\n else:\n registrations.setdefault(counter, dict())[field_name] = value\n for key, value in global_values.iteritems():\n for registration in registrations.values():\n registration[key] = value\n return registrations.values()\n\n @http.route(['/event//registration/confirm'], type='http', auth=\"public\", methods=['POST'], website=True)\n def registration_confirm(self, event, **post):\n Attendees = request.env['event.registration']\n registrations = self._process_registration_details(post)\n\n for registration in registrations:\n registration['event_id'] = event\n Attendees += Attendees.sudo().create(\n Attendees._prepare_attendee_values(registration))\n\n return request.render(\"website_event.registration_complete\", {\n 'attendees': Attendees,\n 'event': event,\n })\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":8617715063772964000,"string":"8,617,715,063,772,964,000"},"line_mean":{"kind":"number","value":43.6756756757,"string":"43.675676"},"line_max":{"kind":"number","value":213,"string":"213"},"alpha_frac":{"kind":"number","value":0.5514648691,"string":"0.551465"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.8815833612881585,"string":"3.881583"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":79,"cells":{"repo_name":{"kind":"string","value":"derekjchow/models"},"path":{"kind":"string","value":"research/deeplab/core/nas_cell.py"},"copies":{"kind":"string","value":"1"},"size":{"kind":"string","value":"8432"},"content":{"kind":"string","value":"# Copyright 2018 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Cell structure used by NAS.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom deeplab.core.utils import resize_bilinear\nfrom deeplab.core.utils import scale_dimension\n\narg_scope = tf.contrib.framework.arg_scope\nslim = tf.contrib.slim\n\n\nclass NASBaseCell(object):\n \"\"\"NASNet Cell class that is used as a 'layer' in image architectures.\n See https://arxiv.org/abs/1707.07012 and https://arxiv.org/abs/1712.00559.\n\n Args:\n num_conv_filters: The number of filters for each convolution operation.\n operations: List of operations that are performed in the NASNet Cell in\n order.\n used_hiddenstates: Binary array that signals if the hiddenstate was used\n within the cell. This is used to determine what outputs of the cell\n should be concatenated together.\n hiddenstate_indices: Determines what hiddenstates should be combined\n together with the specified operations to create the NASNet cell.\n \"\"\"\n\n def __init__(self, num_conv_filters, operations, used_hiddenstates,\n hiddenstate_indices, drop_path_keep_prob, total_num_cells,\n total_training_steps):\n if len(hiddenstate_indices) != len(operations):\n raise ValueError(\n 'Number of hiddenstate_indices and operations should be the same.')\n if len(operations) % 2:\n raise ValueError('Number of operations should be even.')\n self._num_conv_filters = num_conv_filters\n self._operations = operations\n self._used_hiddenstates = used_hiddenstates\n self._hiddenstate_indices = hiddenstate_indices\n self._drop_path_keep_prob = drop_path_keep_prob\n self._total_num_cells = total_num_cells\n self._total_training_steps = total_training_steps\n\n def __call__(self, net, scope, filter_scaling, stride, prev_layer, cell_num):\n \"\"\"Runs the conv cell.\"\"\"\n self._cell_num = cell_num\n self._filter_scaling = filter_scaling\n self._filter_size = int(self._num_conv_filters * filter_scaling)\n\n with tf.variable_scope(scope):\n net = self._cell_base(net, prev_layer)\n for i in range(len(self._operations) // 2):\n with tf.variable_scope('comb_iter_{}'.format(i)):\n h1 = net[self._hiddenstate_indices[i * 2]]\n h2 = net[self._hiddenstate_indices[i * 2 + 1]]\n with tf.variable_scope('left'):\n h1 = self._apply_conv_operation(\n h1, self._operations[i * 2], stride,\n self._hiddenstate_indices[i * 2] < 2)\n with tf.variable_scope('right'):\n h2 = self._apply_conv_operation(\n h2, self._operations[i * 2 + 1], stride,\n self._hiddenstate_indices[i * 2 + 1] < 2)\n with tf.variable_scope('combine'):\n h = h1 + h2\n net.append(h)\n\n with tf.variable_scope('cell_output'):\n net = self._combine_unused_states(net)\n\n return net\n\n def _cell_base(self, net, prev_layer):\n \"\"\"Runs the beginning of the conv cell before the chosen ops are run.\"\"\"\n filter_size = self._filter_size\n\n if prev_layer is None:\n prev_layer = net\n else:\n if net.shape[2] != prev_layer.shape[2]:\n prev_layer = resize_bilinear(\n prev_layer, tf.shape(net)[1:3], prev_layer.dtype)\n if filter_size != prev_layer.shape[3]:\n prev_layer = tf.nn.relu(prev_layer)\n prev_layer = slim.conv2d(prev_layer, filter_size, 1, scope='prev_1x1')\n prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')\n\n net = tf.nn.relu(net)\n net = slim.conv2d(net, filter_size, 1, scope='1x1')\n net = slim.batch_norm(net, scope='beginning_bn')\n net = tf.split(axis=3, num_or_size_splits=1, value=net)\n net.append(prev_layer)\n return net\n\n def _apply_conv_operation(self, net, operation, stride,\n is_from_original_input):\n \"\"\"Applies the predicted conv operation to net.\"\"\"\n if stride > 1 and not is_from_original_input:\n stride = 1\n input_filters = net.shape[3]\n filter_size = self._filter_size\n if 'separable' in operation:\n num_layers = int(operation.split('_')[-1])\n kernel_size = int(operation.split('x')[0][-1])\n for layer_num in range(num_layers):\n net = tf.nn.relu(net)\n net = slim.separable_conv2d(\n net,\n filter_size,\n kernel_size,\n depth_multiplier=1,\n scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),\n stride=stride)\n net = slim.batch_norm(\n net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))\n stride = 1\n elif 'atrous' in operation:\n kernel_size = int(operation.split('x')[0][-1])\n net = tf.nn.relu(net)\n if stride == 2:\n scaled_height = scale_dimension(tf.shape(net)[1], 0.5)\n scaled_width = scale_dimension(tf.shape(net)[2], 0.5)\n net = resize_bilinear(net, [scaled_height, scaled_width], net.dtype)\n net = slim.conv2d(net, filter_size, kernel_size, rate=1,\n scope='atrous_{0}x{0}'.format(kernel_size))\n else:\n net = slim.conv2d(net, filter_size, kernel_size, rate=2,\n scope='atrous_{0}x{0}'.format(kernel_size))\n net = slim.batch_norm(net, scope='bn_atr_{0}x{0}'.format(kernel_size))\n elif operation in ['none']:\n if stride > 1 or (input_filters != filter_size):\n net = tf.nn.relu(net)\n net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')\n net = slim.batch_norm(net, scope='bn_1')\n elif 'pool' in operation:\n pooling_type = operation.split('_')[0]\n pooling_shape = int(operation.split('_')[-1].split('x')[0])\n if pooling_type == 'avg':\n net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding='SAME')\n elif pooling_type == 'max':\n net = slim.max_pool2d(net, pooling_shape, stride=stride, padding='SAME')\n else:\n raise ValueError('Unimplemented pooling type: ', pooling_type)\n if input_filters != filter_size:\n net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')\n net = slim.batch_norm(net, scope='bn_1')\n else:\n raise ValueError('Unimplemented operation', operation)\n\n if operation != 'none':\n net = self._apply_drop_path(net)\n return net\n\n def _combine_unused_states(self, net):\n \"\"\"Concatenates the unused hidden states of the cell.\"\"\"\n used_hiddenstates = self._used_hiddenstates\n states_to_combine = ([\n h for h, is_used in zip(net, used_hiddenstates) if not is_used])\n net = tf.concat(values=states_to_combine, axis=3)\n return net\n\n @tf.contrib.framework.add_arg_scope\n def _apply_drop_path(self, net):\n \"\"\"Apply drop_path regularization.\"\"\"\n drop_path_keep_prob = self._drop_path_keep_prob\n if drop_path_keep_prob < 1.0:\n # Scale keep prob by layer number.\n assert self._cell_num != -1\n layer_ratio = (self._cell_num + 1) / float(self._total_num_cells)\n drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)\n # Decrease keep prob over time.\n current_step = tf.cast(tf.train.get_or_create_global_step(), tf.float32)\n current_ratio = tf.minimum(1.0, current_step / self._total_training_steps)\n drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))\n # Drop path.\n noise_shape = [tf.shape(net)[0], 1, 1, 1]\n random_tensor = drop_path_keep_prob\n random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)\n binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)\n keep_prob_inv = tf.cast(1.0 / drop_path_keep_prob, net.dtype)\n net = net * keep_prob_inv * binary_tensor\n return net\n"},"license":{"kind":"string","value":"apache-2.0"},"hash":{"kind":"number","value":4058546181967095300,"string":"4,058,546,181,967,095,300"},"line_mean":{"kind":"number","value":41.3718592965,"string":"41.371859"},"line_max":{"kind":"number","value":80,"string":"80"},"alpha_frac":{"kind":"number","value":0.6290322581,"string":"0.629032"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.5118700541441066,"string":"3.51187"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":80,"cells":{"repo_name":{"kind":"string","value":"sgarrity/bedrock"},"path":{"kind":"string","value":"lib/l10n_utils/management/commands/fluent.py"},"copies":{"kind":"string","value":"8"},"size":{"kind":"string","value":"3597"},"content":{"kind":"string","value":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom pathlib import Path\nimport textwrap\n\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n help = 'Convert a template to use Fluent for l10n'\n requires_system_checks = False\n\n def add_arguments(self, parser):\n subparsers = parser.add_subparsers(\n title='subcommand', dest='subcommand'\n )\n subparsers.add_parser('help')\n\n recipe_parser = subparsers.add_parser(\n 'recipe',\n description='Create migration recipe from template'\n )\n recipe_parser.add_argument('template', type=Path)\n\n ftl_parser = subparsers.add_parser(\n 'ftl',\n description='Create Fluent file with existing recipe'\n )\n ftl_parser.add_argument(\n 'recipe_or_template', type=Path,\n help='Path to the recipe or the template from which the recipe was generated'\n )\n ftl_parser.add_argument(\n 'locales', nargs='*', default=['en'], metavar='ab-CD',\n help='Locale codes to create ftl files for'\n )\n\n template_parser = subparsers.add_parser(\n 'template',\n description='Create template_ftl.html file with existing recipe'\n )\n template_parser.add_argument('template', type=Path)\n\n activation_parser = subparsers.add_parser(\n 'activation',\n description='Port activation data from .lang for a recipe/template'\n )\n activation_parser.add_argument(\n 'recipe_or_template', type=Path,\n help='Path to the recipe or the template from which the recipe was generated'\n )\n\n def handle(self, subcommand, **kwargs):\n if subcommand == 'recipe':\n return self.create_recipe(**kwargs)\n if subcommand == 'ftl':\n return self.create_ftl(**kwargs)\n if subcommand == 'template':\n return self.create_template(**kwargs)\n if subcommand == 'activation':\n return self.activation(**kwargs)\n return self.handle_help(**kwargs)\n\n def handle_help(self, **kwargs):\n self.stdout.write(textwrap.dedent('''\\\n To migrate a template from .lang to Fluent, use the subcommands like so\n\n ./manage.py fluent recipe bedrock/app/templates/app/some.html\n\n # edit IDs in lib/fluent_migrations/app/some.py\n\n ./manage.py fluent template bedrock/app/templates/app/some.html\n ./manage.py fluent ftl bedrock/app/templates/app/some.html\n\n More documentation on https://bedrock.readthedocs.io/en/latest/fluent-conversion.html.\n '''))\n\n def create_recipe(self, template, **kwargs):\n from ._fluent_recipe import Recipe\n recipe = Recipe(self)\n recipe.handle(template)\n\n def create_template(self, template, **kwargs):\n from ._fluent_templater import Templater\n templater = Templater(self)\n templater.handle(template)\n\n def create_ftl(self, recipe_or_template, locales, **kwargs):\n from ._fluent_ftl import FTLCreator\n ftl_creator = FTLCreator(self)\n for locale in locales:\n ftl_creator.handle(recipe_or_template, locale)\n\n def activation(self, recipe_or_template, **kwargs):\n from ._fluent_activation import Activation\n activation = Activation(self)\n activation.handle(recipe_or_template)\n"},"license":{"kind":"string","value":"mpl-2.0"},"hash":{"kind":"number","value":-920462829349169400,"string":"-920,462,829,349,169,400"},"line_mean":{"kind":"number","value":35.3333333333,"string":"35.333333"},"line_max":{"kind":"number","value":98,"string":"98"},"alpha_frac":{"kind":"number","value":0.6271893244,"string":"0.627189"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.221830985915493,"string":"4.221831"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":81,"cells":{"repo_name":{"kind":"string","value":"kevinmel2000/brython"},"path":{"kind":"string","value":"www/src/Lib/test/unittests/test_cgitb.py"},"copies":{"kind":"string","value":"113"},"size":{"kind":"string","value":"2551"},"content":{"kind":"string","value":"from test.support import run_unittest\nfrom test.script_helper import assert_python_failure, temp_dir\nimport unittest\nimport sys\nimport cgitb\n\nclass TestCgitb(unittest.TestCase):\n\n def test_fonts(self):\n text = \"Hello Robbie!\"\n self.assertEqual(cgitb.small(text), \"{}\".format(text))\n self.assertEqual(cgitb.strong(text), \"{}\".format(text))\n self.assertEqual(cgitb.grey(text),\n '{}'.format(text))\n\n def test_blanks(self):\n self.assertEqual(cgitb.small(\"\"), \"\")\n self.assertEqual(cgitb.strong(\"\"), \"\")\n self.assertEqual(cgitb.grey(\"\"), \"\")\n\n def test_html(self):\n try:\n raise ValueError(\"Hello World\")\n except ValueError as err:\n # If the html was templated we could do a bit more here.\n # At least check that we get details on what we just raised.\n html = cgitb.html(sys.exc_info())\n self.assertIn(\"ValueError\", html)\n self.assertIn(str(err), html)\n\n def test_text(self):\n try:\n raise ValueError(\"Hello World\")\n except ValueError as err:\n text = cgitb.text(sys.exc_info())\n self.assertIn(\"ValueError\", text)\n self.assertIn(\"Hello World\", text)\n\n def test_syshook_no_logdir_default_format(self):\n with temp_dir() as tracedir:\n rc, out, err = assert_python_failure(\n '-c',\n ('import cgitb; cgitb.enable(logdir=%s); '\n 'raise ValueError(\"Hello World\")') % repr(tracedir))\n out = out.decode(sys.getfilesystemencoding())\n self.assertIn(\"ValueError\", out)\n self.assertIn(\"Hello World\", out)\n # By default we emit HTML markup.\n self.assertIn('

', out)\n self.assertIn('

', out)\n\n def test_syshook_no_logdir_text_format(self):\n # Issue 12890: we were emitting the

tag in text mode.\n with temp_dir() as tracedir:\n rc, out, err = assert_python_failure(\n '-c',\n ('import cgitb; cgitb.enable(format=\"text\", logdir=%s); '\n 'raise ValueError(\"Hello World\")') % repr(tracedir))\n out = out.decode(sys.getfilesystemencoding())\n self.assertIn(\"ValueError\", out)\n self.assertIn(\"Hello World\", out)\n self.assertNotIn('

', out)\n self.assertNotIn('

', out)\n\n\ndef test_main():\n run_unittest(TestCgitb)\n\nif __name__ == \"__main__\":\n test_main()\n"},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":-8993424027276596000,"string":"-8,993,424,027,276,596,000"},"line_mean":{"kind":"number","value":35.4428571429,"string":"35.442857"},"line_max":{"kind":"number","value":80,"string":"80"},"alpha_frac":{"kind":"number","value":0.573500588,"string":"0.573501"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":3.900611620795107,"string":"3.900612"},"config_test":{"kind":"bool","value":true,"string":"true"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":82,"cells":{"repo_name":{"kind":"string","value":"rhololkeolke/apo-website-devin"},"path":{"kind":"string","value":"src/application/facebook/facebook.py"},"copies":{"kind":"string","value":"2"},"size":{"kind":"string","value":"8731"},"content":{"kind":"string","value":"\"\"\"\nThis module contains helper classes and methods\nfor the facebook integration module\n\n.. module:: application.facebook.facebook\n\n.. moduleauthor:: Devin Schwab \n\"\"\"\n\nimport facebooksdk as fb\nimport models\n\nfrom flask import flash\n\nclass AlbumList(object):\n def __init__(self, token):\n \"\"\"\n Given an an access token this class\n will get all albums for the object associated with the token\n (i.e. a page or a user)\n\n It will lazily construct an Album instance for each of\n the album ids returned\n \"\"\"\n \n self.graph = fb.GraphAPI(token.access_token)\n albums_data = self.graph.get_connections('me', 'albums')['data']\n\n self.album_ids = {}\n self.album_names = {}\n for data in albums_data:\n self.album_ids[data['id']] = data\n self.album_names[data['name']] = data\n\n def get_albums_by_name(self, names):\n \"\"\"\n Given a list of names this method will\n return album objects for each matching name.\n\n If a name is not found then it is silently ignored.\n\n This method returns a dictionary mapping name\n to Album object.\n \"\"\"\n\n albums = {}\n for name in names:\n if name in self.album_names:\n if isinstance(self.album_names[name], Album):\n albums[name] = self.album_names[name]\n else:\n self.album_names[name] = Album(graph=self.graph,\n album_data=self.album_names[name])\n self.album_ids[self.album_names[name].me] = self.album_names[name]\n albums[name] = self.album_names[name]\n return albums\n\n def get_albums_by_id(self, ids):\n \"\"\"\n Given a list of ids this method will\n return album objects for each matching id.\n\n If an id is not found then it is silently ignored.\n\n This method returns a dictionary mapping id to\n Album object\n \"\"\"\n\n albums = {}\n for album_id in ids:\n if album_id in self.album_ids:\n if isinstance(self.album_ids[album_id], Album):\n albums[album_id] = self.album_ids[album_id]\n else:\n self.album_ids[album_id] = Album(graph=self.graph,\n album_data=self.album_ids[album_id])\n self.album_names[self.album_ids[album_id].name] = self.album_ids[album_id]\n albums[album_id] = self.album_ids[album_id]\n return albums\n \n\n def get_all_albums_by_id(self):\n \"\"\"\n This method returns a dictionary of all\n albums with album ids as the keys\n \"\"\"\n\n for album_id in self.album_ids:\n if not isinstance(self.album_ids[album_id], Album):\n self.album_ids[album_id] = Album(graph=self.graph,\n album_data=self.album_ids[album_id])\n self.album_names[self.album_ids[album_id].name] = self.album_ids[album_id]\n\n return self.album_ids\n\n def get_all_albums_by_name(self):\n \"\"\"\n This method returns a dictionary of all\n albums with album names as the keys\n \"\"\"\n\n for name in self.album_names:\n if not isinstance(self.album_names[name], Album):\n self.album_names[name] = Album(graph=self.graph,\n album_data=self.album_names[name])\n self.album_ids[self.album_names[name].me] = self.album_names[name]\n\n return self.album_names\n \n \nclass Album(object):\n def __init__(self, graph=None, token=None, album_id=None, album_data=None):\n \"\"\"\n Initializes a new Album object.\n\n If graph is provided then the graph object is saved to this\n instance.\n\n If the token is provided then the graph object for this token\n is created and saved to this instance.\n\n If both are none then an error is raised.\n\n If album_id is provided then the graph object is queried\n for the id and the album object populates itself with this data\n\n If album_data is provided then the graph object is populated\n with the data in the json derived object\n\n If both are None then an error is raised\n \"\"\"\n\n if graph is None and token is None:\n raise TypeError(\"Either a graph object must be provided or a token must be provided\")\n\n if graph is not None:\n self.graph = graph\n query = models.AccessTokenModel.all()\n query.filter('access_token =', graph.access_token)\n\n try:\n self.token = query.fetch(1)[0]\n except IndexError:\n raise TypeError('The token object provided was not an AccessTokenModel instance')\n else:\n self.graph = fb.GraphAPI(token.access_token)\n self.token = token\n\n if album_id is None and album_data is None:\n raise TypeError(\"Either an album id or a album data must be provided\")\n\n if album_id is not None:\n album_data = self.graph.get_object(album_id)\n\n self.me = album_data['id']\n self.name = album_data['name']\n self.desc = album_data.get('description', None)\n self.count = album_data.get('count', 0)\n if 'cover_photo' in album_data:\n self.cover_photo = Photo(self.me, graph=self.graph, photo_id=album_data['cover_photo']).thumbnail\n else:\n self.cover_photo = None\n \n def get_model(self):\n query = models.AlbumModel.all()\n query.filter('me =', self.me)\n\n try:\n return query.fetch(1)[0]\n except IndexError:\n cover_thumb = None\n if self.cover_photo is not None:\n cover_thumb = self.cover_photo\n\n entity = models.AlbumModel(me=self.me,\n token=self.token,\n name=self.name,\n desc=self.desc,\n cover_photo=cover_thumb)\n entity.put()\n return entity\n\n def get_photos(self):\n \"\"\"\n Get a list of Photo objects\n \"\"\"\n\n photos_data = self.graph.get_connections(self.me, 'photos')['data']\n \n photos = []\n for photo_data in photos_data:\n query = models.PhotoModel.all()\n query.filter('me =', photo_data['id'])\n try:\n photos.append(query.fetch(1)[0])\n except IndexError:\n name = None\n if 'name' in photo_data:\n name = photo_data['name']\n\n orig = photo_data['images'][0]['source']\n \n entity = models.PhotoModel(me=photo_data['id'],\n album_id=self.me,\n name=name,\n thumbnail=photo_data['picture'],\n original=orig)\n entity.put()\n\n photos.append(entity)\n \n return photos\n \nclass Photo(object):\n def __init__(self, album_id, graph=None, token=None, photo_id=None, photo_data=None):\n if graph is None and token is None:\n raise TypeError(\"Either a graph object must be provided or a token must be provided\")\n\n if graph is not None:\n self.graph = graph\n else:\n self.graph = fb.GraphAPI(token.access_token)\n\n if photo_id is None and photo_data is None:\n raise TypeError(\"Either an album id or a album data must be provided\")\n\n if photo_id is not None:\n photo_data = self.graph.get_object(photo_id)\n\n self.me = photo_data['id']\n self.name = photo_data.get('name', None)\n self.thumbnail = photo_data['picture']\n self.original = photo_data['images'][0]['source']\n self.album_id = album_id\n\n def get_model(self):\n query = models.PhotoModel.all()\n query.filter('me =', self.me)\n\n try:\n return query.fetch(1)[0]\n except IndexError:\n entity = models.PhotoModel(me=self.me,\n album_id=self.album_id,\n name=self.name,\n thumbnail=self.thumbnail,\n original=self.original)\n entity.put()\n return entity\n "},"license":{"kind":"string","value":"bsd-3-clause"},"hash":{"kind":"number","value":2272791862150649300,"string":"2,272,791,862,150,649,300"},"line_mean":{"kind":"number","value":33.928,"string":"33.928"},"line_max":{"kind":"number","value":109,"string":"109"},"alpha_frac":{"kind":"number","value":0.5296071469,"string":"0.529607"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.36768384192096,"string":"4.367684"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":83,"cells":{"repo_name":{"kind":"string","value":"hectord/lettuce"},"path":{"kind":"string","value":"tests/integration/lib/Django-1.2.5/django/core/handlers/base.py"},"copies":{"kind":"string","value":"44"},"size":{"kind":"string","value":"9926"},"content":{"kind":"string","value":"import sys\n\nfrom django import http\nfrom django.core import signals\nfrom django.utils.encoding import force_unicode\nfrom django.utils.importlib import import_module\n\nclass BaseHandler(object):\n # Changes that are always applied to a response (in this order).\n response_fixes = [\n http.fix_location_header,\n http.conditional_content_removal,\n http.fix_IE_for_attach,\n http.fix_IE_for_vary,\n ]\n\n def __init__(self):\n self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None\n\n def load_middleware(self):\n \"\"\"\n Populate middleware lists from settings.MIDDLEWARE_CLASSES.\n\n Must be called after the environment is fixed (see __call__).\n \"\"\"\n from django.conf import settings\n from django.core import exceptions\n self._view_middleware = []\n self._response_middleware = []\n self._exception_middleware = []\n\n request_middleware = []\n for middleware_path in settings.MIDDLEWARE_CLASSES:\n try:\n dot = middleware_path.rindex('.')\n except ValueError:\n raise exceptions.ImproperlyConfigured('%s isn\\'t a middleware module' % middleware_path)\n mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]\n try:\n mod = import_module(mw_module)\n except ImportError, e:\n raise exceptions.ImproperlyConfigured('Error importing middleware %s: \"%s\"' % (mw_module, e))\n try:\n mw_class = getattr(mod, mw_classname)\n except AttributeError:\n raise exceptions.ImproperlyConfigured('Middleware module \"%s\" does not define a \"%s\" class' % (mw_module, mw_classname))\n\n try:\n mw_instance = mw_class()\n except exceptions.MiddlewareNotUsed:\n continue\n\n if hasattr(mw_instance, 'process_request'):\n request_middleware.append(mw_instance.process_request)\n if hasattr(mw_instance, 'process_view'):\n self._view_middleware.append(mw_instance.process_view)\n if hasattr(mw_instance, 'process_response'):\n self._response_middleware.insert(0, mw_instance.process_response)\n if hasattr(mw_instance, 'process_exception'):\n self._exception_middleware.insert(0, mw_instance.process_exception)\n\n # We only assign to this when initialization is complete as it is used\n # as a flag for initialization being complete.\n self._request_middleware = request_middleware\n\n def get_response(self, request):\n \"Returns an HttpResponse object for the given HttpRequest\"\n from django.core import exceptions, urlresolvers\n from django.conf import settings\n\n try:\n try:\n # Setup default url resolver for this thread.\n urlconf = settings.ROOT_URLCONF\n urlresolvers.set_urlconf(urlconf)\n resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)\n\n # Apply request middleware\n for middleware_method in self._request_middleware:\n response = middleware_method(request)\n if response:\n return response\n\n if hasattr(request, \"urlconf\"):\n # Reset url resolver with a custom urlconf.\n urlconf = request.urlconf\n urlresolvers.set_urlconf(urlconf)\n resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)\n\n callback, callback_args, callback_kwargs = resolver.resolve(\n request.path_info)\n\n # Apply view middleware\n for middleware_method in self._view_middleware:\n response = middleware_method(request, callback, callback_args, callback_kwargs)\n if response:\n return response\n\n try:\n response = callback(request, *callback_args, **callback_kwargs)\n except Exception, e:\n # If the view raised an exception, run it through exception\n # middleware, and if the exception middleware returns a\n # response, use that. Otherwise, reraise the exception.\n for middleware_method in self._exception_middleware:\n response = middleware_method(request, e)\n if response:\n return response\n raise\n\n # Complain if the view returned None (a common error).\n if response is None:\n try:\n view_name = callback.func_name # If it's a function\n except AttributeError:\n view_name = callback.__class__.__name__ + '.__call__' # If it's a class\n raise ValueError(\"The view %s.%s didn't return an HttpResponse object.\" % (callback.__module__, view_name))\n\n return response\n except http.Http404, e:\n if settings.DEBUG:\n from django.views import debug\n return debug.technical_404_response(request, e)\n else:\n try:\n callback, param_dict = resolver.resolve404()\n return callback(request, **param_dict)\n except:\n try:\n return self.handle_uncaught_exception(request, resolver, sys.exc_info())\n finally:\n receivers = signals.got_request_exception.send(sender=self.__class__, request=request)\n except exceptions.PermissionDenied:\n return http.HttpResponseForbidden('

Permission denied

')\n except SystemExit:\n # Allow sys.exit() to actually exit. See tickets #1023 and #4701\n raise\n except: # Handle everything else, including SuspiciousOperation, etc.\n # Get the exception info now, in case another exception is thrown later.\n receivers = signals.got_request_exception.send(sender=self.__class__, request=request)\n return self.handle_uncaught_exception(request, resolver, sys.exc_info())\n finally:\n # Reset URLconf for this thread on the way out for complete\n # isolation of request.urlconf\n urlresolvers.set_urlconf(None)\n\n def handle_uncaught_exception(self, request, resolver, exc_info):\n \"\"\"\n Processing for any otherwise uncaught exceptions (those that will\n generate HTTP 500 responses). Can be overridden by subclasses who want\n customised 500 handling.\n\n Be *very* careful when overriding this because the error could be\n caused by anything, so assuming something like the database is always\n available would be an error.\n \"\"\"\n from django.conf import settings\n from django.core.mail import mail_admins\n\n if settings.DEBUG_PROPAGATE_EXCEPTIONS:\n raise\n\n if settings.DEBUG:\n from django.views import debug\n return debug.technical_500_response(request, *exc_info)\n\n # When DEBUG is False, send an error message to the admins.\n subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)\n try:\n request_repr = repr(request)\n except:\n request_repr = \"Request repr() unavailable\"\n message = \"%s\\n\\n%s\" % (self._get_traceback(exc_info), request_repr)\n mail_admins(subject, message, fail_silently=True)\n # If Http500 handler is not installed, re-raise last exception\n if resolver.urlconf_module is None:\n raise exc_info[1], None, exc_info[2]\n # Return an HttpResponse that displays a friendly error message.\n callback, param_dict = resolver.resolve500()\n return callback(request, **param_dict)\n\n def _get_traceback(self, exc_info=None):\n \"Helper function to return the traceback as a string\"\n import traceback\n return '\\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))\n\n def apply_response_fixes(self, request, response):\n \"\"\"\n Applies each of the functions in self.response_fixes to the request and\n response, modifying the response in the process. Returns the new\n response.\n \"\"\"\n for func in self.response_fixes:\n response = func(request, response)\n return response\n\ndef get_script_name(environ):\n \"\"\"\n Returns the equivalent of the HTTP request's SCRIPT_NAME environment\n variable. If Apache mod_rewrite has been used, returns what would have been\n the script name prior to any rewriting (so it's the script name as seen\n from the client's perspective), unless DJANGO_USE_POST_REWRITE is set (to\n anything).\n \"\"\"\n from django.conf import settings\n if settings.FORCE_SCRIPT_NAME is not None:\n return force_unicode(settings.FORCE_SCRIPT_NAME)\n\n # If Apache's mod_rewrite had a whack at the URL, Apache set either\n # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any\n # rewrites. Unfortunately not every Web server (lighttpd!) passes this\n # information through all the time, so FORCE_SCRIPT_NAME, above, is still\n # needed.\n script_url = environ.get('SCRIPT_URL', u'')\n if not script_url:\n script_url = environ.get('REDIRECT_URL', u'')\n if script_url:\n return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])\n return force_unicode(environ.get('SCRIPT_NAME', u''))\n\n"},"license":{"kind":"string","value":"gpl-3.0"},"hash":{"kind":"number","value":7864434311480931000,"string":"7,864,434,311,480,931,000"},"line_mean":{"kind":"number","value":44.1181818182,"string":"44.118182"},"line_max":{"kind":"number","value":143,"string":"143"},"alpha_frac":{"kind":"number","value":0.5984283699,"string":"0.598428"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.8872476612506155,"string":"4.887248"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":84,"cells":{"repo_name":{"kind":"string","value":"ahmadio/edx-platform"},"path":{"kind":"string","value":"lms/lib/courseware_search/lms_filter_generator.py"},"copies":{"kind":"string","value":"58"},"size":{"kind":"string","value":"5634"},"content":{"kind":"string","value":"\"\"\"\nThis file contains implementation override of SearchFilterGenerator which will allow\n * Filter by all courses in which the user is enrolled in\n\"\"\"\nfrom microsite_configuration import microsite\n\nfrom student.models import CourseEnrollment\nfrom opaque_keys import InvalidKeyError\nfrom opaque_keys.edx.keys import CourseKey\nfrom opaque_keys.edx.locations import SlashSeparatedCourseKey\nfrom xmodule.modulestore.django import modulestore\n\nfrom search.filter_generator import SearchFilterGenerator\nfrom openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme\nfrom openedx.core.djangoapps.course_groups.partition_scheme import CohortPartitionScheme\nfrom courseware.access import get_user_role\n\n\nINCLUDE_SCHEMES = [CohortPartitionScheme, RandomUserPartitionScheme, ]\nSCHEME_SUPPORTS_ASSIGNMENT = [RandomUserPartitionScheme, ]\n\n\nclass LmsSearchFilterGenerator(SearchFilterGenerator):\n \"\"\" SearchFilterGenerator for LMS Search \"\"\"\n\n _user_enrollments = {}\n\n def _enrollments_for_user(self, user):\n \"\"\" Return the specified user's course enrollments \"\"\"\n if user not in self._user_enrollments:\n self._user_enrollments[user] = CourseEnrollment.enrollments_for_user(user)\n return self._user_enrollments[user]\n\n def filter_dictionary(self, **kwargs):\n \"\"\" LMS implementation, adds filtering by user partition, course id and user \"\"\"\n\n def get_group_for_user_partition(user_partition, course_key, user):\n \"\"\" Returns the specified user's group for user partition \"\"\"\n if user_partition.scheme in SCHEME_SUPPORTS_ASSIGNMENT:\n return user_partition.scheme.get_group_for_user(\n course_key,\n user,\n user_partition,\n assign=False,\n )\n else:\n return user_partition.scheme.get_group_for_user(\n course_key,\n user,\n user_partition,\n )\n\n def get_group_ids_for_user(course, user):\n \"\"\" Collect user partition group ids for user for this course \"\"\"\n partition_groups = []\n for user_partition in course.user_partitions:\n if user_partition.scheme in INCLUDE_SCHEMES:\n group = get_group_for_user_partition(user_partition, course.id, user)\n if group:\n partition_groups.append(group)\n partition_group_ids = [unicode(partition_group.id) for partition_group in partition_groups]\n return partition_group_ids if partition_group_ids else None\n\n filter_dictionary = super(LmsSearchFilterGenerator, self).filter_dictionary(**kwargs)\n if 'user' in kwargs:\n user = kwargs['user']\n\n if 'course_id' in kwargs and kwargs['course_id']:\n try:\n course_key = CourseKey.from_string(kwargs['course_id'])\n except InvalidKeyError:\n course_key = SlashSeparatedCourseKey.from_deprecated_string(kwargs['course_id'])\n\n # Staff user looking at course as staff user\n if get_user_role(user, course_key) in ('instructor', 'staff'):\n return filter_dictionary\n # Need to check course exist (if course gets deleted enrollments don't get cleaned up)\n course = modulestore().get_course(course_key)\n if course:\n filter_dictionary['content_groups'] = get_group_ids_for_user(course, user)\n else:\n user_enrollments = self._enrollments_for_user(user)\n content_groups = []\n for enrollment in user_enrollments:\n course = modulestore().get_course(enrollment.course_id)\n if course:\n enrollment_group_ids = get_group_ids_for_user(course, user)\n if enrollment_group_ids:\n content_groups.extend(enrollment_group_ids)\n\n filter_dictionary['content_groups'] = content_groups if content_groups else None\n\n return filter_dictionary\n\n def field_dictionary(self, **kwargs):\n \"\"\" add course if provided otherwise add courses in which the user is enrolled in \"\"\"\n field_dictionary = super(LmsSearchFilterGenerator, self).field_dictionary(**kwargs)\n if not kwargs.get('user'):\n field_dictionary['course'] = []\n elif not kwargs.get('course_id'):\n user_enrollments = self._enrollments_for_user(kwargs['user'])\n field_dictionary['course'] = [unicode(enrollment.course_id) for enrollment in user_enrollments]\n\n # if we have an org filter, only include results for this org filter\n course_org_filter = microsite.get_value('course_org_filter')\n if course_org_filter:\n field_dictionary['org'] = course_org_filter\n\n return field_dictionary\n\n def exclude_dictionary(self, **kwargs):\n \"\"\" If we are not on a microsite, then exclude any microsites that are defined \"\"\"\n exclude_dictionary = super(LmsSearchFilterGenerator, self).exclude_dictionary(**kwargs)\n course_org_filter = microsite.get_value('course_org_filter')\n # If we have a course filter we are ensuring that we only get those courses above\n if not course_org_filter:\n org_filter_out_set = microsite.get_all_orgs()\n if org_filter_out_set:\n exclude_dictionary['org'] = list(org_filter_out_set)\n\n return exclude_dictionary\n"},"license":{"kind":"string","value":"agpl-3.0"},"hash":{"kind":"number","value":7445365950426380000,"string":"7,445,365,950,426,380,000"},"line_mean":{"kind":"number","value":45.5619834711,"string":"45.561983"},"line_max":{"kind":"number","value":107,"string":"107"},"alpha_frac":{"kind":"number","value":0.6324103656,"string":"0.63241"},"autogenerated":{"kind":"bool","value":false,"string":"false"},"ratio":{"kind":"number","value":4.648514851485149,"string":"4.648515"},"config_test":{"kind":"bool","value":false,"string":"false"},"has_no_keywords":{"kind":"bool","value":false,"string":"false"},"few_assignments":{"kind":"bool","value":false,"string":"false"}}},{"rowIdx":85,"cells":{"repo_name":{"kind":"string","value":"goliate/sarakha63-persomov"},"path":{"kind":"string","value":"couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/twentyfourvideo.py"},"copies":{"kind":"string","value":"32"},"size":{"kind":"string","value":"3892"},"content":{"kind":"string","value":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n parse_iso8601,\n int_or_none,\n)\n\n\nclass TwentyFourVideoIE(InfoExtractor):\n IE_NAME = '24video'\n _VALID_URL = r'https?://(?:www\\.)?24video\\.net/(?:video/(?:view|xml)/|player/new24_play\\.swf\\?id=)(?P\\d+)'\n\n _TESTS = [\n {\n 'url': 'http://www.24video.net/video/view/1044982',\n 'md5': '48dd7646775690a80447a8dca6a2df76',\n 'info_dict': {\n 'id': '1044982',\n 'ext': 'mp4',\n 'title': 'Эротика каменного века',\n 'description': 'Как смотрели порно в каменном веке.',\n 'thumbnail': 're:^https?://.*\\.jpg$',\n 'uploader': 'SUPERTELO',\n 'duration': 31,\n 'timestamp': 1275937857,\n 'upload_date': '20100607',\n 'age_limit': 18,\n 'like_count': int,\n 'dislike_count': int,\n },\n },\n {\n 'url': 'http://www.24video.net/player/new24_play.swf?id=1044982',\n 'only_matching': True,\n }\n ]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n\n webpage = self._download_webpage(\n 'http://www.24video.net/video/view/%s' % video_id, video_id)\n\n title = self._og_search_title(webpage)\n description = self._html_search_regex(\n r'([^<]+)', webpage, 'description', fatal=False)\n thumbnail = self._og_search_thumbnail(webpage)\n duration = int_or_none(self._og_search_property(\n 'duration', webpage, 'duration', fatal=False))\n timestamp = parse_iso8601(self._search_regex(\n r'