\\n' % escape(response_body.decode('ascii')))\n ohandle.write('\\n')\n ohandle.write('\\n')\n\n self.ofhandle.write(ohandle.getvalue().encode('utf-8')) # TODO: should use utf-8 universally?\n\n def finishWrite(self):\n self.qlock.lock()\n while len(self.datalist)>0:\n self.writeData(self.datalist.popleft())\n self.ofhandle.write(b'')\n self.ofhandle.close()\n\n def shutdown(self):\n self.finishWrite()\n self.exit(0)\n\n def quitHandler(self):\n logger.info('got quit()')\n self.exit(0)\n\nclass MainWindow(QWidget):\n def __init__(self, targets, skips = [], spider = False, images = False, parent = None):\n\n QWidget.__init__(self, parent)\n self.qlock = QMutex()\n\n self.logger = logging.getLogger(__name__)\n self.logger.info('starting...')\n\n self.resize(800, 600)\n self.setWindowTitle('RAFT Crawler')\n\n self.num_tabs = 4\n \n self.qtab = QTabWidget(self)\n self.qtab.setGeometry(10, 45, 650, 530)\n\n self.qbutton = QPushButton('Close', self)\n self.qbutton.setGeometry(10, 10, 60, 35)\n self.connect(self.qbutton, SIGNAL('clicked()'), self.quit)\n\n self.gbutton = QPushButton('Go', self)\n self.gbutton.setGeometry(70, 10, 60, 35)\n self.connect(self.gbutton, SIGNAL('clicked()'), self.go)\n\n self.qlineedit = QLineEdit(self)\n self.qlineedit.setGeometry(130, 10, 400, 35)\n\n QObject.connect(self, SIGNAL('navigationFinished(int, QString)'), self.navigationFinishedHandler)\n QObject.connect(self, SIGNAL('addTargetUrl(QUrl)'), self.addTargetUrlHandler)\n QObject.connect(self, SIGNAL('waitForOutstanding()'), self.waitForOutstandingHandler)\n qApp.setQuitOnLastWindowClosed(False)\n qApp.lastWindowClosed.connect(self.quit)\n\n self.writer = WriterThread('.', logger)\n self.writer.start()\n\n self.networkManager = NetworkManager(self.writer)\n\n self.exclude_patterns = []\n for skip in skips:\n self.exclude_patterns.append(re.compile(skip, re.I))\n\n self.browsers = []\n self.available = []\n for i in range(0, self.num_tabs):\n tab = QWidget()\n self.qtab.addTab(tab, 'Tab 
%d' % i)\n self.browsers.append(BrowserWindow(tab, self, i, self.exclude_patterns, spider, images))\n self.available.append(True)\n\n self.index = 0\n self.targets = targets\n self.targets_outstanding = {}\n self.link_count = {}\n\n self.connect(self, SIGNAL('quit()'), self.quit)\n self.automode = False\n if len(self.targets) > 0:\n self.automode = True\n self.go()\n \n def quit(self):\n if self.writer.isRunning():\n self.writer.shutdown()\n self.logger.debug('waiting for thread... finished = %s', self.writer.isFinished())\n self.writer.wait(500)\n self.logger.debug('exiting... finished = %s', self.writer.isFinished())\n self.logger.info('quitting...')\n QTimer.singleShot(0, qApp, SLOT('quit()'))\n\n def go(self):\n self.qlock.lock()\n try:\n entry = self.qlineedit.text()\n if entry:\n entry = QUrl.fromUserInput(entry).toEncoded().data().decode('utf-8')\n self.targets.append(entry)\n self.qlineedit.setText('')\n for target in self.targets:\n self.targets_outstanding[target] = True\n finally:\n self.qlock.unlock()\n\n self.dispatchNext()\n\n def waitForOutstandingHandler(self):\n self.qlock.lock()\n outstanding = len(self.targets_outstanding)\n if outstanding > 0:\n self.logger.debug('waiting for [%d] outstanding' % (outstanding))\n self.qlock.unlock()\n self.dispatchNext()\n else:\n self.qlock.unlock()\n QTimer.singleShot(1000, self, SIGNAL('quit()'))\n\n def addTargetUrlHandler(self, url):\n if not self.qlock.tryLock(1000):\n self.logger.debug('failed to lock for url %s\\n' % (url))\n return\n else:\n self.logger.debug('locked after tryLock')\n\n try:\n target = url.toString(QUrl.FullyEncoded)\n for pat in self.exclude_patterns:\n if pat.search(target):\n self.logger.warn('excluding target: %s\\n' % (target))\n return\n if not target in self.targets:\n host = url.host()\n if host in self.link_count:\n if self.link_count[host] > MAX_LINK_COUNT:\n return\n else:\n self.link_count[host] += 1\n else:\n self.link_count[host] = 1\n print(('adding target [%s]' % 
(target)))\n self.targets_outstanding[target] = True\n self.targets.append(target)\n finally:\n self.qlock.unlock()\n \n def navigationFinishedHandler(self, index, url):\n if not self.qlock.tryLock(1000):\n self.logger.debug('failed to lock for url %s and index %d\\n' % (url, index))\n return\n else:\n self.logger.debug('locked after tryLock')\n try:\n target = url\n if target not in self.targets_outstanding:\n self.logger.debug('unexpected target: %s, %s' % (target, repr(self.targets_outstanding)))\n else:\n self.logger.debug('removing outstanding: %s' % (target))\n self.targets_outstanding.pop(target)\n\n self.available[index] = True\n self.qlock.unlock()\n except:\n self.qlock.unlock()\n\n self.dispatchNext()\n\n def dispatchNext(self):\n self.qlock.lock()\n try:\n for i in range(0, self.num_tabs):\n if self.index < len(self.targets):\n if self.available[i]:\n target = self.targets[self.index]\n self.logger.debug('dispatching target: %s to %d' % (target, i))\n self.available[i] = False\n self.browsers[i].emit(SIGNAL('navigate(QString)'), target)\n self.index += 1\n elif self.automode:\n self.qlock.unlock()\n self.logger.debug('all targets dispatched ... 
waiting')\n QTimer.singleShot(1000, self, SIGNAL('waitForOutstanding()'))\n break\n else:\n self.qlock.unlock()\n except:\n self.qlock.unlock()\n raise\n\ndef capture_error(typ, val, traceb):\n import traceback\n print(('type=%s, value=%s\\n%s' % (typ, val, traceback.format_tb(traceb))))\n\nif '__main__' == __name__:\n\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n targets = []\n skips = []\n spider = False\n images = False\n if len(sys.argv) > 1:\n # file\n fileargs = []\n i = 1\n while i < len(sys.argv):\n arg = sys.argv[i]\n if arg.startswith('-'):\n if arg[1:] in ('spider',):\n spider = True\n elif arg[1:] in ('images',):\n images = True\n elif arg[1:] in ('X','exclude'):\n skips.extend([x.strip() for x in sys.argv[i+1].split(',')])\n i += 1\n else:\n fileargs.append(arg)\n i += 1\n \n for arg in fileargs:\n if os.path.exists(arg):\n for line in open(arg, 'r'):\n target = line.rstrip()\n if not (target.startswith('http:') or target.startswith('https:')):\n target = QUrl.fromUserInput(target).toEncoded().data().decode('utf-8')\n targets.append(target)\n else:\n target = arg\n if not (target.startswith('http:') or target.startswith('https:')):\n target = QUrl.fromUserInput(target).toEncoded().data().decode('utf-8')\n targets.append(target)\n\n app = QApplication([])\n main = MainWindow(targets, skips, spider, images)\n main.show()\n\n sys.excepthook = capture_error\n\n 
sys.exit(app.exec_())\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1555,"cells":{"__id__":{"kind":"number","value":18863496368915,"string":"18,863,496,368,915"},"blob_id":{"kind":"string","value":"1329dd8371c2c1a4b3f9cd9df0d37636f247d4d1"},"directory_id":{"kind":"string","value":"13d6931dd964eca57248b470af6f6b34784d7236"},"path":{"kind":"string","value":"/ostinato/__init__.py"},"content_id":{"kind":"string","value":"4f1cd3fdf5bb928312ea91df6150573e964a6b7c"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"wd5/django-ostinato"},"repo_url":{"kind":"string","value":"https://github.com/wd5/django-ostinato"},"snapshot_id":{"kind":"string","value":"c0ee7f9e1a52bef09f9d9ab33787c1316a1ce554"},"revision_id":{"kind":"string","value":"0aa485cd7fef5e8da9b230b9c2aaaf19b56ed5c3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-15T17:36:47.055268","string":"2021-01-15T17:36:47.055268"},"revision_date":{"kind":"timestamp","value":"2012-12-02T21:19:10","string":"2012-12-02T21:19:10"},"committer_date":{"kind":"timestamp","value":"2012-12-02T21:19:10","string":"2012-12-02T21:19:10"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null
"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"VERSION = (0, 97)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1556,"cells":{"__id__":{"kind":"number","value":6176163015028,"string":"6,176,163,015,028"},"blob_id":{"kind":"string","value":"bf34e94411742ad4420048b8c976476932e554a9"},"directory_id":{"kind":"string","value":"4dc4ac3f2059cfbd19c75c185909a001da90a6e8"},"path":{"kind":"string","value":"/di_sms/main/south_migrations/0001_initial.py"},"content_id":{"kind":"string","value":"a04ea9bb4fead9bf5d65fdbb0da8246fd3f48c0e"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"onaio/di_sms"},"repo_url":{"kind":"string","value":"https://github.com/onaio/di_sms"},"snapshot_id":{"kind":"string","value":"51d4ab2e7ce486071715e2221c87b2c7d45ba605"},"revision_id":{"kind":"string","value":"95eb0b4e15d73ffca48f1ef4e1fcd6d17f4e5fd2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-24T22:51:37.754017","string":"2021-01-24T22:51:37.754017"},"revision_date":{"kind":"timestamp","value":"2014-11-17T18:57:53","string":"2014-11-17T18:57:53"},"committer_date":{"kind":"timestamp","value":"2014-11-17T18:57:53","string":"2014-11-17T18:57:53"},"github_id":{"kind":"number","value":25510812,"string":"25,510,812"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at"
:{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding model 'Section'\n db.create_table(u'main_section', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),\n ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),\n ))\n db.send_create_signal(u'main', ['Section'])\n\n # Adding model 'Question'\n db.create_table(u'main_question', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('number', self.gf('django.db.models.fields.IntegerField')(unique=True)),\n ('section', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Section'])),\n ('question', self.gf('django.db.models.fields.TextField')()),\n ('question_type', self.gf('django.db.models.fields.CharField')(max_length=3)),\n ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),\n ))\n db.send_create_signal(u'main', ['Question'])\n\n # Adding model 'Answer'\n db.create_table(u'main_answer', (\n (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),\n ('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Question'])),\n ('answer', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),\n 
('phone_number', self.gf('django.db.models.fields.CharField')(max_length=100, db_index=True)),\n ('date_created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),\n ('date_modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),\n ))\n db.send_create_signal(u'main', ['Answer'])\n\n\n def backwards(self, orm):\n # Deleting model 'Section'\n db.delete_table(u'main_section')\n\n # Deleting model 'Question'\n db.delete_table(u'main_question')\n\n # Deleting model 'Answer'\n db.delete_table(u'main_answer')\n\n\n models = {\n u'main.answer': {\n 'Meta': {'object_name': 'Answer'},\n 'answer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),\n 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),\n 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Question']\"})\n },\n u'main.question': {\n 'Meta': {'object_name': 'Question'},\n 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'number': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),\n 'question': ('django.db.models.fields.TextField', [], {}),\n 'question_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),\n 'section': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Section']\"})\n },\n u'main.section': {\n 'Meta': {'object_name': 'Section'},\n 'date_created': 
('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),\n 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})\n }\n }\n\n complete_apps = ['main']"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1557,"cells":{"__id__":{"kind":"number","value":14259291464403,"string":"14,259,291,464,403"},"blob_id":{"kind":"string","value":"817c4c22cc039ebe5926b3ee0170a90aa126c37c"},"directory_id":{"kind":"string","value":"59d60588d9581b0e0b0236f660e65c0b80df0776"},"path":{"kind":"string","value":"/src/concoord/object/binarytree.py"},"content_id":{"kind":"string","value":"909591248dc2352c471038fa967d7829f6324021"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"rescrv/concoord"},"repo_url":{"kind":"string","value":"https://github.com/rescrv/concoord"},"snapshot_id":{"kind":"string","value":"e878c2034198c3832e4da1edba52a99ff575399b"},"revision_id":{"kind":"string","value":"03c7ef495e5a0c6edb5978be9f31a33c2fbff867"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-16T22:20:44.459221","string":"2021-01-16T22:20:44.459221"},"revision_date":{"kind":"timestamp","value":"2012-06-25T20:52:08","string":"2012-06-25T20:52:08"},"committer_date":{"kind":"timestamp","value":"2012-06-25T20:52:08","string":"2012-06-25T20:52:08"},"github_id":{"kind":"number","value":4747678,"string":"4,747,678"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\n@author: Deniz Altinbuken, Emin Gun Sirer\n@note: Example binarytree\n@copyright: See LICENSE\n\"\"\"\nclass BinaryTree:\n def __init__(self, **kwargs):\n self.root = None\n \n def add_node(self, data, **kwargs):\n return Node(data)\n\n def insert(self, root, data, **kwargs):\n if root == None:\n return self.add_node(data)\n else:\n if data <= root.data:\n root.left = self.insert(root.left, data)\n else:\n root.right = self.insert(root.right, data)\n return root\n \n def find(self, root, target, **kwargs):\n if root == None:\n return False\n else:\n if target == root.data:\n return True\n 
else:\n if target < root.data:\n return self.find(root.left, target)\n else:\n return self.find(root.right, target)\n\n def delete(self, root, target, **kwargs):\n if root == None or not self.find(root, target):\n return False\n else:\n if target == root.data:\n del root\n else:\n if target < root.data:\n return self.delete(root.left, target)\n else:\n return self.delete(root.right, target)\n \n def get_min(self, root, **kwargs):\n while(root.left != None):\n root = root.left\n return root.data\n\n def get_max(self, root, **kwargs):\n while(root.right != None):\n root = root.right\n return root.data\n\n def get_depth(self, root, **kwargs):\n if root == None:\n return 0\n else:\n ldepth = self.get_depth(root.left)\n rdepth = self.get_depth(root.right)\n return max(ldepth, rdepth) + 1\n \n def get_size(self, root, **kwargs):\n if root == None:\n return 0\n else:\n return self.get_size(root.left) + 1 + self.get_size(root.right)\n\nclass Node:\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n \n \n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1558,"cells":{"__id__":{"kind":"number","value":8787503114438,"string":"8,787,503,114,438"},"blob_id":{"kind":"string","value":"feaf753d849eaadc3dccaec89b5940a66a7f9e9c"},"directory_id":{"kind":"string","value":"8e3cf73959d2e675c42ca2da65d0b206ee6a7727"},"path":{"kind":"string","value":"/src/warden/warden_init.py"},"content_id":{"kind":"string","value":"569f4e5737c0f4423090d630df3f4821fe4a19cd"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"matthewhampton/warden"},"repo_url":{"kind":"string","value":"https://github.com/matthewhampton/warden"},"snapshot_id":{"kind":"string","value":"2b196edbd1036c267d051ab96470aeb2a7002f53"},"revision_id":{"kind":"string","value":"f3dfc4c658a0ea1af3625ed178d2d37455c84b67"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T05:39:58.243178","string":"2021-01-18T05:39:58.243178"},"revision_date":{"kind":"timestamp","value":"2013-07-18T15:34:59","string":"2013-07-18T15:34:59"},"committer_date":{"kind":"timestamp","value":"2013-07-18T15:34:59","string":"2013-07-18T15:34:59"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"\"\"\"\nAFTER A NEW INSTALL OF WARDEN (using setup.py) we need to get the system ready for use\n1) Make sure warden.settings exists\n2) read warden.settings file (or use command line parameters, arguments etc)\n3) carbon: ensure the required configuration files are present\n4) diamond: ensure the required configuration files are present\n5) gentry: read settings module\n check if database exists... 
clear...syncdb...migrate etc..\n\"\"\"\nimport getpass\nimport subprocess\nfrom warden.AutoConf import autoconf, get_home\nfrom warden_logging import log\nimport os\nimport sys\nimport imp\nimport base64\nimport textwrap\nimport re\nfrom django.core import management\nfrom distutils import dir_util, file_util\n\n\ndef setup(\n home,\n super_user,\n project_name\n):\n \"\"\"\n Warden uses values from its default settings file UNLESS explicitely defined\n here in the constructor.\n \"\"\"\n os.environ['DJANGO_SETTINGS_MODULE'] = 'gentry.settings'\n\n log.info ('$DJANGO_SETTINGS_MODULE = %s' % os.environ['DJANGO_SETTINGS_MODULE'])\n from django.conf import settings as gsetts\n\n database = gsetts.DATABASES['default']['NAME']\n\n if not os.path.exists(os.path.dirname(database)):\n os.makedirs(os.path.dirname(database))\n\n management.execute_from_command_line(['manage.py', 'syncdb','--noinput'])\n management.execute_from_command_line(['manage.py', 'migrate', '--noinput'])\n\n # add a super user\n if super_user:\n username = super_user[0]\n password = super_user[1]\n email = super_user[2]\n\n from sentry.models import User\n try:\n auser = User.objects.using('default').get(username=username)\n except User.DoesNotExist:\n auser = User.objects.db_manager('default').create_superuser(username, email, password)\n log.info('Added Sentry superuser \"%s\" with password like \"%s%s\"' % (username, password[:3], '*'*(len(password)-3)))\n else:\n log.error('Username \"%s\" is already taken.' 
% username)\n\n if project_name:\n\n project_slug = project_name.lower().replace(' ','_')\n try:\n # add a project\n from sentry.models import Project, Team\n team = Team.objects.create(name=project_name + ' Team', slug=project_slug + '_team', owner=auser)\n project = Project.objects.create(name=project_name, slug=project_slug, owner=auser, team=team)\n key = project.key_set.filter(user=auser)[0]\n dsn = \"http://%s:%s@localhost:%s/%s\" % (key.public_key, key.secret_key, gsetts.SENTRY_WEB_PORT, key.project_id)\n log.info('Added \"%s\" project to Sentry with dsn: %s' % (project_name, dsn))\n\n except Exception:\n log.error('Failed to create project.')\n\ndef indent(text, spaces=4, strip=False):\n \"\"\"\n Borrowed from fabric\n\n Return ``text`` indented by the given number of spaces.\n\n If text is not a string, it is assumed to be a list of lines and will be\n joined by ``\\\\n`` prior to indenting.\n\n When ``strip`` is ``True``, a minimum amount of whitespace is removed from\n the left-hand side of the given string (so that relative indents are\n preserved, but otherwise things are left-stripped). This allows you to\n effectively \"normalize\" any previous indentation for some inputs.\n \"\"\"\n # Normalize list of strings into a string for dedenting. \"list\" here means\n # \"not a string\" meaning \"doesn't have splitlines\". Meh.\n if not hasattr(text, 'splitlines'):\n text = '\\n'.join(text)\n # Dedent if requested\n if strip:\n text = textwrap.dedent(text)\n prefix = ' ' * spaces\n output = '\\n'.join(prefix + line for line in text.splitlines())\n # Strip out empty lines before/aft\n output = output.strip()\n # Reintroduce first indent (which just got stripped out)\n output = prefix + output\n return output\n\ndef passprompt(prompt_str):\n p1 = getpass.getpass(prompt_str)\n p2 = getpass.getpass('(Again!) ' + prompt_str)\n while p1 != p2:\n p1 = getpass.getpass('(Um. They didn\\'t match) ' + prompt_str)\n p2 = getpass.getpass('(Again!) 
' + prompt_str)\n return p1\n\ndef prompt(text, default='', validate=None, password=False):\n \"\"\"\n Borrowed from fabric!\n\n Prompt user with ``text`` and return the input (like ``raw_input``).\n\n A single space character will be appended for convenience, but nothing\n else. Thus, you may want to end your prompt text with a question mark or a\n colon, e.g. ``prompt(\"What hostname?\")``.\n\n If ``default`` is given, it is displayed in square brackets and used if the\n user enters nothing (i.e. presses Enter without entering any text).\n ``default`` defaults to the empty string. If non-empty, a space will be\n appended, so that a call such as ``prompt(\"What hostname?\",\n default=\"foo\")`` would result in a prompt of ``What hostname? [foo]`` (with\n a trailing space after the ``[foo]``.)\n\n The optional keyword argument ``validate`` may be a callable or a string:\n\n * If a callable, it is called with the user's input, and should return the\n value to be stored on success. On failure, it should raise an exception\n with an exception message, which will be printed to the user.\n * If a string, the value passed to ``validate`` is used as a regular\n expression. It is thus recommended to use raw strings in this case. Note\n that the regular expression, if it is not fully matching (bounded by\n ``^`` and ``$``) it will be made so. In other words, the input must fully\n match the regex.\n\n Either way, `prompt` will re-prompt until validation passes (or the user\n hits ``Ctrl-C``).\n\n .. note::\n `~fabric.operations.prompt` honors :ref:`env.abort_on_prompts\n ` and will call `~fabric.utils.abort` instead of\n prompting if that flag is set to ``True``. 
If you want to block on user\n input regardless, try wrapping with\n `~fabric.context_managers.settings`.\n\n Examples::\n\n # Simplest form:\n environment = prompt('Please specify target environment: ')\n\n # With default, and storing as env.dish:\n prompt('Specify favorite dish: ', 'dish', default='spam & eggs')\n\n # With validation, i.e. requiring integer input:\n prompt('Please specify process nice level: ', key='nice', validate=int)\n\n # With validation against a regular expression:\n release = prompt('Please supply a release name',\n validate=r'^\\w+-\\d+(\\.\\d+)?$')\n\n # Prompt regardless of the global abort-on-prompts setting:\n with settings(abort_on_prompts=False):\n prompt('I seriously need an answer on this! ')\n\n \"\"\"\n default_str = \"\"\n if default != '':\n default_str = \" [%s] \" % str(default).strip()\n else:\n default_str = \" \"\n # Construct full prompt string\n prompt_str = text.strip() + default_str\n # Loop until we pass validation\n value = None\n while value is None:\n # Get input\n value = (passprompt(prompt_str) if password else raw_input(prompt_str)) or default\n # Handle validation\n if validate:\n # Callable\n if callable(validate):\n # Callable validate() must raise an exception if validation\n # fails.\n try:\n value = validate(value)\n except Exception, e:\n # Reset value so we stay in the loop\n value = None\n print(\"Validation failed for the following reason:\")\n print(indent(e.message) + \"\\n\")\n # String / regex must match and will be empty if validation fails.\n else:\n # Need to transform regex into full-matching one if it's not.\n if not validate.startswith('^'):\n validate = r'^' + validate\n if not validate.endswith('$'):\n validate += r'$'\n result = re.findall(validate, value)\n if not result:\n print(\"Regular expression validation failed: '%s' does not match '%s'\\n\" % (value, validate))\n # Reset value so we stay in the loop\n value = None\n return value\n\ndef create_service(home):\n if 'win' in 
sys.platform:\n if hasattr(sys, \"frozen\"):\n svc_exe = os.path.join(os.path.dirname(sys.executable), 'warden-svc.exe')\n if os.path.exists(svc_exe):\n log.info('Attempting to create service')\n log.info('Output: \\n%s',\n subprocess.check_output([svc_exe, '-h', home, 'install']))\n else:\n pass\n\ndef main():\n import argparse\n import ConfigParser\n parser = argparse.ArgumentParser(description='Warden init script')\n parser.add_argument('home', nargs='?', help=\"the warden home folder\")\n\n prompt_args = [\n ('first-project', \"the first sentry project\", 'first_project'),\n ('super-user', \"the user name for the admin user\", 'super_user'),\n ('super-password', \"the password for the admin user\", 'super_password'),\n ('super-email', \"the email address for the admin user\", 'super_email'),\n ]\n\n for arg,help,dest in prompt_args:\n parser.add_argument('--%s' % arg, help=help, dest=dest, required=False)\n\n args = parser.parse_args()\n\n for arg,help,dest in prompt_args:\n if not getattr(args, dest, None):\n setattr(args, dest, prompt('Enter %s:' % (help), password='password' in arg))\n\n home = get_home(args.home)\n if not os.path.exists(home):\n os.makedirs(home)\n\n os.environ['WARDEN_HOME'] = home\n\n dir_util.copy_tree(os.path.join(os.path.dirname(__file__), 'templateconf'), home)\n\n autoconf(home)\n\n suser = (args.super_user, args.super_password, args.super_email)\n\n setup(home, suser, args.first_project)\n\n create_service(home)\n\nif __name__ == '__main__':\n 
main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1559,"cells":{"__id__":{"kind":"number","value":15298673517114,"string":"15,298,673,517,114"},"blob_id":{"kind":"string","value":"287994de7f487bcf706517e5b13887aedc4519b2"},"directory_id":{"kind":"string","value":"12e462670828716392bc31638213a57dcdc914ef"},"path":{"kind":"string","value":"/gazobbs/conf.py"},"content_id":{"kind":"string","value":"8720480ec62306a8de7a17f73f060b56e331fcc8"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"sokky/gazoBBS"},"repo_url":{"kind":"string","value":"https://github.com/sokky/gazoBBS"},"snapshot_id":{"kind":"string","value":"234a352459e8f4ee66fac716c1162e331f335517"},"revision_id":{"kind":"string","value":"6a844315bb74e26b3bee65519e3db2f6a4acd0c5"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T16:23:56.971272","string":"2016-09-06T16:23:56.971272"},"revision_date":{"kind":"timestamp","value":"2012-10-25T14:59:53","string":"2012-10-25T14:59:53"},"committer_date":{"kind":"timestamp","value":"2012-10-25T14:59:53","string":"2012-10-25T14:59:53"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"ki
nd":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#画面用定義(画面制御に使うのでDictで保存)\r\nTITLE = '画像掲示板' # タイトル\r\nSELF_NAME = 'img_board' # 自分へのURL\r\nUSE_THUMB = 1 # 1:サムネイルをつくる 0:なし\r\nRESIMG = 1 # 1:レスに画像を張る 0:はらない\r\nPAGE_DEF = 5 # 1ページに表示する記事数\r\nADMIN_PASS = 'admin_pass' # 管理者パス\r\nMAX_KB = 500 # MAX KB\r\nMAX_W = 250 # MAX 幅\r\nMAX_H = 250 # MAX 高さ\r\nPROXY_CHECK = 0 # proxyの書込みを制限する y:1 n:0\r\nDISP_ID = 2 # ID表示 強制:2 する:1 しない:0\r\nIDSEED = 'idの種' # idの種\r\nBR_CHECK = 15 # 改行を抑制する行数 しない:0\r\nLOG_MAX = 300 # スレッドの最大数\r\nRENZOKU = 5 # 連続投稿秒数\r\nRENZOKU2 = 10 # 画像連続投稿秒数\r\n\r\n#DB用定義(画面制御には使わない)\r\nTBL_LOG = 'log' # 書き込みログTBL\r\nSEQ = 'seq_tbl' # SEQTBL\r\nSEQ_LOG = 'log_no' # SEQNO識別子\r\n\r\n#その他定義\r\nbadstring = set()\r\nbadfile = set()\r\nbadip = set()\r\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1560,"cells":{"__id__":{"kind":"number","value":14783277439377,"string":"14,783,277,439,377"},"blob_id":{"kind":"string","value":"33b376b299fdfd7309eeb4e48ea6970f8c599dd9"},"directory_id":{"kind":"string","value":"dcd83aeb799143b58956612fb0bfc0258d30f229"},"path":{"kind":"string","value":"/src/python/TaskObjects/Tools/GenerateMainScript.py"},"content_id":{"kind":"string","value":"2e34fe1ce5f7d7e23885ef26b60341c25cd96c99"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"giffels/PRODAGENT"},"repo_url":{"kind":"string","value":"https://github.com/giffels/PRODAGENT"},"snapshot_id":{"kind":"string","value":"67e3e841cfca7421caa505d03417b663a62d321b"},"revision_id":{"kind":"string","value":"c99608e3e349397fdd1b0b5c011bf4f33a1c3aad"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T05:51:52.200716","string":"2021-01-01T05:51:52.200716"},"revision_date":{"kind":"timestamp","value":"2012-10-24T13:22:34","string":"2012-10-24T13:22:34"},"committer_date":{"kind":"timestamp","value":"2012-10-24T13:22:34","string":"2012-10-24T13:22:34"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\"\"\"\n_GenerateMainScript_\n\nFor a Given TaskObject instance, create a StructuredFile\nrepresenting a 'Main' Script to run the task, and insert the\ndetails of the Script into the ShREEKTask.\n\nThe StructuredFile instance is added to the TaskObject, and the\nscript name is set as the Executable attribute of the object\n\nThe StructuredFile is not actually populated to run any particular\nexecutable, but rather provides a standard framework in which to\ninsert commands\n\n\"\"\"\n\n\nclass GenerateMainScript:\n \"\"\"\n _GenerateMainScript_\n\n Create a StructuredFile instance using the name of 
the\n TaskObject.\n Insert details of that StructuredFile into the ShREEKTask in\n the taskObject so that it can function as an executable.\n\n \"\"\"\n\n def __call__(self, taskObject):\n \"\"\"\n _operator()_\n\n Act on Task Object to Generate a Main script and insert\n details into the ShREEKTask\n\n \n \"\"\"\n scriptName = \"%s-main.sh\" % taskObject['Name']\n \n script = taskObject.addStructuredFile(scriptName)\n script.setExecutable()\n script.append(\"#!/bin/bash\")\n script.append(\"echo \\\"Task Running: %s\\\"\" % taskObject['Name'])\n script.append(\"echo \\\"From Dir: `pwd`\\\"\" )\n script.append(\"echo \\\"Started: `date +%s`\\\"\" )\n \n\n taskObject['Executable'] = scriptName\n taskObject['ShREEKTask'].attrs['Executable'] = scriptName\n return\n \n \n \n \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1561,"cells":{"__id__":{"kind":"number","value":4587025112080,"string":"4,587,025,112,080"},"blob_id":{"kind":"string","value":"b80c2549430578824cc2ddbe2349834c1e0b0533"},"directory_id":{"kind":"string","value":"af392bed30c33447f3c1472baaa4c045a70f9c3c"},"path":{"kind":"string","value":"/rots/testODE.py"},"content_id":{"kind":"string","value":"22602f51b0b1a1f0aadb395ee091367c9ea1ab8f"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"martin-bjork/return-of-the-spheres"},"repo_url":{"kind":"string","value":"https://github.com/martin-bjork/return-of-the-spheres"},"snapshot_id":{"kind":"string","value":"9490998719ff935f1715098436bc0833b6a8ee13"},"revision_id":{"kind":"string","value":"53d558db6f5fcb69875f200a5510f70c305bf724"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T19:09:49.790090","string":"2021-01-18T19:09:49.790090"},"revision_date":{"kind":"timestamp","value":"2013-06-26T22:06:57","string":"2013-06-26T22:06:57"},"committer_date":{"kind":"timestamp","value":"2013-06-26T22:06:57","string":"2013-06-26T22:06:57"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import pygame\nfrom pygame.locals import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport ode\n\nimport traceback\nimport sys\n\nfrom graphics import init_graphics, textures\nfrom math_classes import vectors\n\ndef create_display_list():\n\n\tdisplayListIndex = glGenLists(1)\n\tglNewList(displayListIndex, GL_COMPILE)\n\n\tglMaterialfv(GL_FRONT, GL_AMBIENT, [1.0, 1.0, 1.0, 1.0])\n\tglMaterialfv(GL_FRONT, GL_DIFFUSE, [1.0, 1.0, 1.0, 1.0])\n\tglMaterialfv(GL_FRONT, GL_SPECULAR, [1.0, 1.0, 1.0, 1.0])\n\tglMateriali(GL_FRONT, GL_SHININESS, 64)\n\n\tearth_big_str = 
textures.loadImage('graphics/texture_data/celestial_bodies/earth_big.jpg')\n\tearth_big_tex = textures.loadTexture(earth_big_str, 1024, 1024)\n\n\tglEnable(GL_TEXTURE_2D)\n\tglBindTexture(GL_TEXTURE_2D, earth_big_tex)\n\n\tquadric = gluNewQuadric()\n\n\tgluQuadricTexture(quadric, True)\n\tgluSphere(quadric, 0.5, 60, 60)\n\tglDisable(GL_TEXTURE_2D)\n\n\tglEndList()\n\treturn displayListIndex\n\n\n\ndef draw_sphere(index, body):\n\n\tx,y,z = body.getPosition()\n\tR = body.getRotation()\n\trot = [R[0], R[3], R[6], 0.,\n\t\t\tR[1], R[4], R[7], 0.,\n\t\t\tR[2], R[5], R[8], 0.,\n\t\t\tx, y, z, 1.0]\n\tglPushMatrix()\n\tglMultMatrixd(rot)\n\tglCallList(index)\n\tglPopMatrix()\n\ndef near_callback(args, geom1, geom2):\n\t\"\"\"Callback function for the collide() method.\n\n\tThis function checks if the given geoms do collide and\n\tcreates contact joints if they do.\n\t\"\"\"\n\t# Check if the objects do collide\n\tcontacts = ode.collide(geom1, geom2)\n\n\t# Create contact joints\n\tworld,contactgroup = args\n\tfor c in contacts:\n\t\tc.setBounce(0.2)\n\t\tc.setMu(5000)\n\t\tj = ode.ContactJoint(world, contactgroup, c)\n\t\tj.attach(geom1.getBody(), geom2.getBody())\n\n\ndef take_input():\n\tcurrentEvents = pygame.event.get() # cache current events\n\trun = True\n\tfor event in currentEvents:\n\t\tif event.type == QUIT or \\\n\t\t(event.type == KEYDOWN and event.key == K_ESCAPE):\n\t\t\trun = False\n\tkeyState = pygame.key.get_pressed()\n\n\txDir = keyState[K_d] - keyState[K_a]\n\tzDir = keyState[K_s] - keyState[K_w]\n\n\tdirection = [xDir, 0.0, zDir]\n\n\treturn run, direction\n\ndef main():\n\n\tinit_graphics.init_window('testODE')\n\n\t# Light source\n\tglLightfv(GL_LIGHT0,GL_POSITION,[0,0,1,0])\n\tglLightfv(GL_LIGHT0,GL_DIFFUSE,[1,1,1,1])\n\tglLightfv(GL_LIGHT0,GL_SPECULAR,[1,1,1,1])\n\tglEnable(GL_LIGHT0)\n\n\tgluLookAt (0.0, 3.6, 4.8, 0.0, 0.5, 0.0, 0.0, 1.0, 0.0)\n\n\t# Create a world object\n\tworld = ode.World()\n\tworld.setGravity( (0,-9.81,0) 
)\n\tworld.setERP(0.8)\n\tworld.setCFM(1E-5)\n\n\t# Create a space object\n\tspace = ode.Space()\n\n\t# Create a plane geom which prevent the objects from falling forever\n\tfloor = ode.GeomPlane(space, (0,1,0), 0)\n\n\t# Create sphere\n\tsphere_index = create_display_list()\n\n\tsphere_body = ode.Body(world)\n\tM = ode.Mass()\n\tM.setSphere(1, 0.5)\n\tsphere_body.setMass(M)\n\tsphere_body.setPosition((0,2,0))\n\n\tsphere_geom = ode.GeomSphere(space, 0.5)\n\tsphere_geom.setBody(sphere_body)\n\n\t# Create group for contact joints\n\tcontactgroup = ode.JointGroup()\n\n\tfps = 50\n\tdt = 1.0/fps\n\trun = True\n\tclock = pygame.time.Clock()\n\tspeed = 1\n\n\tlastDir = [0.0, 0.0, 0.0]\n\n\twhile run:\n\n\t\trun, direction = take_input()\n\n\t\t# Move\n\t\tif direction == lastDir:\n\t\t\tpass\n\t\telse:\n\t\t\tcurrent_vel = vectors.Vector(list(sphere_body.getLinearVel()))\n\t\t\tcorr_vel = vectors.Vector(lastDir)*-speed + vectors.Vector(direction)*speed\n\t\t\tnew_vel = current_vel + corr_vel\n\t\t\tsphere_body.setLinearVel(new_vel.value)\n\t\t\tlastDir = direction\n\n\t\t# Simulate\n\t\tn = 2\n\n\t\tfor i in range(n):\n\t\t\t# Detect collisions and create contact joints\n\t\t\tspace.collide((world,contactgroup), near_callback)\n\n\t\t\t# Simulation step\n\t\t\tworld.step(dt/n)\n\n\t\t\t# Remove all contact joints\n\t\t\tcontactgroup.empty()\n\n\t\t# Draw\n\t\tglClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n\t\tdraw_sphere(sphere_index, sphere_body)\n\t\tpygame.display.flip()\n\n\t\tclock.tick(fps)\n\nif __name__ == '__main__':\n\ttry:\n\t\tmain()\n\texcept 
Exception:\n\t\ttraceback.print_exc(file=sys.stdout)\n\tfinally:\n\t\tpygame.quit()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1562,"cells":{"__id__":{"kind":"number","value":12086038022380,"string":"12,086,038,022,380"},"blob_id":{"kind":"string","value":"713a5cd3c1ff62d60a2442bcbea72d5ca0b1fb6a"},"directory_id":{"kind":"string","value":"a7c3f62c86618d30cda2cf5f6510177c8ed8623b"},"path":{"kind":"string","value":"/pigui/clickable_label.py"},"content_id":{"kind":"string","value":"aa5e10c628f4f7258d984abbc12ee118fa1b4013"},"detected_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n \"Apache-2.0\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"rjpen516/pigui"},"repo_url":{"kind":"string","value":"https://github.com/rjpen516/pigui"},"snapshot_id":{"kind":"string","value":"4fd71cb919d74cb5ea0bf3b672df6123ead07b99"},"revision_id":{"kind":"string","value":"a56bec1df111f5b8d5065859826f6f0036ac4b5d"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-16T11:50:42.614867","string":"2016-09-16T11:50:42.614867"},"revision_date":{"kind":"timestamp","value":"2014-12-31T03:30:09","string":"2014-12-31T03:30:09"},"committer_date":{"kind":"timestamp","value":"2014-12-31T03:30:09","string":"2014-12-31T03:30:09"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_
forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from Clickable import Clickable\nfrom button import Button\nfrom label import Label\nfrom color import *\n\n\n__author__ = 'richard'\n\n\nclass ClickableLabelButton(Button):\n def on_click(self, x, y):\n for button in self.buttons:\n value = self.buttons[button]\n if value[1] <= x <= value[2] and value[3] <= y <= value[4]:\n value[0](button)\n print \"Button Press on %s at (%s,%s) and sending button name\" % (button, x, y)\n\n\nclass ClickableLabel(Label, Clickable):\n def __init__(self, screen):\n super(ClickableLabel, self).__init__(screen)\n self.button = ClickableLabelButton(screen)\n\n def add_label(self, name, value, x, y, task, active=True):\n super(ClickableLabel, self).add_label(name, value, x, y, active)\n # create new button that will span the value\n self.add_attribute(name, 'base_x', x)\n self.add_attribute(name, 'base_y', y)\n self.button.add_button(name, task, x, y,\n self.get_attribute(name, 'size') * len(value) + x,\n self.get_attribute(name, 'size') + y)\n self.button.add_attribute(name, 'color', white)\n\n def add_attribute(self, name, type, value):\n if type == 'button_color':\n self.button.add_attribute(name, 'color', value)\n return\n\n super(ClickableLabel, self).add_attribute(name, type, value)\n # if the size changes, we need to update the button\n if type == 'size' or type == 'x' or type == 'y':\n self.button.set_button_placement(name,\n self.get_attribute(name, 'x'),\n self.get_attribute(name, 'y'),\n self.get_attribute(name, 'x') + self.get_attribute(name, 'size') * len(\n self.get_text(name)),\n self.get_attribute(name, 'y') + self.get_attribute(name, 'size'))\n\n self.render_queue.put(1)\n\n\n def set_text(self, name, value):\n super(ClickableLabel, self).set_text(name, value)\n # update that button to span the new text area\n 
self.button.set_button_placement(name,\n self.get_attribute(name, 'x'),\n self.get_attribute(name, 'y'),\n self.get_attribute(name, 'x') + self.get_attribute(name, 'size') * len(\n self.get_text(name)),\n self.get_attribute(name, 'y') + self.get_attribute(name, 'size'))\n\n def set_render_queue(self, queue):\n super(ClickableLabel, self).set_render_queue(queue)\n self.button.set_render_queue(queue)\n\n\n def set_runner(self, runner):\n super(ClickableLabel, self).set_runner(runner)\n self.button.set_runner(runner)\n\n def __render__(self):\n self.button.__render__()\n super(ClickableLabel, self).__render__()\n\n\n def on_click(self, x, y):\n self.button.on_click(x, y)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1563,"cells":{"__id__":{"kind":"number","value":17669495465255,"string":"17,669,495,465,255"},"blob_id":{"kind":"string","value":"4bd8a345522db43be0124b430cfc129134c52ff5"},"directory_id":{"kind":"string","value":"e9e1e7a57b0fc1e0d6cecb500459e6f56fd70f1a"},"path":{"kind":"string","value":"/implementation/source/python/model/Presentation/LayerManagerModel.py"},"content_id":{"kind":"string","value":"5a9c0a56a95f05fccec1d8f597870ad8ea32f2a6"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"kle622/EClass"},"repo_url":{"kind":"string","value":"https://github.com/kle622/EClass"},"snapshot_id":{"kind":"string","value":"80eab8e24ff7f2ac4e264fc9f1a230c506624d7b"},"revision_id":{"kind":"string","value":"c25670a3020950bae1d93ca3179ed14315700d6b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-26T02:32:43.528592","string":"2020-12-26T02:32:43.528592"},"revision_date":{"kind":"timestamp","value":"2014-04-29T06:45:58","string":"2014-04-29T06:45:58"},"committer_date":{"kind":"timestamp","value":"2014-04-29T06:45:58","string":"2014-04-29T06:45:58"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from Layer import Layer\n\nclass LayerManagerModel:\n def __init__(self):\n background = Layer(\"Background\", 100, True)\n self.layers = [background]\n\n def DeleteLayer(self):\n print('From LayerManager.DeleteLayer()')\n\n def NewLayer(self, layer):\n print('From LayerManager.NewLayer()')\n self.layers.reverse()\n self.layers.append(layer)\n self.layers.reverse()\n \n def ChangeOpacity(self, index):\n print('From 
LayerManager.ChangeOpacity()')\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1564,"cells":{"__id__":{"kind":"number","value":2138893737785,"string":"2,138,893,737,785"},"blob_id":{"kind":"string","value":"87869c676e513efd6b528d7a1092f840f70ae090"},"directory_id":{"kind":"string","value":"bfcfdd8090c0fb4ac29952d372ca6975febad52b"},"path":{"kind":"string","value":"/chapter_5/exercise_10.py"},"content_id":{"kind":"string","value":"0dab394343017fabc7d2ef9280a36d6ca56cf8bf"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"dangnhan-6426/PracticalPython"},"repo_url":{"kind":"string","value":"https://github.com/dangnhan-6426/PracticalPython"},"snapshot_id":{"kind":"string","value":"bf6f613de05b800de33ab5fe0c8a5aa9b37e0916"},"revision_id":{"kind":"string","value":"2ad0240e19f5b8f77bdb9eea7738229f8776add3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-12-31T05:27:30.513986","string":"2021-12-31T05:27:30.513986"},"revision_date":{"kind":"timestamp","value":"2013-08-29T21:17:57","string":"2013-08-29T21:17:57"},"committer_date":{"kind":"timestamp","value":"2013-08-29T21:17:57","string":"2013-08-29T21:17:57"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"k
ind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# The following code displays a message(s) about the acidity of a solution:\n# ph = float(input(\"Enter the ph level: \"))\n\n# if ph < 7.0:\n# print(\"It's acidic!\")\n# elif ph < 4.0:\n# print(\"It's a strong acid!\")\n\ndef acid_test(ph):\n if ph < 7.0:\n print(\"It's acidic!\")\n elif ph < 4.0:\n print(\"It's a strong acid\")\n\n# a. What message(s) are displayed when the user enters 6.4?\nacid_test(6.4)\n\n# b. What message(s) are displayed when the user enters 3.6?\nacid_test(3.6)\n\n# c. Make a small change to one line of the code so that both messages\n# are displayed when a value less than 4 is entered.\n\ndef acid_test(ph):\n if ph < 7.0:\n print(\"It's acidic!\")\n if ph < 4.0:\n print(\"It's a strong acid\")\n\nacid_test(3.9)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1565,"cells":{"__id__":{"kind":"number","value":5970004567928,"string":"5,970,004,567,928"},"blob_id":{"kind":"string","value":"b183833d1be7458b48b416d702b3edbea4802e2c"},"directory_id":{"kind":"string","value":"8fdcf5600565d44931013553a3edf1b41047cb3d"},"path":{"kind":"string","value":"/src/noclobberdict.py"},"content_id":{"kind":"string","value":"fc0d6d9cc9d68ff4af419758897ae4c4501ba7a5"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"gberriz/datarail-2.0"},"repo_url":{"kind":"string","value":"https://github.com/gberriz/datarail-2.0"},"snapshot_id":{"kind":"string","value":"b310720c4f3054f3078a2e7cd892d184924324e4"},"revision_id":{"kind":"string","value":"4a6d132f2faa1e2f0e16360a9aefa6b5cd0c5a6b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T11:21:20.763195","string":"2021-01-10T11:21:20.763195"},"revision_date":{"kind":"timestamp","value":"2012-03-01T21:07:09","string":"2012-03-01T21:07:09"},"committer_date":{"kind":"timestamp","value":"2012-03-01T21:07:09","string":"2012-03-01T21:07:09"},"github_id":{"kind":"number","value":1007001,"string":"1,007,001"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"class NoClobberDict(dict):\n \"\"\"\n A dictionary whose keys may be assigned to at most once.\n \"\"\"\n def __setitem__(self, key, value):\n \"\"\"\n Assign value to self[key].\n \n If self[key] exists and is not equal to value, raise a\n ValueError.\n \"\"\"\n if key in self:\n v = self[key]\n if v != value:\n raise ValueError('key \"%s\" is already in dictionary, '\n 'with value %s' % (str(key), str(v)))\n else:\n super(NoClobberDict, self).__setitem__(key, value)\n\n def update(self, d=None, **kw):\n \"\"\"\n Update this dictionary with the values in d and **kw.\n\n The 
setting raises an exception if the updating would clobber\n an existing value.\n \"\"\"\n if not d is None:\n if hasattr(d, 'items'):\n items = d.items()\n else:\n items = d\n for k, v in items:\n self[k] = v\n \n for k, v in kw.items():\n self[k] = v\n\nif __name__ == '__main__':\n import unittest\n class Tests(unittest.TestCase):\n def test_setitem(self):\n \"\"\"Test that new values can't clobber old ones.\"\"\"\n d = NoClobberDict(x=1)\n d['x'] = 1\n self.assertRaises(ValueError, d.__setitem__, 'x', 2)\n\n def test_equality_test(self):\n \"\"\"Tests that equality (not identity) is the only criterion\n to test for for clobbering.\"\"\"\n d = NoClobberDict()\n d['x'] = []\n d['x'] = []\n self.assertRaises(ValueError, d.__setitem__, 'x', [1])\n d['y'] = None\n d['y'] = None\n\n def test_update(self):\n \"\"\"Test that update won't clobber.\"\"\"\n d = NoClobberDict(x=1)\n d.update({'x': 1})\n d.update(x=1)\n self.assertRaises(ValueError, d.update, {'x': 2})\n self.assertRaises(ValueError, d.update, x=2)\n\n print \"running tests\"\n unittest.main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1566,"cells":{"__id__":{"kind":"number","value":3994319589604,"string":"3,994,319,589,604"},"blob_id":{"kind":"string","value":"e58f4e7ee28c16396e990b5bebb87850c49a21ca"},"directory_id":{"kind":"string","value":"844390bdb77a4f6ad023ad182530f02256c78f43"},"path":{"kind":"string","value":"/Functions.py"},"content_id":{"kind":"string","value":"d867d6795dea251ed0445b3f284a77a1b6db0514"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"stal888/DropServ"},"repo_url":{"kind":"string","value":"https://github.com/stal888/DropServ"},"snapshot_id":{"kind":"string","value":"9b9235911eab5023031e2f285eaf8607c744b491"},"revision_id":{"kind":"string","value":"238c27263aa2a028ca060e52590aa50f6e88f908"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-03T01:04:18.154895","string":"2016-09-03T01:04:18.154895"},"revision_date":{"kind":"timestamp","value":"2012-07-22T20:07:52","string":"2012-07-22T20:07:52"},"committer_date":{"kind":"timestamp","value":"2012-07-22T20:07:52","string":"2012-07-22T20:07:52"},"github_id":{"kind":"number","value":3645666,"string":"3,645,666"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2012-07-22T19:47:31","string":"2012-07-22T19:47:31"},"gha_created_at":{"kind":"timestamp","value":"2012-03-07T04:13:39","string":"2012-03-07T04:13:39"},"gha_updated_at":{"kind":"timestamp","value":"2012-07-22T19:47:30","string":"2012-07-22T19:47:30"},"gha_pushed_at":{"kind":"timestamp","value":"2012-07-22T19:47:30","string":"2012-07-22T19:47:30"},"gha_size":{"kind":"number","value":176,"string":"176"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# DropServ.\n# This code is copyright (c) 2011 - 2012 by the PyBoard Dev Team \n# All rights reserved.\nfrom __future__ import division\nimport time\nimport hashlib\nimport random\nimport threading\nimport math\nimport os\nimport copy\nfrom 
collections import deque\nfrom pystache import Renderer\nallchars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHJKLMNOPQRSTUVWXYZ123456789\"\n\nclass Functions(object):\n \"\"\"\n Documentation is for losers\n \"\"\"\n def __init__(self, PyBoard):\n self.instance = PyBoard\n self.TemplateCache = deque()\n self.TemplateConstants = None\n self._refreshConstants()\n self.file_locks = {};\n print(self.instance.lang[\"FUNC_LOADED\"])\n\n def file_size(self, num):\n kb = num / 1024\n if kb > 1000:\n mb = kb / 1024\n return \"{0:03.2f} MB\".format(mb)\n else:\n return \"{0:03.2f} KB\".format(kb)\n\n def genAuthToken(self, user, origin):\n while True:\n sid = self.mkstring(5)\n if sid not in self.instance.Sessions:\n break\n times = int(math.floor(time.time()))\n token = hashlib.sha1(user[\"email\"] + origin + self.instance.conf[\"LoginSalt\"] + str(times)).hexdigest()\n self.instance.Sessions[sid] = (user[\"email\"], times)\n for x, v in self.instance.Sessions.items():\n if times - v[1] >= 86400:\n del self.instance.Sessions[x]\n return \"|\".join([sid, token])\n\n def hashPassword(self, password, salt=None):\n if salt == None:\n salt = self. 
mkstring(len(password))\n elif salt == \"\":\n return hashlib.sha512(password).hexdigest()\n else:\n salt = str(salt)\n if len(salt) != len(password):\n return (\"*\", salt)\n saltedPass = \"\".join(map(lambda x, y: x + y, password, salt))\n hashed = hashlib.sha512(saltedPass).hexdigest()\n return (hashed, salt)\n\n def mkstring(self, length):\n s = \"\"\n for x in range(length):\n if x == 2:\n s += \"l\"\n else:\n s += random.choice(allchars)\n return s\n\n def page_format(self, v={}, template=None, TemplateString=\"\", root=None):\n \"\"\"Format pages (obv)\"\"\"\n temp = None\n if root == None:\n root = self.instance.workd + \"/templates\"\n if template != None:\n if len(self.TemplateCache) >= 5:\n self.TemplateCache.popleft()\n for item in copy.copy(self.TemplateCache):\n if item[0] == template:\n if os.path.getmtime(\"{0}/{1}\".format(root, template)) > item[2]:\n self.TemplateCache.remove(item)\n break\n else:\n temp = item[1]\n break\n if not temp:\n if template not in self.file_locks:\n self.file_locks[template] = threading.RLock()\n self.file_locks[template].acquire()\n try:\n with open(root + \"/{0}\".format(template), \"r\") as plate:\n temp = plate.read()\n self.TemplateCache.append((template, temp, time.time()))\n self.file_locks[template].release()\n except IOError:\n if template in self.file_locks:\n self.file_locks[template].release()\n del self.file_locks[template]\n return \"\"\n elif TemplateString != \"\":\n temp = TemplateString\n else:\n return \"\"\n for x in v:\n if isinstance(v[x], basestring):\n try:\n v[x] = v[x].decode(\"utf-8\")\n except:\n pass\n formatted = Renderer().render(temp, self.instance.lang.getDict, v, constant=self.TemplateConstants)\n return formatted.encode(\"utf-8\")\n\n def read_faster(self, file, close=True):\n while True:\n c = file.read(16*4096)\n if c:\n yield c\n else:\n break\n if close:\n file.close()\n return\n\n def _refreshConstants(self):\n self.TemplateConstants = {\n \"version\": 
self.instance.conf[\"__version\"],\n \"root\": (\"/{0}\".format(self.instance.conf[\"Subfolder\"].strip(\"/\"))) if self.instance.conf[\"Subfolder\"].strip(\"/\") else \"\",\n }\n self.TemplateConstants[\"static\"] = \"{0}/static\".format(self.TemplateConstants[\"root\"])\n\n def verifyLogin(self, crumb, origin):\n pair = crumb.split('|')\n if pair[0] not in self.instance.Sessions:\n return None\n elif hashlib.sha1(self.instance.Sessions[pair[0]][0] + origin + self.instance.conf[\"LoginSalt\"] + str(self.instance.Sessions[pair[0]][1])).hexdigest() == pair[1]:\n return True\n else:\n return None\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1567,"cells":{"__id__":{"kind":"number","value":19138374295707,"string":"19,138,374,295,707"},"blob_id":{"kind":"string","value":"dadcbff05399df57d69ed76d35b877152314bb75"},"directory_id":{"kind":"string","value":"7b6a7dc2136f64bf9670c22fca6369e0899b61bf"},"path":{"kind":"string","value":"/pynes/tests/sprite_test.py"},"content_id":{"kind":"string","value":"60d32ebb870f49b1b8267cbbc0e9b3fa9645d97f"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"ebennaga/pyNES"},"repo_url":{"kind":"string","value":"https://github.com/ebennaga/pyNES"},"snapshot_id":{"kind":"string","value":"6e01f2ac42dc96f3d7eff3840807e30c0974e091"},"revision_id":{"kind":"string","value":"18dc183566d578af9cc776f83d05ab2d494fe90a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-11T05:31:53.446942","string":"2020-12-11T05:31:53.446942"},"revision_date":{"kind":"timestamp","value":"2014-05-05T15:13:46","string":"2014-05-05T15:13:46"},"committer_date":{"kind":"timestamp","value":"2014-05-05T15:13:46","string":"2014-05-05T15:13:46"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\nimport unittest\n\nfrom pynes import sprite\n\n\nclass SpriteTest(unittest.TestCase):\n\n def __init__(self, testcase_name):\n unittest.TestCase.__init__(self, testcase_name)\n f = open('fixtures/nerdynights/scrolling/mario.chr', 'rb')\n content = f.read()\n self.bin = [ord(c) for c in content]\n\n self.mario1 = [\n [0,0,0,0,0,0,1,1],\n [0,0,0,0,1,1,1,1],\n [0,0,0,1,1,1,1,1],\n [0,0,0,1,1,1,1,1],\n [0,0,0,3,3,3,2,2],\n [0,0,3,2,2,3,2,2],\n [0,0,3,2,2,3,3,2],\n [0,3,3,2,2,3,3,2]\n ]\n\n self.mario2 = [\n [1,1,1,0,0,0,0,0],\n [1,1,2,0,0,0,0,0],\n [1,2,2,0,0,0,0,0],\n [1,1,1,1,1,1,0,0],\n [3,2,2,2,0,0,0,0],\n 
[3,3,2,2,2,2,0,0],\n [2,2,2,2,2,2,2,0],\n [2,2,3,2,2,2,2,0]\n ]\n\n self.blank = [\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0],\n [0,0,0,0,0,0,0,0]\n ]\n\n def test_load_sprites(self):\n sprites = sprite.load_sprites('fixtures/nerdynights/scrolling/mario.chr')\n self.assertEquals(self.bin, sprites)\n\n def test_decode_first_sprite(self):\n channelA = self.bin[0:8]\n channelB = self.bin[8:16]\n s1 = sprite.decode_sprite(channelA, channelB)\n self.assertEquals(self.mario1, s1)\n\n def test_decode_second_sprite(self):\n channelA = self.bin[16:24]\n channelB = self.bin[24:32]\n\n s2 = sprite.decode_sprite(channelA, channelB)\n self.assertEquals(self.mario2, s2)\n\n def test_get_first_sprite(self):\n s1 = sprite.get_sprite(0, self.bin)\n self.assertEquals(self.mario1, s1)\n\n def test_get_second_sprite(self):\n s2 = sprite.get_sprite(1, self.bin)\n self.assertEquals(self.mario2, s2)\n\n def test_sprite_length(self):\n length = sprite.length(self.bin)\n self.assertEquals(512, length)\n\n def test_encode_first_sprite(self):\n encoded = sprite.encode_sprite(self.mario1)\n expected = self.bin[0:16]\n self.assertEquals(expected, encoded)\n\n def test_encode_second_sprite(self):\n encoded = sprite.encode_sprite(self.mario2)\n expected = self.bin[16:32]\n self.assertEquals(expected, encoded)\n\n def test_put_first_sprite(self):\n expected = [\n [0,1,2,3,0,1,2,3],\n [1,0,1,2,3,0,1,2],\n [2,1,0,1,2,3,0,1],\n [3,2,1,0,1,2,3,0],\n [0,3,2,1,0,1,2,3],\n [1,0,3,2,1,0,1,2],\n [2,1,0,3,2,1,0,1],\n [3,2,1,0,3,2,1,0]\n ]\n sprite.put_sprite(0, self.bin, expected)\n s1 = sprite.get_sprite(0, self.bin)\n self.assertEquals(expected, s1)\n\n def test_put_second_sprite(self):\n expected = [\n [0,1,2,3,0,1,2,3],\n [1,0,1,2,3,0,1,2],\n [2,1,0,1,2,3,0,1],\n [3,2,1,0,1,2,3,0],\n [0,3,2,1,0,1,2,3],\n [1,0,3,2,1,0,1,2],\n [2,1,0,3,2,1,0,1],\n [3,2,1,0,3,2,1,0]\n ]\n sprite.put_sprite(1, self.bin, 
expected)\n s1 = sprite.get_sprite(1, self.bin)\n self.assertEquals(expected, s1)\n\n def test_find_sprite_1(self):\n index = sprite.find_sprite(self.bin, self.mario1)\n self.assertEquals(0, index)\n\n def test_find_sprite_2(self):\n index = sprite.find_sprite(self.bin, self.mario2)\n self.assertEquals(1, index)\n\n def test_find_sprite_2(self):\n index = sprite.find_sprite(self.bin, self.blank, 256)\n self.assertEquals(292 - 256, index)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1568,"cells":{"__id__":{"kind":"number","value":111669178404,"string":"111,669,178,404"},"blob_id":{"kind":"string","value":"d6ce23da6734c164fd190d59ea77c089c960973a"},"directory_id":{"kind":"string","value":"98f3f3344267db43e784dc05de469ff520586f46"},"path":{"kind":"string","value":"/globusonline/transfer/api_client/examples/example.py"},"content_id":{"kind":"string","value":"843a28ddbc5e83d371a30dff03c4d434dda6ef1f"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"rheiland/transfer-api-client-python"},"repo_url":{"kind":"string","value":"https://github.com/rheiland/transfer-api-client-python"},"snapshot_id":{"kind":"string","value":"eb446d9b0b3971d06a72ae917f2b0448d9fe53b8"},"revision_id":{"kind":"string","value":"1aeca3362e6767b2170ad0756e98e1db5fd7035b"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-24T14:55:59.513536","string":"2021-01-24T14:55:59.513536"},"revision_date":{"kind":"timestamp","value":"2013-09-05T18:03:32","string":"2013-09-05T18:03:32"},"committer_date":{"kind":"timestamp","value":"2013-09-05T18:03:32","string":"2013-09-05T18:03:32"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n# Copyright 2010 University of Chicago\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n\"\"\"\nDemonstrate API calls.\n\nExample run using standard globus toolkit certificate locations:\n\npython example.py USERNAME -k ~/.globus/userkey.pem -c ~/.globus/usercert.pem\n\"\"\"\nimport time\nfrom datetime import datetime, timedelta\nimport traceback\n\nfrom globusonline.transfer.api_client import Transfer, create_client_from_args\n\n# TransferAPIClient instance.\napi = None\n\ndef tutorial():\n \"\"\"\n Do a bunch of API calls and display the results. Does a small transfer\n between tutorial endpoints, but otherwise does not modify user data.\n\n Uses module global API client instance.\n \"\"\"\n # See what is in the account before we make any submissions.\n print \"=== Before tutorial ===\"\n display_tasksummary(); print\n display_task_list(); print\n display_endpoint_list(); print\n\n # auto activate the endpoint, and display before/after.\n display_activation(\"go#ep1\")\n display_activation(\"go#ep2\")\n\n print \"=== Before transfer ===\"\n display_ls(\"go#ep1\"); print\n display_ls(\"go#ep2\"); print\n\n # submit a transfer\n code, message, data = api.transfer_submission_id()\n submission_id = data[\"value\"]\n deadline = datetime.utcnow() + timedelta(minutes=10)\n t = Transfer(submission_id, \"go#ep1\", \"go#ep2\", deadline)\n t.add_item(\"/~/.bashrc\", \"/~/api-example-bashrc-copy\")\n code, reason, data = api.transfer(t)\n task_id = data[\"task_id\"]\n\n # see the new transfer show up\n print \"=== After submit ===\"\n display_tasksummary(); print\n display_task(task_id); print\n\n # wait for the task to complete, and see the summary and lists\n # update\n if wait_for_task(task_id):\n print \"=== After completion ===\"\n display_tasksummary(); print\n display_task(task_id); print\n display_ls(\"go#ep2\"); print\n\n\ndef display_activation(endpoint_name):\n print \"=== Endpoint pre-activation ===\"\n display_endpoint(endpoint_name)\n print\n code, reason, result = api.endpoint_autoactivate(endpoint_name,\n 
if_expires_in=600)\n if result[\"code\"].startswith(\"AutoActivationFailed\"):\n print \"Auto activation failed, ls and transfers will likely fail!\"\n print \"result: %s (%s)\" % (result[\"code\"], result[\"message\"])\n print \"=== Endpoint post-activation ===\"\n display_endpoint(endpoint_name)\n print\n\n\ndef display_tasksummary():\n code, reason, data = api.tasksummary()\n print \"Task Summary for %s:\" % api.username\n for k, v in data.iteritems():\n if k == \"DATA_TYPE\":\n continue\n print \"%3d %s\" % (int(v), k.upper().ljust(9))\n\n\ndef display_task_list(max_age=None):\n \"\"\"\n @param max_age: only show tasks requested at or after now - max_age.\n @type max_age: timedelta\n \"\"\"\n kwargs = {}\n if max_age:\n min_request_time = datetime.utcnow() - max_age\n # filter on request_time starting at min_request_time, with no\n # upper limit on request_time.\n kwargs[\"request_time\"] = \"%s,\" % min_request_time\n\n code, reason, task_list = api.task_list(**kwargs)\n print \"task_list for %s:\" % api.username\n for task in task_list[\"DATA\"]:\n print \"Task %s:\" % task[\"task_id\"]\n _print_task(task)\n\ndef _print_task(data, indent_level=0):\n \"\"\"\n Works for tasks and subtasks, since both have a task_id key\n and other key/values are printed by iterating through the items.\n \"\"\"\n indent = \" \" * indent_level\n indent += \" \" * 2\n for k, v in data.iteritems():\n if k in (\"DATA_TYPE\", \"LINKS\"):\n continue\n print indent + \"%s: %s\" % (k, v)\n\ndef display_task(task_id, show_subtasks=True):\n code, reason, data = api.task(task_id)\n print \"Task %s:\" % task_id\n _print_task(data, 0)\n\n if show_subtasks:\n code, reason, data = api.subtask_list(task_id)\n subtask_list = data[\"DATA\"]\n for t in subtask_list:\n print \" subtask %s:\" % t[\"task_id\"]\n _print_task(t, 4)\n\ndef wait_for_task(task_id, timeout=120):\n status = \"ACTIVE\"\n while timeout and status == \"ACTIVE\":\n code, reason, data = api.task(task_id, fields=\"status\")\n 
status = data[\"status\"]\n time.sleep(1)\n timeout -= 1\n\n if status != \"ACTIVE\":\n print \"Task %s complete!\" % task_id\n return True\n else:\n print \"Task still not complete after %d seconds\" % timeout\n return False\n\ndef display_endpoint_list():\n code, reason, endpoint_list = api.endpoint_list(limit=100)\n print \"Found %d endpoints for user %s:\" \\\n % (endpoint_list[\"length\"], api.username)\n for ep in endpoint_list[\"DATA\"]:\n _print_endpoint(ep)\n\ndef display_endpoint(endpoint_name):\n code, reason, data = api.endpoint(endpoint_name)\n _print_endpoint(data)\n\ndef _print_endpoint(ep):\n name = ep[\"canonical_name\"]\n print name\n if ep[\"activated\"]:\n print \" activated (expires: %s)\" % ep[\"expire_time\"]\n else:\n print \" not activated\"\n if ep[\"public\"]:\n print \" public\"\n else:\n print \" not public\"\n if ep[\"myproxy_server\"]:\n print \" default myproxy server: %s\" % ep[\"myproxy_server\"]\n else:\n print \" no default myproxy server\"\n servers = ep.get(\"DATA\", ())\n print \" servers:\"\n for s in servers:\n uri = s[\"uri\"]\n if not uri:\n uri = \"GC endpoint, no uri available\"\n print \" \" + uri,\n if s[\"subject\"]:\n print \" (%s)\" % s[\"subject\"]\n else:\n print\n\ndef unicode_(data):\n \"\"\"\n Coerce any type to unicode, assuming utf-8 encoding for strings.\n \"\"\"\n if isinstance(data, unicode):\n return data\n if isinstance(data, str):\n return unicode(data, \"utf-8\")\n else:\n return unicode(data)\n\ndef display_ls(endpoint_name, path=\"\"):\n code, reason, data = api.endpoint_ls(endpoint_name, path)\n # Server returns canonical path; \"\" maps to the users default path,\n # which is typically their home directory \"/~/\".\n path = data[\"path\"]\n print \"Contents of %s on %s:\" % (path, endpoint_name)\n headers = \"name, type, permissions, size, user, group, last_modified\"\n headers_list = headers.split(\", \")\n print headers\n for f in data[\"DATA\"]:\n print \", \".join([unicode_(f[k]) for k in 
headers_list])\n\nif __name__ == '__main__':\n api, _ = create_client_from_args()\n tutorial()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1569,"cells":{"__id__":{"kind":"number","value":7876970046030,"string":"7,876,970,046,030"},"blob_id":{"kind":"string","value":"c6a4ad302e10b79133256ca9017e9b17f188e1b3"},"directory_id":{"kind":"string","value":"183caf378df099da122f65ea9b75002b1e12b774"},"path":{"kind":"string","value":"/projFocus/ceRNA/processData/parseBS/get3PrimeUTRCoords.py"},"content_id":{"kind":"string","value":"595463c9a96cebde82db3c8a67f9e431eae41f1d"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"cwt1/scripts-1"},"repo_url":{"kind":"string","value":"https://github.com/cwt1/scripts-1"},"snapshot_id":{"kind":"string","value":"f58e476ddb2c83e0480856a95a95a644ad3c001c"},"revision_id":{"kind":"string","value":"061d6592aa6ab11c93363fcb40305a57db05e3f2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-28T01:31:30.896133","string":"2021-05-28T01:31:30.896133"},"revision_date":{"kind":"timestamp","value":"2014-08-25T19:02:37","string":"2014-08-25T19:02:37"},"committer_date":{"kind":"timestamp","value":"2014-08-25T19:02:37","string":"2014-08-25T19:02:37"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"n
ull"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n#J.HE\n'''Descp.: Given binding site sequence infor, search the genomic coordinaites in\nDNA sequanece, get the coordinate of miRNA binding sites for each gene'''\n\nimport sys,getopt\nimport re\nfrom collections import defaultdict\nfrom searchStr import bruteSearch \nfrom searchStr import searchUTR\nargv = sys.argv[1:]\ninput = ''\noutput = ''\nusage = \"\"\nexample = \"\"\ntry:\n opts,args = getopt.getopt(argv,\"hc:d:o:\")\nexcept getopt.GetoptError:\n print usage + \"\\n\" + example \n sys.exit(2)\nfor opt, arg in opts:\n if opt == '-h':\n print usage + \"\\n\" + example \n sys.exit()\n elif opt in (\"-c\"):\n cupidseq = arg\n elif opt in (\"-d\"):\n dnaseq = arg\n elif opt in (\"-o\"):\n output = arg\nprint('Script path:\\t'+ sys.argv[0])\nprint('Input file:\\t' + cupidseq)\nprint('Input file:\\t'+ dnaseq)\nprint('Output file:\\t'+ output )\n\n##load all cupidseq \nbsSeqDict = defaultdict(list)\nwith(open(cupidseq)) as f:\n line = f.readline()\n line = f.readline()\n while line:\n gene, bsseqinfo = line.strip().split(\"\\t\")\n for x in bsseqinfo.split(\";\"):\n bsSeqDict[gene].append(x.lower())\n line = f.readline()\nprint \"binding seq loaded\"\n# print bsSeqDict.items()[1]\n\n##process DNA seq by gene\ndef find_all(qstr, allstr):\n start=0\n while True:\n start = allstr.find(qstr, start)\n if start == -1: return\n yield start\n start += len(qstr)\n\noutputH = open(output, 'w')\noutputH.write(\"Symbol\\tChr:bsStart-bsEnd\\n\")\ncnt = 0 \nwith(open(dnaseq)) as f:\n line = f.readline()\n while line:\n cnt = cnt + 1\n if cnt % 1000 == 0 :\n print \" %s line processed\" % cnt\n if not re.match(\"^Symbol\",line):\n gene, coord, seq = line.strip().split(\"\\t\")\n chrom, tss, tse = re.split(\":|-\", coord) \n if bsSeqDict.get(gene, ''):\n 
outbss = []\n for bsseq in bsSeqDict[gene]:\n bsstart, querySeq = bsseq.split(\":\")\n bsstart = int(bsstart)\n for bsindex in searchUTR(querySeq, seq): \n outbss.append(int(tss) + bsindex + bsstart )\n outRec = gene + \"\\t\" + chrom + \":\" + \\\n \";\".join(map(str, list(set(outbss))))\n outputH.write(outRec + \"\\n\" )\n del bsSeqDict[gene]\n line = f.readline()\noutputH.close()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1570,"cells":{"__id__":{"kind":"number","value":15693810534726,"string":"15,693,810,534,726"},"blob_id":{"kind":"string","value":"ca2171082e79b7958ccdfbe3d40006de1a10c690"},"directory_id":{"kind":"string","value":"59387662fba5de9d20209d855e688266aabe4961"},"path":{"kind":"string","value":"/demo/urls.py"},"content_id":{"kind":"string","value":"6240986a5a6fc05c312c11e9b876072cbd56b685"},"detected_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n 
\"BSD-2-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"jordic/django_tiny_shop"},"repo_url":{"kind":"string","value":"https://github.com/jordic/django_tiny_shop"},"snapshot_id":{"kind":"string","value":"b41eac9f3d0d12b378359817d65ac34c9977a676"},"revision_id":{"kind":"string","value":"46dd4c1c2e3fdf96676c3d02ad197b7cecff6bc3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T22:01:29.712718","string":"2021-01-18T22:01:29.712718"},"revision_date":{"kind":"timestamp","value":"2014-12-01T05:45:37","string":"2014-12-01T05:45:37"},"committer_date":{"kind":"timestamp","value":"2014-12-01T05:45:37","string":"2014-12-01T05:45:37"},"github_id":{"kind":"number","value":3014559,"string":"3,014,559"},"star_events_count":{"kind":"number","value":4,"string":"4"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2018-06-08T10:48:15","string":"2018-06-08T10:48:15"},"gha_created_at":{"kind":"timestamp","value":"2011-12-19T19:51:47","string":"2011-12-19T19:51:47"},"gha_updated_at":{"kind":"timestamp","value":"2017-09-25T19:39:20","string":"2017-09-25T19:39:20"},"gha_pushed_at":{"kind":"timestamp","value":"2016-04-19T06:02:30","string":"2016-04-19T06:02:30"},"gha_size":{"kind":"number","value":588,"string":"588"},"gha_stargazers_count":{"kind":"number","value":4,"string":"4"},"gha_forks_count":{"kind":"number","value":1,"string":"1"},"gha_open_issues_count":{"kind":"number","value":0,"string":"0"},"gha_language":{"kind":"string","value":"JavaScript"},"gha_archived":{"kind":"bool","value":false,"string":"false"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n# Autor: jordi collell \n# http://tempointeractiu.cat\n# 
-------------------------------------------------------------------\n'''\n'''\n\nfrom django.conf.urls.defaults import patterns, include, url\nfrom django.conf import settings\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n \n url(r'^checkout/paypal/ipn', include('paypal.standard.ipn.urls')),\n url(r'', include('shop.urls')),\n url(r'^admin/', include(admin.site.urls)),\n\n \n)\n\n\nif settings.DEBUG:\n\turlpatterns += patterns('',\n (r'^media/admin/(?P.*)$', 'django.views.static.serve', {'document_root': './media/admin/'}),\n (r'^media/(?P.*)$', 'django.views.static.serve', {'document_root':'./media/'}),\n )"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1571,"cells":{"__id__":{"kind":"number","value":8297876822675,"string":"8,297,876,822,675"},"blob_id":{"kind":"string","value":"fb163ea1ba1d790c1eaea50bcaf62aab99c29758"},"directory_id":{"kind":"string","value":"76288367bd583fe05faf79759e1fbdb8b66def2e"},"path":{"kind":"string","value":"/01_Part1/handout/code/mapper_template.py"},"content_id":{"kind":"string","value":"72a08ca48152056dcdb70826473f3d31901c388c"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"parijitkedia/eth-datamining"},"repo_url":{"kind":"string","value":"https://github.com/parijitkedia/eth-datamining"},"snapshot_id":{"kind":"string","value":"7a89a5790697ba4bef23a9c1dc110f30fa901abf"},"revision_id":{"kind":"string","value":"80360563e28829af15bed9c1c888ad66d8769e13"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-05-28T11:16:09.391042","string":"2021-05-28T11:16:09.391042"},"revision_date":{"kind":"timestamp","value":"2014-06-10T20:55:46","string":"2014-06-10T20:55:46"},"committer_date":{"kind":"timestamp","value":"2014-06-10T20:55:46","string":"2014-06-10T20:55:46"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nimport numpy as np\nimport sys\n\ndef emit(key, value):\n # write to stdout\n print(key + '\\t' + value)\n\ndef getMinHashSignature(shingles, hash_fns):\n #print(\"number of hash fns: \" + str(len(hash_fns)))\n M = len(hash_fns) * [int(max(shingles))+100]\n\n for row in range(int(max(shingles))+1):\n if row in shingles:\n\n #print(\"Video has shingle \" + str(row))\n for i,hash_fn in enumerate(hash_fns):\n #print('hashfn: ' + str(hash_fn))\n M[i] = min(M[i], h(hash_fn,row))\n\n #print(M)\n return M\n\ndef partition(value, shingles, R, B, hash_fns):\n M = 
getMinHashSignature(shingles, hash_fns);\n\n for b in range(B): \n key = ''\n for r in range(R):\n row = b*R+r;\n\n key += str(M[row])\n\n emit(key, value)\n\ndef h(permutator, row):\n return (permutator[0] * row + permutator[1]) % permutator[2]\n\ndef get_permutation_descriptor(size):\n a = np.random.randint(size)\n b = np.random.randint(size)\n return (a,b,size)\n\nif __name__ == \"__main__\":\n # Very important. Make sure that each machine is using the\n # same seed when generating random numbers for the hash functions.\n np.random.seed(seed=42)\n \n # Configuration\n num_features = 10000;\n t = 0.85\n n = 256; # number of hashes\n\n # B and R will produce threshhold of 0.8. Giving more FP.\n # This produces \"only more work\"\n B = 16;\n R = 16;\n\n # Generate hash functions\n hash_sigs = []\n for i in range(R*B):\n hash_sigs.append( get_permutation_descriptor(num_features) )\n\n for line in sys.stdin:\n line = line.strip()\n video_id = int(line[6:15])\n shingles = line[16:].split()\n value = str(video_id) + \" \" + line[16:]\n shingles = np.fromstring(line[16:], sep=\" \")\n partition(value, shingles, R, B, hash_sigs)\n\n #print(\"-----\")\n #print(\"Config: R=\" + str(R) + \" B=\" + str(B))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1572,"cells":{"__id__":{"kind":"number","value":2800318694059,"string":"2,800,318,694,059"},"blob_id":{"kind":"string","value":"24ab983f3b75101bf2a483ca106ceba60c39abde"},"directory_id":{"kind":"string","value":"bf574bc57fb27a8a92a8b585d37b43d7ad4329c2"},"path":{"kind":"string","value":"/rubiks_solver.py"},"content_id":{"kind":"string","value":"e29bbc54bcebc8ba4738de198b7db3b03ba5abf0"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"rufpierre/rubiks_solver"},"repo_url":{"kind":"string","value":"https://github.com/rufpierre/rubiks_solver"},"snapshot_id":{"kind":"string","value":"c8a53abb0aa4c3d4a1ecd6082f053e200ddcb4d0"},"revision_id":{"kind":"string","value":"cfea97f07a48c48c671bc9dea7bea7a324a2d560"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T07:10:28.825636","string":"2021-01-20T07:10:28.825636"},"revision_date":{"kind":"timestamp","value":"2014-03-16T16:26:41","string":"2014-03-16T16:26:41"},"committer_date":{"kind":"timestamp","value":"2014-03-16T16:26:41","string":"2014-03-16T16:26:41"},"github_id":{"kind":"number","value":17499749,"string":"17,499,749"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom engine import *\nfrom collections import Counter\n\nhelp = \"\"\"f,r,b,l,d,t for standard transformations\nF,R,B,L,D,T for reverse transformations\nz to reset the cube\nq to quit\n\"\"\"\n\n# clean screen\nprint(\"\\033c\")\n\n# init the display\nprint(help)\nprint_cube(cube)\n\n# choose between CLI mode or static mode\ncli = True\n\n\n#==============================================================================\n# en cours:\n# essayer de comprendre les genes et les organismes\n# bidouiller l exemple que jai recopié dans le repertoire (le duppliquer)\n# \tpour l amener vers un gene string au lieu de float\n# \n# en cours 2\n# ai fait une seule classe de gene (pas d heritage)\n# il faut minimiser la fonction de fitness (donc l inverser)\n# bien comprendre comment marche le truc avec les floats\n#==============================================================================\n\n# fitness of the cube\ndef variance(x):\n\t\"\"\"variance = 1/(n-1) * sum(1,n,(x-m)²)\"\"\"\n\tm = float(sum(x))/len(x)\n\treturn sum([(e-m)**2 for e in x])/(len(x)-1)\n\ndef dist(x):\n\t\"\"\"distribution of a list of values\"\"\"\n\tc = Counter(x)\n\treturn [c[e] for e in sorted(c)]\n\ndef dist2(x):\n\t\"\"\"distribution of the colors on a cube facet\"\"\"\n\treturn [x.count(e) for e in range(6)]\n\ndef fitness(cube):\n\t\"\"\"fitness of the cube is the sum of the variances for each facet of the cube\n\tmax score multiplied by -1.\n cube in its initial state is -81 wich is the lowest and best score\n 'cube in a cube' is -33\"\"\"\n\treturn -sum([variance(dist2(cube[facet])) for facet in range(6)])\n\n\n# static mode\nif (not cli):\n\trand_inst = \"flfTrtffllTLbDBllt\"\n\tpons_asinorum = \"F2 B2 R2 L2 U2 D2\"\n\tcube_in_a_cube = \"F L F U' R U F2 L2 U' L' B D' B' L2 U\"\n\tcube = chain(cube_in_a_cube, cube)\n\tprint_cube(cube)\n\tprint(fitness(cube))\n\n# CLI mode\nwhile cli:\n\traw_in = raw_input()\n\t# quit\n\tif raw_in == 'q':\n\t\t# 
clean screen\n\t\tprint(\"\\033c\")\n\t\t# exit the loop\n\t\tbreak\n\t# put back the cube in its initial state\n\telif raw_in == 'z':\n\t\tcube = original_cube\n\t\tprint(\"\\033c\")\n\t\tprint(help)\n\t\tprint_cube(cube)\n\t# execute the string of commands\n\telse:\t\n\t\tcube = chain(raw_in, cube)\n\t\tprint(help)\n\t\tprint_cube(cube)\n\t\tprint(fitness(cube))\n\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1573,"cells":{"__id__":{"kind":"number","value":4999341968247,"string":"4,999,341,968,247"},"blob_id":{"kind":"string","value":"535c94e80ade31c5fa0aa6a5c711e2cc9b0f9fe5"},"directory_id":{"kind":"string","value":"d2813d7fa0bdaf925365e35a2f9ef2b38781eb4a"},"path":{"kind":"string","value":"/trash/old_before merge_20.06.14/registration/models.py"},"content_id":{"kind":"string","value":"d01a97dd592d091f59f34104309fab795ede6fa0"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"dendht/Django"},"repo_url":{"kind":"string","value":"https://github.com/dendht/Django"},"snapshot_id":{"kind":"string","value":"e4cfcf4bef51e065167d99a7178686323d40ec7e"},"revision_id":{"kind":"string","value":"c79595c45d29f1a4445aaf21dd0b3b4b099d7953"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T23:55:10.805362","string":"2016-09-05T23:55:10.805362"},"revision_date":{"kind":"timestamp","value":"2014-06-22T18:38:05","string":"2014-06-22T18:38:05"},"committer_date":{"kind":"timestamp","value":"2014-06-22T18:38:05","string":"2014-06-22T18:38:05"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.contrib.auth.models import models\nfrom django import forms as forms\nfrom django.contrib.auth import authenticate, login, logout\n\n\nclass User(models.Model):\n email = models.TextField(max_length=50)\n username = models.TextField(max_length=30)\n password = models.TextField(max_length=50)\n date_joined = 
models.DateTimeField(auto_now_add=True)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1574,"cells":{"__id__":{"kind":"number","value":7636451900607,"string":"7,636,451,900,607"},"blob_id":{"kind":"string","value":"a6e009300988f21c7cdfdaa67326724e56a9dc6c"},"directory_id":{"kind":"string","value":"424253b371ba2de58e6041bf9ce7705d75385ea0"},"path":{"kind":"string","value":"/django/pycoon/tags/urls.py"},"content_id":{"kind":"string","value":"1a97551cfab0b37df267f1d1f9ec9333c57c7d24"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"digitaldingo/pycoon"},"repo_url":{"kind":"string","value":"https://github.com/digitaldingo/pycoon"},"snapshot_id":{"kind":"string","value":"dcbde8fa2d7cadd9d7361e2f42c9bd61ea8fd8d3"},"revision_id":{"kind":"string","value":"8fa7c671d2606aeefca74ea54c3a7071a9432483"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2019-04-07T15:48:30.823373","string":"2019-04-07T15:48:30.823373"},"revision_date":{"kind":"timestamp","value":"2014-08-21T17:58:17","string":"2014-08-21T17:58:17"},"committer_date":{"kind":"timestamp","value":"2014-08-21T17:58:17","string":"2014-08-21T17:58:17"},"github_id":{"kind":"number","value":2409396,"string":"2,409,396"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"}
,"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.conf.urls import patterns, url\n\nfrom .views import TagListView, TagView, TagFilterListView, TagFilterView\n\nurlpatterns = patterns('',\n url(r'^$', TagListView.as_view(), name=\"tag_list\"),\n url(r'^(?P\\d+)/$', TagView.as_view(), name=\"tag_details\"),\n\n url(r'^filters/$', TagFilterListView.as_view(), name=\"tagfilter_list\"),\n url(r'^filters/(?P\\d+)/$', TagFilterView.as_view(), name=\"tagfilter_details\"),\n)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1575,"cells":{"__id__":{"kind":"number","value":3977139739865,"string":"3,977,139,739,865"},"blob_id":{"kind":"string","value":"7bf6c9d5b4484aba911ddbab11f2248aaea648d1"},"directory_id":{"kind":"string","value":"a1919ed17fae2c982a0e949521e86fb748cb1d35"},"path":{"kind":"string","value":"/learner_mouse.py"},"content_id":{"kind":"string","value":"093b973fc7ba0bd49b0fab97730c58076833c29f"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"TurpIF/gestures-learning"},"repo_url":{"kind":"string","value":"https://github.com/TurpIF/gestures-learning"},"snapshot_id":{"kind":"string","value":"f3749afc79a9d4b81a976352389fd6e5cd1e73c7"},"revision_id":{"kind":"string","value":"54649a13235ff3df45989bb28e602d417463f2b6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T13:13:07.302367","string":"2016-09-06T13:13:07.302367"},"revision_date":{"kind":"timestamp","value":"2014-04-07T12:04:51","string":"2014-04-07T12:04:51"},"committer_date":{"kind":"timestamp","value":"2014-04-07T12:04:51","string":"2014-04-07T12:04:51"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import pymouse\nimport time\nimport sys\n\nclass Move(object):\n \"\"\"\n Basic structure representing a move.\n\n Arguments:\n name -- String : Name of the move\n descr -- [(Integer, Integer)] : Temporal list of (dx, dy) representing the\n movement.\n \"\"\"\n def __init__(self, name, descr):\n assert len(descr) >= 1\n super(Move, self).__init__()\n self.name = name\n self.descr = descr\n\n def __str__(self):\n \"\"\"\n String representation of the move.\n\n Returns:\n String representation\n \"\"\"\n pos_str = map(lambda p: '%d %d' % p, self.descr)\n return '%s %s' % (self.name, ' '.join(pos_str))\n\n @classmethod\n def 
from_string(cls, string):\n \"\"\"\n Construct a *Move* from a string.\n\n Arguments:\n string -- Input string to transfom into Move\n\n Returns:\n The constructed move\n\n Raises:\n ValueError : When the string format is not good\n \"\"\"\n words = string.split(' ')\n if len(words) < 3:\n raise ValueError('A move have to contain a minimum of a name and one position.')\n if len(words) % 2 != 1:\n raise ValueError('Expected one more integer')\n\n name = words[0]\n try:\n ints = map(int, words[1:])\n except ValueError as e:\n raise e\n couples = zip(ints[::2], ints[1::2])\n return cls(name, couples)\n\n def save(self, file=sys.stdout):\n \"\"\"\n Write the moves into the file *file*\n\n Arguments:\n file -- File : File to write in\n\n Raises:\n IOError : When it's impossible to write into the file\n \"\"\"\n try:\n file.write(str(self) + '\\n')\n except IOError:\n raise\n\ndef acquire_move(size, time_sleep=0.005):\n \"\"\"\n Get a mouse move with a size of *size* points.\n\n Arguments:\n size -- Integer : The number of position taken for the move\n time_sleep -- Real : Time to sleep between taking the positions (default\n 0.005)\n\n Returns:\n [Real] : A list of size *size* containing the moves (dx, dy).\n \"\"\"\n mouse = pymouse.PyMouse()\n o = mouse.position()\n move = []\n for _ in xrange(size):\n pos = mouse.position()\n dx = pos[0] - o[0]\n dy = pos[1] - o[1]\n move.append((dx, dy))\n time.sleep(time_sleep)\n return move\n\ndef wait_mouse_move(static_threashold=20):\n \"\"\"\n Wait and block until the mouse move by *static_threashold*.\n\n Arguments:\n static_threashold -- Real : Distance the mouse has to move (default 20)\n \"\"\"\n mouse = pymouse.PyMouse()\n o = mouse.position()\n while abs(mouse.position()[0] - o[0]) + abs(mouse.position()[1] - o[1]) \\\n < static_threashold:\n time.sleep(0.01)\n\nif __name__ == '__main__':\n cont = True\n moves = []\n\n print 'Move name ?',\n name = raw_input()\n\n while cont:\n print 'Waiting the beginning of the 
move...'\n wait_mouse_move()\n print 'Recording the move...'\n move = Move(name, acquire_move(100))\n print 'Keep it ? (y/n)',\n if raw_input() == 'y':\n moves.append(move)\n print 'Continue ? (y/n)',\n cont = raw_input() == 'y'\n\n if moves:\n _f_name = name.lower() + '.mv'\n print 'Save moves into ? [%s]' % _f_name,\n f_name = raw_input()\n if not f_name:\n f_name = _f_name\n\n print 'Saving into %s...' % f_name,\n with open(f_name, 'w+') as f:\n for m in moves:\n m.save(f)\n print 'OK'\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1576,"cells":{"__id__":{"kind":"number","value":11012296163839,"string":"11,012,296,163,839"},"blob_id":{"kind":"string","value":"0ae5c587a42e8d81deb7e1f6c9c15d0b586937ff"},"directory_id":{"kind":"string","value":"7a27cc67bed96eb14eac5d12c1d519df44b1f1d4"},"path":{"kind":"string","value":"/Standardbib/umbenenner.py"},"content_id":{"kind":"string","value":"a9709c05c0b2f0248b227f84411d07a0405e4e92"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"DerNerger/Python"},"repo_url":{"kind":"string","value":"https://github.com/DerNerger/Python"},"snapshot_id":{"kind":"string","value":"b24bcfc2f5d331182f8ba86ccc5afdc3f8a2e47d"},"revision_id":{"kind":"string","value":"43881d7460505cd096ff851bd10e7e2fe48c0f3f"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T22:11:04.247761","string":"2021-01-23T22:11:04.247761"},"revision_date":{"kind":"timestamp","value":"2014-05-23T12:27:13","string":"2014-05-23T12:27:13"},"committer_date":{"kind":"timestamp","value":"2014-05-23T12:27:13","string":"2014-05-23T12:27:13"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import os\n\nfor filename in os.listdir(os.getcwd()):\n if filename.endswith(\".htm\") :\n \tos.rename(os.path.join(os.getcwd(),filename), 
os.path.join(os.getcwd(),filename+\"l\"))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1577,"cells":{"__id__":{"kind":"number","value":11716670805696,"string":"11,716,670,805,696"},"blob_id":{"kind":"string","value":"2d6d379cbf76ee79679b47768d26ee16f758c680"},"directory_id":{"kind":"string","value":"295be1340919ee361eebc7e03f98769b335aafff"},"path":{"kind":"string","value":"/map2.py"},"content_id":{"kind":"string","value":"449fd837347d1b16b5cc64562f80a8ae0a270726"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Philly2034/repos1"},"repo_url":{"kind":"string","value":"https://github.com/Philly2034/repos1"},"snapshot_id":{"kind":"string","value":"d207d61b8df3d5708086093e97f61dab404f86a5"},"revision_id":{"kind":"string","value":"971f17a264cd445a762db135f54e52b16d12fb26"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-20T11:22:38.894245","string":"2021-01-20T11:22:38.894245"},"revision_date":{"kind":"timestamp","value":"2014-11-19T21:19:23","string":"2014-11-19T21:19:23"},"committer_date":{"kind":"timestamp","value":"2014-11-19T21:19:23","string":"2014-11-19T21:19:23"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":
{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"print \"Starting map.py\"\n\nimport numpy\nimport matplotlib as mpl\n\nmpl.use('Agg')\n\nimport matplotlib.pyplot as plt\n\ncm = plt.cm.get_cmap('jet')\na=open('map2.csv')\nvar=numpy.loadtxt(a)\nprint \"Opened Files\"\n\ny=var[:,0:1]\nx=var[:,1:2]\ncolors=var[:,2:3]\nprint \"got data\"\n\nplt.scatter(x,y,c=colors,s=1,cmap=cm,marker=\"s\",edgecolors='none',alpha=0.5)\nplt.savefig('map2.png',bbox_inches='tight', transparent=True,pad_inches=0)\n\nprint \"done map.py\"\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1578,"cells":{"__id__":{"kind":"number","value":1778116466290,"string":"1,778,116,466,290"},"blob_id":{"kind":"string","value":"687c0304e698cd6690be517bb1e2f1bf7a8e5c14"},"directory_id":{"kind":"string","value":"a872cd30172e5c3d793b6cd387324bb7b59247cf"},"path":{"kind":"string","value":"/github_to_html/footer.py"},"content_id":{"kind":"string","value":"2b6a0e14fc48d481678fa504ee3366d720ada062"},"detected_licenses":{"kind":"list like","value":["CC0-1.0"],"string":"[\n 
\"CC0-1.0\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"k-satoda/cpprefjp-andare"},"repo_url":{"kind":"string","value":"https://github.com/k-satoda/cpprefjp-andare"},"snapshot_id":{"kind":"string","value":"ff6ea24fa5d71b87c9cdb583510b29bd29b881a6"},"revision_id":{"kind":"string","value":"f165292b809f9f7333182ed5e07d1de570cdffa4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-16T20:22:37.626336","string":"2021-01-16T20:22:37.626336"},"revision_date":{"kind":"timestamp","value":"2014-04-24T12:57:26","string":"2014-04-24T12:57:26"},"committer_date":{"kind":"timestamp","value":"2014-04-24T12:57:26","string":"2014-04-24T12:57:26"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#coding: utf-8\nimport markdown\nfrom markdown.util import etree\n\nclass FooterExtension(markdown.Extension):\n \"\"\" Footer Extension. 
\"\"\"\n\n def __init__(self, configs):\n # デフォルトの設定\n self.config = {\n 'url' : [None, 'URL'],\n }\n\n # ユーザ設定で上書き\n for key, value in configs:\n self.setConfig(key, value)\n\n def extendMarkdown(self, md, md_globals):\n footer = FooterTreeprocessor()\n footer.config = self.getConfigs()\n md.registerExtension(self)\n md.treeprocessors.add('footer', footer, '_begin')\n\nclass FooterTreeprocessor(markdown.treeprocessors.Treeprocessor):\n \"\"\" Build and append footnote div to end of document. \"\"\"\n\n def _make_footer(self):\n footer = etree.Element('footer')\n a = etree.SubElement(footer, 'a')\n a.set('href', self.config['url'])\n a.text = u'編集'\n return footer\n\n def run(self, root):\n footer = self._make_footer()\n root.append(footer)\n\ndef makeExtension(configs=[]):\n return FooterExtension(configs=configs)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1579,"cells":{"__id__":{"kind":"number","value":9749575778926,"string":"9,749,575,778,926"},"blob_id":{"kind":"string","value":"1d0b75016a895ca016f3894f8ca5c74d6ab57b62"},"directory_id":{"kind":"string","value":"64f95bcc83ac44292a6bc194fe64167704479b5d"},"path":{"kind":"string","value":"/Bridge.py"},"content_id":{"kind":"string","value":"71247a7d6ae8ebb46fc2af079242180d079fe02a"},"detected_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"pisupat/Everpix-Unarchiver"},"repo_url":{"kind":"string","value":"https://github.com/pisupat/Everpix-Unarchiver"},"snapshot_id":{"kind":"string","value":"ef89716db24b054cf85232c7a0e08fb065a29b44"},"revision_id":{"kind":"string","value":"c43458a4394e68a3c20fb384e1eb44cee8411f63"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T00:55:11.508411","string":"2021-01-18T00:55:11.508411"},"revision_date":{"kind":"timestamp","value":"2014-01-05T04:16:43","string":"2014-01-05T04:16:43"},"committer_date":{"kind":"timestamp","value":"2014-01-05T04:16:43","string":"2014-01-05T04:16:43"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import cPickle\nimport zlib\n\ndef decode(data):\n return 
cPickle.loads(zlib.decompress(data))\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1580,"cells":{"__id__":{"kind":"number","value":3358664473248,"string":"3,358,664,473,248"},"blob_id":{"kind":"string","value":"243e6fcabf57579dd1475678dc373b83bb595fab"},"directory_id":{"kind":"string","value":"b9418eaf0d9c4760b139afc7a683f5d2dc98e3b2"},"path":{"kind":"string","value":"/src/data_collection/PERIxml.py"},"content_id":{"kind":"string","value":"7ee45dbffafa04a56a106b9dd7273ab322113f9e"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"heathharrelson/PerfTrack"},"repo_url":{"kind":"string","value":"https://github.com/heathharrelson/PerfTrack"},"snapshot_id":{"kind":"string","value":"42cd538f287040d18cf177337aeef8b13bc21168"},"revision_id":{"kind":"string","value":"62e7ef947cb154f3f0068db9bc940554ecf01553"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-16T11:45:31.002789","string":"2020-04-16T11:45:31.002789"},"revision_date":{"kind":"timestamp","value":"2013-08-16T19:03:25","string":"2013-08-16T19:03:25"},"committer_date":{"kind":"timestamp","value":"2013-08-16T19:03:25","string":"2013-08-16T19:03:25"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{
"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\n\nimport xml.dom.minidom\nimport datetime\n\n# This class is used for generating PERI xml\n\nclass PERIxml:\n identifier = 0\n def __init__(self):\n self.build = None\n self.run = None\n self.genPERIheader()\n # what function is used to create each type\n self.periCreateMap = {}\n self.periCreateMap[\"peri:resourceSet\"] = \"createResourceSet\"\n self.periCreateMap[\"peri:resource\"] = \"createResource\"\n self.periCreateMap[\"peri:nodeList\"] = \"createNodeList\"\n self.periCreateMap[\"peri:node\"] = \"createNode\"\n self.periCreateMap[\"peri:memory\"] = \"createMemory\"\n self.periCreateMap[\"peri:cpu\"] = \"createCpu\"\n # what function is used to add an attribute to each type\n self.periAttrMap = {}\n self.periAttrMap[\"peri:memory\"] = \"setMemoryAttribute\"\n self.periAttrMap[\"peri:nodeList\"] = \"setNodeListAttribute\"\n self.periAttrMap[\"peri:node\"] = \"setNodeAttribute\"\n self.periAttrMap[\"peri:cpu\"] = \"setCpuAttribute\"\n # when we look up elements, what type is their \"name\"\n self.tagMap = {}\n self.tagMap['peri:cpu'] = (\"attribute\",\"index\")\n self.tagMap['peri:memory'] = (None,\"\")\n self.tagMap['peri:runrules'] = (None,\"\")\n self.tagMap['peri:resourceSet'] = (None,\"\")\n self.tagMap['peri:resource'] = (None,\"\")\n self.tagMap['peri:nodeList'] = (None,\"\")\n self.tagMap['peri:node'] = (\"element\",\"peri:nodeName\")\n\n\n def getRoot(self):\n return self.doc\n\n def PERI_nvp(self, name, value, parent):\n nv = self.doc.createElement(\"peri:nvp\")\n n = self.doc.createAttribute(\"name\")\n n.value = name\n v = self.doc.createTextNode(value)\n nv.appendChild(v)\n nv.setAttributeNode(n)\n if parent:\n parent.appendChild(nv)\n \n def PERI_person(self, parent=None, userName=None, realName=None, email=None, phone=None, group=None):\n person = 
self.doc.createElement(\"peri:person\")\n if userName:\n user = self.doc.createElement(\"user\")\n name = self.doc.createTextNode(userName)\n user.appendChild(name)\n person.appendChild(user)\n if realName:\n user = self.doc.createElement(\"realName\")\n name = self.doc.createTextNode(realName)\n user.appendChild(name)\n person.appendChild(user)\n if email:\n user = self.doc.createElement(\"email\")\n name = self.doc.createTextNode(email)\n user.appendChild(name)\n person.appendChild(user)\n if phone:\n user = self.doc.createElement(\"phone\")\n name = self.doc.createTextNode(phone)\n user.appendChild(name)\n person.appendChild(user)\n if group:\n user = self.doc.createElement(\"group\")\n name = self.doc.createTextNode(group)\n user.appendChild(name)\n person.appendChild(user)\n if parent:\n parent.appendChild(person)\n return person\n \n def PERI_operatingSystem(self, osName, osVersion, osRelease, parent):\n os = self.doc.createElement(\"peri:operatingSystem\")\n n = self.doc.createElement(\"name\")\n name = self.doc.createTextNode(osName)\n n.appendChild(name)\n os.appendChild(n)\n if osVersion:\n v = self.doc.createElement(\"version\")\n vers = self.doc.createTextNode(osVersion)\n v.appendChild(vers)\n os.appendChild(v)\n if osRelease:\n details = self.doc.createElement(\"peri:details\")\n self.PERI_nvp(\"release type\", osRelease, details)\n os.appendChild(details)\n if parent:\n parent.appendChild(os)\n return os\n \n def PERI_time(self, time=None, parent=None):\n timeElem = self.doc.createElement(\"peri:time\")\n t = self.doc.createAttribute(\"value\")\n if time:\n t.value = time\n else: \n t.value = (str(datetime.datetime.isoformat(datetime.datetime.now())),'.')[0]\n timeElem.setAttributeNode(t)\n if parent:\n parent.appendChild(timeElem)\n return timeElem\n\n def PERI_file(self, value, which, parent):\n fc = self.doc.createElement(\"peri:file\")\n if parent:\n parent.appendChild(fc)\n if which == \"abspath\":\n f = self.doc.createAttribute(\"abspath\")\n 
f.value = value\n fc.setAttributeNode(f)\n elif which == \"path-filename\":\n f = self.doc.createAttribute(\"path\")\n f.value = value\n fc.setAttributeNode(f)\n f = self.doc.createAttribute(\"filename\")\n parts = value.split('/') # extract the filename portion of the path\n f.value = parts[len(parts)-1]\n fc.setAttributeNode(f)\n return fc\n\n \n def genPERIheader(self):\n # get the XML document ready and initialized\n self.doc = xml.dom.minidom.Document()\n self.rootElem = self.doc.createElement(\"peri:runrules\")\n self.doc.appendChild(self.rootElem)\n # set namespace\n ns = self.doc.createAttribute(\"xmlns:peri\")\n ns.value = \"http://peri-scidac.org/\"\n self.rootElem.setAttributeNode(ns)\n # create id\n id = self.doc.createAttribute(\"id\")\n id.value = str(self.identifier)\n self.identifier += 1\n self.rootElem.setAttributeNode(id)\n # add time element to root\n self.PERI_time(None, self.rootElem)\n \n def createRun(self, name=None, parent=None):\n # create a peri:run element, if a parent element is sent in, we will use\n # that, otherwise, the run element becomes a child of root\n self.run = self.doc.createElement(\"peri:run\")\n if parent:\n parent.appendChild(self.run)\n else:\n self.rootElem.appendChild(self.run)\n return self.run\n\n def getRun(self):\n return self.run\n\n def createBuild(self, name=None, parent=None):\n # create peri:transformationSet and peri:transformation elements, \n # we are modeling the build as a transformation of type compile/link\n # if a parent element is sent in, we will use\n # that, otherwise, the run element becomes a child of root\n transE = self.doc.createElement(\"peri:transformationSet\")\n self.build = self.doc.createElement(\"peri:transformation\")\n transE.appendChild(self.build)\n # transformation type\n type = self.doc.createElement(\"type\")\n ty = self.doc.createTextNode(\"compile/link\")\n type.appendChild(ty)\n self.build.appendChild(type)\n if parent:\n parent.appendChild(transE)\n else:\n 
self.rootElem.appendChild(transE)\n return self.build\n\n def getBuild(self):\n return self.build\n\n def createCompiler(self, name, parent=None):\n # create a compiler, we are modeling compilers as a peri:resource\n # if a parent is sent in, we will use that,\n # otherwise, the compiler is a child of the build\n compiler = self.doc.createElement(\"peri:resource\")\n type = self.doc.createElement(\"type\")\n t = self.doc.createTextNode(\"compiler\")\n type.appendChild(t)\n compiler.appendChild(type)\n nme = self.doc.createElement(\"name\")\n n = self.doc.createTextNode(name)\n nme.appendChild(n)\n compiler.appendChild(nme)\n if parent:\n parent.appendChild(compiler)\n else:\n self.build.appendChild(compiler)\n return compiler\n\n #def setCompilerName(self, compiler, CompilerName):\n #nme = self.doc.createElement(\"name\")\n #n = self.doc.createTextNode(CompilerName)\n #nme.appendChild(n)\n #compiler.appendChild(nme)\n\n def setCompilerAttribute(self, compiler, nme, val):\n E = self.doc.createElement(nme)\n e = self.doc.createTextNode(val)\n E.appendChild(e)\n compiler.appendChild(E)\n\n def createLibrarySet(self, name=None, parent=None):\n #model library set as resource\n res = self.doc.createElement(\"peri:resource\")\n libs = self.doc.createElement(\"peri:libraries\")\n res.appendChild(libs)\n if parent:\n parent.appendChild(res)\n return libs\n \n def createLibrary(self, name, parent):\n lib = self.doc.createElement(\"peri:library\")\n self.PERI_file(name, \"path-filename\", lib)\n if parent:\n parent.appendChild(lib)\n return lib\n\n def setLibraryAttribute(self, lib, name, val):\n type = self.doc.createElement(name)\n t = self.doc.createTextNode(val)\n type.appendChild(t)\n lib.appendChild(type)\n\n \n def createTime(self,value, parent):\n time = self.PERI_time(value,parent)\n return time\n\n def createApplication(self, AppName, parent):\n # create a peri:program element, enclosed in a peri:resource\n res = self.doc.createElement(\"peri:resource\")\n prog = 
self.doc.createElement(\"peri:program\")\n self.PERI_nvp(\"name\", AppName, prog)\n res.appendChild(prog)\n parent.appendChild(res)\n return prog\n\n def setApplicationAttribute(self, app, name, val):\n self.PERI_nvp(name, val, app)\n\n def createPerson(self, userName, parent):\n # modeling a person as a resource\n res = self.doc.createElement(\"peri:resource\")\n person = self.PERI_person( res, userName)\n if parent:\n parent.appendChild(res)\n return person\n\n def createEnvironment(self, name=None, parent=None):\n env = self.doc.createElement(\"peri:environment\")\n parent.appendChild(env)\n return env\n\n def setEnvironmentAttribute(self,env, nme, val):\n self.PERI_nvp(nme, val, env)\n\n def createExecutable(self, exeName, parent):\n # modeling the executable as an output file of the build transformation\n oSet = self.doc.createElement(\"peri:outputs\")\n file = self.PERI_file(exeName, \"abspath\", oSet)\n if parent:\n parent.appendChild(oSet)\n return file\n\n def createMachineNode(self, nodeName, parent):\n # the machine is also a resource element\n res = self.doc.createElement(\"peri:resource\")\n node = self.createNode(nodeName, res)\n if parent:\n parent.appendChild(res)\n return node\n\n def createOperatingSystem(self, OSName, parent): \n # the build doesn't have an OS in it, so we model it as a \n # resource element. 
However, the run does have an OS, \n # so we don't need a resource element\n if parent == self.build:\n res = self.doc.createElement(\"peri:resource\")\n newParent = res\n parent.appendChild(newParent)\n else:\n newParent = parent\n os = self.doc.createElement(\"peri:operatingSystem\")\n n = self.doc.createElement(\"name\")\n name = self.doc.createTextNode(OSName)\n n.appendChild(name)\n os.appendChild(n)\n if newParent:\n newParent.appendChild(os)\n return os\n\n def setOperatingSystemAttribute(self, os, name, value):\n if name == \"version\":\n v = self.doc.createElement(name)\n vers = self.doc.createTextNode(value)\n v.appendChild(vers)\n os.appendChild(v)\n elif name == \"release type\":\n details = self.doc.createElement(\"peri:details\")\n self.PERI_nvp(name, value, details)\n os.appendChild(details)\n\n\n def createProgram(self, name, parent):\n prog = self.doc.createElement(\"peri:program\")\n n = self.doc.createElement(\"name\")\n v = self.doc.createTextNode(name)\n n.appendChild(v)\n prog.appendChild(n)\n if parent:\n parent.appendChild(prog)\n return prog\n\n def setProgramAttribute(self, prog, name, value):\n if name == \"version\":\n v = self.doc.createElement(name)\n ver = self.doc.createAttribute(\"number\")\n ver.value = value\n v.setAttributeNode(ver)\n prog.appendChild(v)\n\n def createScheduler(self, name, parent):\n sched = self.doc.createElement(\"peri:scheduler\")\n set = self.doc.createElement(\"peri:settings\")\n self.PERI_nvp(\"name\", name, set)\n sched.appendChild(set)\n if parent:\n parent.appendChild(sched)\n return sched\n\n def setSchedulerAttribute(self, sched, name, value):\n if name == \"version\" and value != \"\":\n [set] = sched.getElementsByTagName(\"peri:settings\")\n self.PERI_nvp(\"version\", value, set)\n \n def createQueue(self,name=None, parent=None):\n queue = self.doc.createElement(\"peri:queueContents\")\n if parent: \n parent.appendChild(queue)\n return queue\n\n def createSchedulerJob(self, name=None, parent=None):\n 
job = self.doc.createElement(\"peri:schedulerJob\")\n if parent:\n parent.appendChild(job)\n return job\n \n def setSchedulerJobAttribute(self, job, name, value):\n if name == \"jobid\":\n jobid = self.doc.createElement(\"jobid\")\n id = self.doc.createAttribute(\"id\")\n id.value = value\n jobid.setAttributeNode(id)\n job.appendChild(jobid)\n elif name == \"programName\":\n pgname = self.doc.createElement(\"programName\")\n pgn = self.doc.createTextNode(value)\n pgname.appendChild(pgn)\n job.appendChild(pgname)\n elif name == \"hoursRunning\":\n hours = self.doc.createElement(\"hoursRunning\")\n if value.find(\":\") >= 0:\n s = \"\"\n if value.count(\":\") == 2:\n h,m,s = value.split(\":\")\n elif value.count(\":\") == 1:\n h,m = value.split(\":\")\n ht = int(h) + float(m)/60.0\n if s != \"\":\n ht += float(s)/60.0/60.0\n elif value.strip() == \"N/A\":\n ht = 0.0\n else:\n ht = value\n hs = self.doc.createTextNode(str(ht))\n hours.appendChild(hs)\n job.appendChild(hours)\n elif name == \"status\":\n stats = self.doc.createElement(\"status\")\n sts = self.doc.createTextNode(value)\n stats.appendChild(sts)\n job.appendChild(stats)\n\n def createBatchFile(self, batchName, parent):\n # batch file is also modeled as a peri:resource\n res = self.doc.createElement(\"peri:resource\")\n bf = self.doc.createElement(\"batchFile\")\n name = self.doc.createElement(\"name\")\n n = self.doc.createTextNode(batchName)\n name.appendChild(n)\n bf.appendChild(name)\n res.appendChild(bf)\n if parent: \n parent.appendChild(res)\n return bf\n\n def setBatchFileAttribute(self, batch, name, value):\n reses = self.doc.createElement(name)\n rs = self.doc.createTextNode(value)\n reses.appendChild(rs)\n batch.appendChild(reses)\n\n def createFileSystemSet(self, name=None,parent=None):\n res = self.doc.createElement(\"peri:resource\")\n if parent: \n parent.appendChild(res)\n return res\n\n def createFileSystem(self, name, parent):\n fs = self.doc.createElement(\"fileSystem\")\n fsn = 
self.doc.createElement(\"name\")\n n = self.doc.createTextNode(name)\n fsn.appendChild(n)\n fs.appendChild(fsn)\n if parent:\n parent.appendChild(fs)\n return fs\n\n def setFileSystemAttribute(self, fs, name, value):\n if name == \"version\" and value != \"\":\n fsn = self.doc.createElement(name)\n n = self.doc.createTextNode(value)\n fsn.appendChild(n)\n fs.appendChild(fsn)\n\n def createDevice(self, name, parent):\n dev = self.doc.createElement(\"device\")\n devn = self.doc.createElement(\"name\")\n n = self.doc.createTextNode(name)\n devn.appendChild(n)\n dev.appendChild(devn)\n if parent: \n parent.appendChild(dev)\n return dev\n\n def addDeviceAttribute(self, dev, name, val):\n devn = self.doc.createElement(name)\n n = self.doc.createTextNode(val)\n devn.appendChild(n)\n dev.appendChild(devn)\n\n def createInputs(self, name=None, parent=None):\n iset = self.doc.createElement(\"peri:inputs\")\n if parent:\n parent.appendChild(iset)\n return iset\n \n def createFile(self, fullname, parent):\n file = self.PERI_file(fullname, \"abspath\", parent)\n return file\n\n def createResourceSet(self, name=None, parent=None):\n resSet = self.doc.createElement(\"peri:resourceSet\")\n if parent:\n parent.appendChild(resSet)\n return resSet\n\n def createResource(self, name=None, parent=None):\n res = self.doc.createElement(\"peri:resource\")\n if parent:\n parent.appendChild(res)\n return res\n\n def createNodeList(self, name=None, parent=None):\n res = self.doc.createElement(\"peri:nodeList\")\n if parent:\n parent.appendChild(res)\n return res\n\n def setNodeListAttribute(self, nl, name, val):\n if name == \"concurrency\":\n conc = self.doc.createAttribute(name)\n conc.value = val\n nl.setAttributeNode(conc)\n\n def createNode(self, nodeName=None, parent=None):\n node = self.doc.createElement(\"peri:node\")\n name = self.doc.createElement(\"peri:nodeName\")\n n = self.doc.createTextNode(nodeName)\n name.appendChild(n)\n node.appendChild(name)\n if parent:\n 
parent.appendChild(node)\n return node\n\n def setNodeAttribute(self, node, name, val):\n self.PERI_nvp(name, val, node)\n \n def createMemory(self, name=None, parent=None):\n mem = self.doc.createElement(\"peri:memory\")\n if parent:\n parent.appendChild(mem)\n return mem\n\n def setMemoryAttribute(self, mem, name, val):\n if name == \"mainKB\":\n mainE = self.doc.createElement(name)\n main = self.doc.createTextNode(val)\n mainE.appendChild(main)\n mem.appendChild(mainE)\n elif name.find(\"cacheKB\") >= 0:\n # a hack... name could be \"L1 cacheKB\" or \"L2 cacheKB\"\n level = \"\"\n if name.upper().startswith(\"L\"): # tell us the level?\n level,name = name.split(\" \")\n E = self.doc.createElement(name)\n if level:\n lev = self.doc.createAttribute(\"level\")\n lev.value = level\n E.setAttributeNode(lev)\n e = self.doc.createTextNode(val)\n E.appendChild(e)\n mem.appendChild(E)\n\n def createCpu(self, name=None, parent=None):\n res = self.doc.createElement(\"peri:cpu\")\n index = self.doc.createAttribute(\"index\")\n index.value = name \n res.setAttributeNode(index)\n if parent:\n parent.appendChild(res)\n return res\n\n def setCpuAttribute(self, cpu, name, val):\n if name == \"MHz\":\n mhzE = self.doc.createElement(name)\n mhz = self.doc.createTextNode(val)\n mhzE.appendChild(mhz)\n cpu.appendChild(mhzE)\n \n def findElement(self, tagName, parent, ident):\n #print \"searching for: %s %s %s\" % (tagName,parent,ident)\n children = self.rootElem.getElementsByTagName(tagName)\n for child in children: \n if child.parentNode == parent:\n if ident == None or ident == \"Unknown\":\n return child # assume only one, so there's no identifier\n ret = self.tagMap[tagName]\n if ret == None:\n #print \"%s: not in self.tagMap\" % tagName\n continue\n type,name = ret\n if type == None:\n return child\n elif type == \"attribute\":\n id = child.getAttribute(name)\n if id == ident:\n return child\n elif type == \"element\":\n chldlist = child.childNodes\n #print chldlist\n for u in 
chldlist: \n #print \"NodeName: %s\" % u.nodeName\n if u.nodeName == name:\n if u.childNodes[0].nodeValue == ident:\n return child\n\n return None\n\n\n def createPERIelement(self, nameHier, typeHier):\n parent = self.rootElem\n nameHier = nameHier.lstrip(\"/\")\n typeHier = typeHier.lstrip(\"/\")\n for name, type in zip(nameHier.split(\"/\"), typeHier.split(\"/\")):\n el = self.findElement(type, parent, name)\n if not el:\n cFunc = self.periCreateMap[type]\n cf = getattr(self,cFunc)\n el = cf(name, parent)\n parent = el\n \n\n def addAttribute(self, nameHier, typeHier, attrName, attrValue):\n parent = self.rootElem\n nameHier = nameHier.lstrip(\"/\")\n typeHier = typeHier.lstrip(\"/\")\n el = None\n elType = \"\"\n for name, type in zip(nameHier.split(\"/\"), typeHier.split(\"/\")):\n el = self.findElement(type, parent, name)\n if not el:\n print \"ERROR: could not find parent of: %s \" % name\n return\n parent = el\n elType = type\n aFunc = self.periAttrMap[elType]\n af = getattr(self,aFunc)\n af(el, attrName, attrValue)\n\n def writeData(self, fileName):\n oF = open(fileName,'w')\n oF.write(self.doc.toprettyxml(encoding='utf-8'))\n oF.close()\n\n\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1581,"cells":{"__id__":{"kind":"number","value":19524921345338,"string":"19,524,921,345,338"},"blob_id":{"kind":"string","value":"c6ffaa433bc7fc63407e3fb94e42c6994759239b"},"directory_id":{"kind":"string","value":"547aafad1a12f1ca9c2ce0e963839261b2c79abf"},"path":{"kind":"string","value":"/localsms/utils.py"},"content_id":{"kind":"string","value":"0a715c36b669e1c300faa1eb347cea56b1930eaa"},"detected_licenses":{"kind":"list like","value":["Apache-2.0"],"string":"[\n 
\"Apache-2.0\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"SEL-Columbia/localsms"},"repo_url":{"kind":"string","value":"https://github.com/SEL-Columbia/localsms"},"snapshot_id":{"kind":"string","value":"9894384a6073cc9b85e77bbbdc069d5d96b56da8"},"revision_id":{"kind":"string","value":"617042be7909e4c34c754ff1d8f5ee47cf70e93e"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-12-24T14:46:04.533376","string":"2020-12-24T14:46:04.533376"},"revision_date":{"kind":"timestamp","value":"2011-02-25T13:20:49","string":"2011-02-25T13:20:49"},"committer_date":{"kind":"timestamp","value":"2011-02-25T13:20:49","string":"2011-02-25T13:20:49"},"github_id":{"kind":"number","value":1034118,"string":"1,034,118"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import sys\nimport logging\nimport datetime \nimport ConfigParser\nimport urllib2\nfrom serial.serialutil import SerialException\nimport pygsm \nimport httplib2\nfrom localsms.db import ModemLog\n\n\ndef make_logger(config=None,name=None): \n log = logging.getLogger(name) \n log.setLevel(logging.DEBUG)\n ch = logging.FileHandler(config.get(\"app\",\"log_file\"))\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(\n logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"))\n log.addHandler(ch)\n return log\n\n \ndef ping_remote(config=None,log=None):\n \"\"\"\n Check to 
see if the remote server is runnning.\n \"\"\"\n try: \n response = urllib2.urlopen(\n \"http://%s:%s/sms/ping\" % (\n config.get(\"remote\",\"host\"),\n config.get(\"remote\",\"port\")))\n if response.code == 200: # make sure response is a 200 not 405 \n return True\n else: \n return False \n except Exception,e: \n log.error(e) \n return False\n\n \ndef make_modem_log(modem,msg,msgType): \n ModemLog(time=str(datetime.datetime.now()),\n modem=str(modem),\n msg=str(msg),\n msgType=str(msgType))\n\ndef get_modem(config,log): \n try:\n log.info(\"Trying to connect to the modem\")\n return pygsm.GsmModem(\n port=config.get(\"modem\",\"port\"),\n logger=make_modem_log,\n baudrate=config.get(\"modem\",\"baudrate\"))\n except SerialException,e:\n log.error(\"Unable to conntect to the modem %s \"% e)\n sys.exit(0)\n\ndef get_config(path): \n config = ConfigParser.RawConfigParser()\n config.read(path) \n return config \n\ndef make_http(config): \n h = httplib2.Http() \n h.add_credentials(\n config.get(\"remote\",\"username\"),\n config.get(\"remote\",\"password\"))\n return h \n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":1582,"cells":{"__id__":{"kind":"number","value":8615704431166,"string":"8,615,704,431,166"},"blob_id":{"kind":"string","value":"f115244db39a0d62a37086a4f21debb47b38c5d3"},"directory_id":{"kind":"string","value":"8fcbc53097d1e468829985fe83a74a2f3f8abf35"},"path":{"kind":"string","value":"/smxasn_invoices.py"},"content_id":{"kind":"string","value":"f48b935223b9a4a4fb3b6fd4ea54488440829b48"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"OrlandoHdz/SmxAsn"},"repo_url":{"kind":"string","value":"https://github.com/OrlandoHdz/SmxAsn"},"snapshot_id":{"kind":"string","value":"354a09bed5a797115604efabacde822808e34178"},"revision_id":{"kind":"string","value":"a0982ef432a6bfe510730a8451771fd9fb882bd7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-19T09:20:17.818047","string":"2020-05-19T09:20:17.818047"},"revision_date":{"kind":"timestamp","value":"2014-05-21T19:10:16","string":"2014-05-21T19:10:16"},"committer_date":{"kind":"timestamp","value":"2014-05-21T19:10:16","string":"2014-05-21T19:10:16"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\nimport pyodbc\nfrom meritor_pdf import smx_pdf\nfrom ntlm.smtp import ntlm_authenticate\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\nimport json\n\nclass invoices(object):\n\n\t\"\"\"\n\t\tObtiene las facturas del cliente para enviarlo por correo\n\t\tOrlando Hdz\n\t\t09-May-2014\n\t\"\"\"\n\n\tdef __init(self):\n\t\tself.path_pdfs = \"\"\n\t\tself.dsn = \"\"\n\t\tself.user = \"\"\n\t\tself.password = \"\"\n\t\tself.database = \"\"\n\t\tself.clientes = \"0\"\n\t\tself.address_book = 
[]\n\t\tself.sender = \"\"\n\t\tself.smtp_host = \"\"\n\t\tself.smtp_usuario = \"\"\n\t\tself.smtp_password =\"\"\n\n\n\n\tdef envia_mail(self, archivo):\n\t\tprint \"enviando mail\"\n\n\t\tsubject = \"New Commercial Invoice\"\n\t\tbody = \"Sisamex, has generated a new commercial invoice \\n Attached file\"\n\t\t\n\n\t\tmsg = MIMEMultipart() \n\t\tmsg['From'] = self.sender\n\t\tmsg['To'] = ','.join(self.address_book)\n\t\tmsg['Subject'] = subject\n\t\tmsg.attach(MIMEText(body, 'plain'))\n\t\tpart = MIMEApplication(open(self.path_pdfs + \"/\" + archivo,\"rb\").read())\n\t\tpart.add_header('Content-Disposition', 'attachment', filename=archivo)\n\t\tmsg.attach(part)\n\t\ttext=msg.as_string()\n\n\t\tconnection = smtplib.SMTP(self.smtp_host, 25)\n\t\tconnection.ehlo()\n\t\tntlm_authenticate(connection, self.smtp_usuario, self.smtp_password)\n\t\tconnection.sendmail(self.sender,self.address_book, text)\n\t\tconnection.quit()\t\t\n\n\n\tdef run(self):\n\t\tcon_string = 'DSN=%s;UID=%s;PWD=%s;DATABASE=%s;' % (self.dsn, self.user, self.password, self.database)\n\t\tcnxn = pyodbc.connect(con_string)\n\t\tcursor_mst = cnxn.cursor()\n\n\t\t#obtiene las facturas que tengan registro de vigilancia \n\t\t#tambien que no se hallan enviado anteriormente\n\t\tprint self.clientes\n\t\tcursor_mst.execute(\"\"\"\n\t\t\t\t\t\tselect \n\t\t\t\t\t\t factura, cliente \n\t\t\t\t\t\tfrom \n\t\t\t\t\t\t asn_embarque_enca as a \n\t\t\t\t\t\twhere \n\t\t\t\t\t\t a.cliente in (\"\"\" + self.clientes + \"\"\") \n\t\t\t\t\t\t and a.cancelada = 'F' \n\t\t\t\t\t\t and a.vigilancia_tiempo is not null \n\t\t\t\t\t\t and not exists \n\t\t\t\t\t\t ( select * \n\t\t\t\t\t\t from asn_facturas_enviadas as b \n\t\t\t\t\t\t where a.factura = b.factura)\t\"\"\")\n\t\trows_mst = cursor_mst.fetchall()\n\t\tfor row_mst in rows_mst:\n\t\t\t#obtiene los encabezados\n\t\t\tif row_mst.factura > 0:\n\t\t\t\t\tcursor = cnxn.cursor()\n\t\t\t\t\tprint 'creando factura %d' % 
row_mst.factura\n\t\t\t\t\tcursor.execute(\"\"\"\n\t\t\t\t\t\t\tselect\n\t\t\t\t\t\t\t\tconvert(varchar,fecha,103) as fecha,\n\t\t\t\t\t\t\t isnull(nombre,'') as slodto_r1,\n\t\t\t\t\t\t\t isnull(dat1,'') as slodto_r2,\n\t\t\t\t\t\t\t isnull(dat2,'') as slodto_r3,\n\t\t\t\t\t\t\t isnull(dat3,'') as slodto_r4,\n\t\t\t\t\t\t\t isnull(dat4,'') as slodto_r5,\n\t\t\t\t\t\t\t isnull(embarcadoa,'') as shipto_r1,\n\t\t\t\t\t\t\t isnull(emb_dir1,'') as shipto_r2,\n\t\t\t\t\t\t\t isnull(emb_dir2,'') as shipto_r3,\n\t\t\t\t\t\t\t isnull(emb_dir3,'') as shipto_r4,\n\t\t\t\t\t\t\t isnull(emb_dir4,'') as shipto_r5,\n\t\t\t\t\t\t\t isnull(dat1_mex,'') as aduana_r1,\n\t\t\t\t\t\t\t isnull(dat2_mex,'') as aduana_r2,\n\t\t\t\t\t\t\t isnull(dat3_mex,'') as aduana_r3,\n\t\t\t\t\t\t\t isnull(dat4_mex,'') as aduana_r4,\n\t\t\t\t\t\t\t isnull(dat5_mex,'') as aduana_r5,\n\t\t\t\t\t\t\t isnull(dat1_usa,'') as broker_r1,\n\t\t\t\t\t\t\t isnull(dat2_usa,'') as broker_r2,\n\t\t\t\t\t\t\t isnull(dat3_usa,'') as broker_r3,\n\t\t\t\t\t\t\t isnull(dat4_usa,'') as broker_r4,\n\t\t\t\t\t\t\t isnull(dat5_usa,'') as broker_r5,\n\t\t\t\t\t\t\t isnull(embarque_ref,'') as shipping_order,\n\t\t\t\t\t\t\t convert(varchar,fecha,103) as shipping_date,\n\t\t\t\t\t\t\t isnull(transporte,'') as carrier,\n\t\t\t\t\t\t\t isnull(numero_camion,'') as bl_number,\n\t\t\t\t\t\t\t isnull(terminos_vta,'') as commercial_terms,\n\t\t\t\t\t\t\t isnull(pedimento,'') as clave_pedimento,\n\t\t\t\t\t\t\t isnull(peso_um,'') as peso_um,\n\t\t\t\t\t\t\t isnull(moneda,'') as moneda\n\t\t\t\t\t\t \tfrom \n\t\t\t\t\t\t \tv_factura_reporte\n\t\t\t\t\t\t \twhere \n\t\t\t\t\t\t \tseq=1 \n\t\t\t\t\t\t \tand factura=? 
\"\"\", row_mst.factura)\n\n\t\t\t\t\trow = cursor.fetchone()\n\t\t\t\t\tpdf = smx_pdf()\n\t\t\t\t\tif row:\n\t\t\t\t\t\tpdf.ruta_destino = self.path_pdfs\n\t\t\t\t\t\tpdf.factura = str(row_mst.factura)\n\t\t\t\t\t\tpdf.fecha = row.fecha\n\t\t\t\t\t\tpdf.sold_to_r1 = row.slodto_r1\n\t\t\t\t\t\tpdf.sold_to_r2 = row.slodto_r2\n\t\t\t\t\t\tpdf.sold_to_r3 = row.slodto_r3\n\t\t\t\t\t\tpdf.sold_to_r4 = row.slodto_r4\n\t\t\t\t\t\tpdf.sold_to_r5 = row.slodto_r5\n\t\t\t\t\t\tif len(row.shipto_r3) > 40:\n\t\t\t\t\t\t\trow.shipto_r4 = row.shipto_r1[40:len(row.shipto_r3)]\n\t\t\t\t\t\t\trow.shipto_r3 = row.shipto_r1[0:39]\n\t\t\t\t\t\tpdf.ship_to_r1 = row.shipto_r1\n\t\t\t\t\t\tpdf.ship_to_r2 = row.shipto_r2\n\t\t\t\t\t\tpdf.ship_to_r3 = row.shipto_r3\n\t\t\t\t\t\tpdf.ship_to_r4 = row.shipto_r4\n\t\t\t\t\t\tpdf.ship_to_r5 = row.shipto_r5\n\t\t\t\t\t\tpdf.agente_aduanal_r1 = row.aduana_r1\n\t\t\t\t\t\tpdf.agente_aduanal_r2 = row.aduana_r2\n\t\t\t\t\t\tpdf.agente_aduanal_r3 = row.aduana_r3\n\t\t\t\t\t\tpdf.agente_aduanal_r4 = row.aduana_r4\n\t\t\t\t\t\tpdf.agente_aduanal_r5 = row.aduana_r5\n\t\t\t\t\t\tpdf.us_broker_r1 = row.broker_r1\n\t\t\t\t\t\tpdf.us_broker_r2 = row.broker_r2\n\t\t\t\t\t\tpdf.us_broker_r3 = row.broker_r3\n\t\t\t\t\t\tpdf.us_broker_r4 = row.broker_r4\n\t\t\t\t\t\tpdf.us_broker_r5 = row.broker_r5\n\t\t\t\t\t\tpdf.shipping_order = str(row.shipping_order)\n\t\t\t\t\t\tpdf.shipping_date = row.shipping_date\n\t\t\t\t\t\tpdf.carrier = row.carrier\n\t\t\t\t\t\tpdf.bl_number = str(row.bl_number)\n\t\t\t\t\t\tpdf.comercial_terms = row.commercial_terms\n\t\t\t\t\t\tpdf.clave_pedimento = row.clave_pedimento\n\t\t\t\t\t\tpdf.peso_um = row.peso_um\n\t\t\t\t\t\tpdf.moneda = row.moneda\n\n\n\t\t\t\t\t#obtiene las partidas\n\t\t\t\t\tcursor.close()\n\t\t\t\t\tcursor = cnxn.cursor()\n\t\t\t\t\tcursor.execute(\"\"\"\n\t \t\t\t\t\tselect\n\t \t\t\t\t\t\tseq,\n\t\t\t\t\t\t\t\t\t isnull(parte_cliente,'') as parte_no,\n\t\t\t\t\t\t\t\t\t isnull(descripcion,'') as 
descripcion,\n\t\t\t\t\t\t\t\t\t isnull(descripcion_usa,'') as descripcion2,\n\t\t\t\t\t\t\t\t\t isnull(pais_origen,'') as pais_origen,\n\t\t\t\t\t\t\t\t\t isnull(cant,0) as cantidad,\n\t\t\t\t\t\t\t\t\t isnull(peso,0) as peso,\n\t\t\t\t\t\t\t\t\t isnull(precio,0) as precio,\n\t\t\t\t\t\t\t\t\t isnull(total,0) as total,\n\t\t\t\t\t\t\t\t\t isnull(orden_compra,'') as orden_compra\n\t\t\t\t\t\t\t\t\t from \n\t\t\t\t\t\t\t\t\t v_factura_reporte\n\t\t\t\t\t\t\t\t\t where \n\t\t\t\t\t\t\t\t\t factura=?\n\t\t\t\t\t\t\t\t\t order by seq\n\t\t\t\t\t\t\t\t\t\t\"\"\",row_mst.factura)\n\t\t\t\t\trows = cursor.fetchall()\n\t\t\t\t\tpartidas = {}\n\t\t\t\t\tif rows:\n\t\t\t\t\t\tfor row in rows:\n\t\t\t\t\t\t\tdetalle = []\n\t\t\t\t\t\t\tif row.seq != 99:\n\t\t\t\t\t\t\t\tdetalle.append(row.seq)\n\t\t\t\t\t\t\t\tdetalle.append(row.parte_no)\n\t\t\t\t\t\t\t\tdetalle.append(row.descripcion)\n\t\t\t\t\t\t\t\tdetalle.append(row.descripcion2 + ' PO: ' + row.orden_compra)\n\t\t\t\t\t\t\t\tdetalle.append(row.pais_origen)\n\t\t\t\t\t\t\t\tdetalle.append(str(row.cantidad))\n\t\t\t\t\t\t\t\tdetalle.append(str(row.peso))\n\t\t\t\t\t\t\t\tdetalle.append(str(row.precio))\n\t\t\t\t\t\t\t\tdetalle.append(str(row.total))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tdetalle.append(row.seq+1)\n\t\t\t\t\t\t\t\tdetalle.append('')\n\t\t\t\t\t\t\t\tdetalle.append(row.descripcion)\n\t\t\t\t\t\t\t\tdetalle.append('')\n\t\t\t\t\t\t\t\tdetalle.append('')\n\t\t\t\t\t\t\t\tdetalle.append('')\n\t\t\t\t\t\t\t\tdetalle.append('')\n\t\t\t\t\t\t\t\tdetalle.append('')\n\t\t\t\t\t\t\t\tdetalle.append('')\n\n\t\t\t\t\t\t\tpartidas[row.parte_no] = detalle\n\n\n\n\t\t\t\t\t\tcursor.close()\n\n\t\t\t\t\t\tpdf.partidas = partidas\n\n\t\t\t\t\t\t#esto se va implementar para engranes donde para los empaques\n\t\t\t\t\t\t#obtener el peso total\n\t\t\t\t\t\t#print 'obtiene el peso total'\n\t\t\t\t\t\t#cursor = cnxn.cursor()\n\t\t\t\t\t\t#cursor.execute(\"exec pg_gn_peso_total ?,0,0\",factura)\n\t\t\t\t\t\t#row = 
cursor.fetchone()\n\t\t\t\t\t\t#if row:\n\t\t\t\t\t\t#\tpdf.peso_total = str(row[0])\n\t\t\t\t\t\t#cursor.close()\n\n\t\t\t\t\t\tpdf.build_pdf()\n\n\t \n\t\t\t\t\t\t#registrar la factura \n\t\t\t\t\t\tcursor = cnxn.cursor()\n\t\t\t\t\t\tcursor.execute(\"\"\"\n\t\t \t\t\t\t\t\tinsert into asn_facturas_enviadas \n\t\t \t\t\t\t\t(compania, cliente_mapics, factura, fecha_enviada) \t\n\t\t \t\t\t\t\tvalues (?,?,?, getdate())\n\t\t \t\t\t\t\t\"\"\",72,row_mst.cliente,row_mst.factura)\n\t\t\t\t\t\tcursor.commit()\n\t\t\t\t\t\tcursor.close()\n\n\t\t\t\t\t\t#envia el mail\n\t\t\t\t\t\t#invoices.saludo(self)\n\t\t\t\t\t\tinvoices.envia_mail(self,pdf.archivo_salida)\n\n\t\tcursor_mst.close()\n\n\n\n\n\nif __name__ == \"__main__\":\n\n\t#Carga los parametros\n\twith open('parametros_smx.json') as data_file:\n\t\tdata = json.load(data_file)\n\n\t#Instanciando factura\n\toInvoices = invoices()\n\toInvoices.dsn = data[\"parametros\"][\"dsn\"]\n\toInvoices.user = data[\"parametros\"][\"user\"]\n\toInvoices.password = data[\"parametros\"][\"password\"]\n\toInvoices.database = data[\"parametros\"][\"database\"]\n\toInvoices.clientes = data[\"parametros\"][\"clientes\"]\n\toInvoices.path_pdfs = data[\"parametros\"][\"path_pdfs\"]\n\toInvoices.address_book = data[\"parametros\"][\"address_book\"]\n\toInvoices.sender = data[\"parametros\"][\"smtp_sender\"]\n\toInvoices.smtp_host = data[\"parametros\"][\"smtp_host\"]\n\toInvoices.smtp_usuario = data[\"parametros\"][\"smtp_usuario\"]\n\toInvoices.smtp_password = 
data[\"parametros\"][\"smtp_password\"]\n\n\toInvoices.run()\n\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1583,"cells":{"__id__":{"kind":"number","value":16561393922273,"string":"16,561,393,922,273"},"blob_id":{"kind":"string","value":"8400d954530afbffdc052f56ad59289eb5804a86"},"directory_id":{"kind":"string","value":"f8bf519a783b0cf97d467f4459fa185a11900129"},"path":{"kind":"string","value":"/src/tests/task.py"},"content_id":{"kind":"string","value":"25f41256b77aa2f78a6b3090548741b4ef8dc784"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"VirtUalProtoss/daemonServus"},"repo_url":{"kind":"string","value":"https://github.com/VirtUalProtoss/daemonServus"},"snapshot_id":{"kind":"string","value":"8ee59686deda44a72044d65a9b1a055c4b774f08"},"revision_id":{"kind":"string","value":"59ee602bd60f639997635bd2f17072466b57e8b2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T18:35:10.638733","string":"2016-09-06T18:35:10.638733"},"revision_date":{"kind":"timestamp","value":"2013-04-18T01:11:44","string":"2013-04-18T01:11:44"},"committer_date":{"kind":"timestamp","value":"2013-04-18T01:11:44","string":"2013-04-18T01:11:44"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"g
ha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n\"\"\"\nTask test module.\"\n\"\"\"\n__author__ = 'virtual'\n\nfrom controllers.taskController import taskController\nimport os\nfrom functions import walkDir, getSubnet, getIp\nfrom components.application import application\nfrom components.configBuilder import configBuilder\n\n\nclass TestTask:\n\n def appConfigure(self):\n self.app = application()\n self.app.setup()\n self.app.config = configBuilder( self.app.appDir + os.sep + 'config' )\n self.app.config.buildHostConfig( getSubnet(), getIp() )\n\n def test_init(self):\n self.appConfigure()\n tasksDir = self.app.appDir + os.sep + 'tasks'\n tc = taskController( tasksDir )\n tc.tasks = tc.getActiveTasks( walkDir( tasksDir ) )\n tc.buildDependencyTree( tc.tasks )\n tc.prepareEvents( tc.tasks )\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1584,"cells":{"__id__":{"kind":"number","value":9191230040638,"string":"9,191,230,040,638"},"blob_id":{"kind":"string","value":"2b763b7bd48b65426eab1d7c8cb6d8a72d0579a2"},"directory_id":{"kind":"string","value":"0dcf4f140a2e1434ca448679380f335dfd8a2fbd"},"path":{"kind":"string","value":"/setup.py"},"content_id":{"kind":"string","value":"59e1a400304bb15abd799f92e72295da2d73324b"},"detected_licenses":{"kind":"list like","value":["BSD-3-Clause"],"string":"[\n 
\"BSD-3-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"dmatthes/django-bmf"},"repo_url":{"kind":"string","value":"https://github.com/dmatthes/django-bmf"},"snapshot_id":{"kind":"string","value":"d97f36d4f5225c8fcadeb8211c5fab16e43c119b"},"revision_id":{"kind":"string","value":"3a97167de7841b13f1ddd23b33ae65e98dc49dfd"},"branch_name":{"kind":"string","value":"refs/heads/develop"},"visit_date":{"kind":"timestamp","value":"2020-05-29T11:00:50.829417","string":"2020-05-29T11:00:50.829417"},"revision_date":{"kind":"timestamp","value":"2014-10-09T13:41:52","string":"2014-10-09T13:41:52"},"committer_date":{"kind":"timestamp","value":"2014-10-09T13:41:52","string":"2014-10-09T13:41:52"},"github_id":{"kind":"number","value":23787808,"string":"23,787,808"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":true,"string":"true"},"gha_event_created_at":{"kind":"timestamp","value":"2014-10-09T13:41:52","string":"2014-10-09T13:41:52"},"gha_created_at":{"kind":"timestamp","value":"2014-09-08T11:07:29","string":"2014-09-08T11:07:29"},"gha_updated_at":{"kind":"timestamp","value":"2014-09-08T12:36:14","string":"2014-09-08T12:36:14"},"gha_pushed_at":{"kind":"timestamp","value":"2014-10-09T13:41:52","string":"2014-10-09T13:41:52"},"gha_size":{"kind":"number","value":1413,"string":"1,413"},"gha_stargazers_count":{"kind":"number","value":0,"string":"0"},"gha_forks_count":{"kind":"number","value":0,"string":"0"},"gha_open_issues_count":{"kind":"number","value":0,"string":"0"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# ex:set fileencoding=utf-8:\n\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages, Command\n\nfrom djangobmf import __author__, 
__contact__, __homepage__\n\nCLASSIFIERS = [\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'Topic :: Office/Business :: Groupware',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n]\n\n# Dynamically calculate the version\nversion = __import__('djangobmf').get_version()\n\nsetup(\n name='django-bmf',\n version=version,\n url=__homepage__,\n license='BSD',\n platforms=['OS Independent'],\n description='Business Management Framework with integrated ERP solution written for django',\n long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),\n author=__author__,\n author_email=__contact__,\n packages=find_packages(exclude=['sandbox']),\n classifiers=CLASSIFIERS,\n install_requires=[\n 'django',\n 'pytz',\n 'Pillow',\n 'django-sekizai',\n 'django-mptt',\n 'django-filter',\n 'reportlab',\n 'xhtml2pdf',\n 'markdown',\n ],\n include_package_data=True,\n zip_safe=False,\n test_suite='run_tests.main',\n tests_require = [\n# 'coverage',\n# 'pep8',\n 
],\n)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1585,"cells":{"__id__":{"kind":"number","value":8744553425204,"string":"8,744,553,425,204"},"blob_id":{"kind":"string","value":"48951c886c0354d6c6c812024a7fb2fb6142f2e6"},"directory_id":{"kind":"string","value":"19c723f61451db78c8bb58f4e03ab6d8e42c4306"},"path":{"kind":"string","value":"/views/models.py"},"content_id":{"kind":"string","value":"04909e1ecd1457e69cbe9be49a74f90dd13b97fb"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"kingheaven/davidpaste.com"},"repo_url":{"kind":"string","value":"https://github.com/kingheaven/davidpaste.com"},"snapshot_id":{"kind":"string","value":"29ed98d3065556b98cb70b811cea3cc66292166b"},"revision_id":{"kind":"string","value":"4b32098062433c381e11574c596a672411a3fcf7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-10T19:09:57.290968","string":"2021-01-10T19:09:57.290968"},"revision_date":{"kind":"timestamp","value":"2011-11-27T13:08:33","string":"2011-11-27T13:08:33"},"committer_date":{"kind":"timestamp","value":"2011-11-27T13:08:33","string":"2011-11-27T13:08:33"},"github_id":{"kind":"number","value":554455,"string":"554,455"},"star_events_count":{"kind":"number","value":8,"string":"8"},"fork_events_count":{"kind":"number","value":1,"string":"1"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"
null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n#-*-coding:utf-8-*-\nfrom sqlalchemy import Column, Integer, String, DateTime, Text\nfrom sqlalchemy import Table, MetaData, ForeignKey\nfrom sqlalchemy.orm import relationship, backref\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom database import engine\nfrom datetime import datetime\nimport hashlib\nimport random\n\n__all__ = ['User', 'Syntax', 'Paste', 'Tag']\n\nBase = declarative_base()\nmetadata = Base.metadata\n\npaste_user = Table('pastes_users', metadata,\n Column('paste_id', Integer, ForeignKey('pastes.id')),\n Column('user_id', Integer, ForeignKey('users.id')),\n )\n\nclass Syntax(Base):\n __tablename__ = 'syntax'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(45)) # 显示的名字\n syntax = Column(String(45)) # pygments用的\n\n def __init__(self, name, syntax):\n self.name = name\n self.syntax = syntax\n\n def __repr__(self):\n return \"\" % self.name\n\npaste_tag = Table('pastes_tags', metadata,\n Column('paste_id', Integer, ForeignKey('pastes.id')),\n Column('tag_id', Integer, ForeignKey('tags.id')),\n )\n\nclass Tag(Base):\n __tablename__ = 'tags'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(45), unique=True)\n times = Column(Integer(11), default=1)\n\n def __init__(self, name):\n self.name = name.lower()\n\n def __repr__(self):\n return \"Tag <%s>\" % self.name\n\nclass User(Base):\n __tablename__ = 'users'\n\n id = Column(Integer, primary_key=True)\n email = Column(String(45), unique=True) # 登陆使用的\n nickname = Column(String(45)) # 显示时用的\n password = Column(String(45))\n paste_num = Column(Integer, default=0)\n created_time = Column(DateTime, default=datetime.now())\n modified_time = Column(DateTime, default=datetime.now())\n\n favourites = relationship('Paste', secondary=paste_user, order_by='Paste.created_time', backref=\"users\")\n\n def 
__init__(self, nickname, email, password):\n self.nickname = nickname\n self.email = email\n self.password = hashlib.md5(password).hexdigest()\n\n def __repr__(self):\n return \"\" % (self.nickname, self.email)\n\nclass Paste(Base):\n __tablename__ = 'pastes'\n\n id = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('users.id'))\n syntax_id = Column(Integer, ForeignKey('syntax.id'))\n title = Column(String(45), default=u'未知标题')\n content = Column(Text)\n views = Column(Integer, default=0)\n created_time = Column(DateTime, default=datetime.now())\n modified_time = Column(DateTime, default=datetime.now())\n\n user = relationship(User, backref=backref('pastes'))\n syntax = relationship(Syntax, backref=backref('pastes'))\n tags = relationship('Tag', secondary=paste_tag, order_by=Tag.name, backref=\"pastes\")\n\n def __init__(self, syntax_id, title, content):\n self.user_id = None\n self.syntax_id = syntax_id\n self.title = title\n self.content = content\n\n def __repr__(self):\n return \"\" % (self.title, self.user_id)\n\n def isFavourited(self, user):\n return self in user.favourites\n\nif __name__ == '__main__':\n from database import db_session\n metadata.create_all(engine)\n\n \"\"\"\n syntax_dict = {'python':'Python',\n 'c':'C',\n 'html':('HTML', 'XHTML'),\n 'javascript':('JavaScript', 'JScript'),\n 'css':'CSS',\n 'actionscript':'ActionScript',\n 'applescript':'AppleScript',\n 'awk':'Awk',\n 'erlang':'Erlang',\n 'delphi':'Delphi',\n 'groovy':'Groovy',\n 'haskell':'Haskell',\n 'lua':'Lua',\n 'objective-c':'Objective-C',\n 'php':'PHP',\n 'perl':'Perl',\n 'ruby':'Ruby',\n 'scala':'Scala',\n 'sql':'SQL',\n 'diff':'Diff Files',\n 'xml':'XML',\n 'yaml':'YAML',\n 'java': 'JAVA',\n 'bash':'Bash',\n 'c#':'C#'}\n\n keys = syntax_dict.keys()\n keys.sort()\n for key in keys:\n value = syntax_dict[key]\n if isinstance(value, tuple):\n for name in value:\n syntax = Syntax(name, key)\n db_session.add(syntax)\n if isinstance(value, str):\n syntax = 
Syntax(value, key)\n db_session.add(syntax)\n db_session.commit()\n\n password = ''.join([random.choice('abcdefghij') for i in range(10)])\n user = User(u'未知用户', 'unknown@davidpaste.com', hashlib.md5(password).hexdigest())\n db_session.add(user)\n db_session.commit()\n \"\"\"\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":1586,"cells":{"__id__":{"kind":"number","value":19370302537365,"string":"19,370,302,537,365"},"blob_id":{"kind":"string","value":"f7d3ff8911f5f58f8f60d0ce074575306b9ee54b"},"directory_id":{"kind":"string","value":"8133f07fe57be88dcb412d51706f8448d74643c1"},"path":{"kind":"string","value":"/src/wxyz/libxml_test.py"},"content_id":{"kind":"string","value":"12a4dfd13a44d3d51c921dbfeee30e48092ea66a"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"xtman/mfclient"},"repo_url":{"kind":"string","value":"https://github.com/xtman/mfclient"},"snapshot_id":{"kind":"string","value":"97d2d45312ebdd9284ac8a462a9f4607cfc68313"},"revision_id":{"kind":"string","value":"8957e2ef159b85691b2f607f4f0374da0eb462e2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2015-08-13T17:57:28.529883","string":"2015-08-13T17:57:28.529883"},"revision_date":{"kind":"timestamp","value":"2014-09-30T14:12:15","string":"2014-09-30T14:12:15"},"committer_date":{"kind":"timestamp","value":"2014-09-30T14:12:15","string":"2014-09-30T14:12:15"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_cre
ated_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"'''\nCreated on 31 Aug 2014\n\n@author: wilson\n'''\nimport unittest\nimport os\nimport yx.libxml as XML\n\nclass Test(unittest.TestCase):\n \n def setUp(self):\n self.xml_string = '1234'\n self.xml_file_path = '/tmp/__test_xml_1976__.xml'\n with open(self.xml_file_path, 'w') as f:\n f.send(self.xml_string)\n\n def tearDown(self):\n os.remove(self.xml_file_path)\n\n\n def testXmlDoc(self):\n self.assertIsNotNone(XML.XmlDoc.parse(text=self.xml_string), 'Failed to parse xml string.')\n self.assertIsNotNone(XML.XmlDoc.parse(path=self.xml_file_path), 'Failed to parse xml file: ' + self.xml_file_path)\n with open(self.xml_file_path, 'r') as f:\n self.assertIsNotNone(XML.XmlDoc.parse(file=f), 'Failed to parse xml file: ' + self.xml_file_path)\n\n def testXmlElement(self):\n e = XML.XmlElement(name='a')\n e.set_attribute('attr1', '1')\n e.set_value('value')\n self.assertEqual(e.value(), 'value')\n self.assertEqual(e.attribute('attr1'), '1')\n e.add_element(XML.XmlElement(name='b', value='1', attributes={'c':3}))\n e.add_element(XML.XmlElement(name='b', value='2', attributes={'c':4}))\n self.assertEqual(len(e.values('b/@c')), 2)\n \n def testXmlStringWriter(self):\n w = XML.XmlStringWriter('a')\n w.push('e')\n w.add('b', 3, {'c':'1', 'd':'2'})\n w.pop()\n self.assertEqual(w.document(), '3')\n\nif __name__ == \"__main__\":\n # import sys;sys.argv = ['', 'Test.testName']\n 
unittest.main()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1587,"cells":{"__id__":{"kind":"number","value":16544214038150,"string":"16,544,214,038,150"},"blob_id":{"kind":"string","value":"07a63bc6a44474353e7376db401a09ec29d5d60d"},"directory_id":{"kind":"string","value":"067ccb1f6a5764740cb2c2233f2f44b51ba0f3b7"},"path":{"kind":"string","value":"/django_shopping_cart/shopping/urls.py"},"content_id":{"kind":"string","value":"e17d54f07563537312e6f3da7e43eda9be691b0c"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"hnejadi/django-shopping-cart-1"},"repo_url":{"kind":"string","value":"https://github.com/hnejadi/django-shopping-cart-1"},"snapshot_id":{"kind":"string","value":"d33e182253d4264caf9af9e764ce12b0a40c99a0"},"revision_id":{"kind":"string","value":"392a70d0137b2f5bc8d4d0b77c22f209a499b9ae"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2018-05-30T06:49:54.086933","string":"2018-05-30T06:49:54.086933"},"revision_date":{"kind":"timestamp","value":"2014-12-28T20:33:55","string":"2014-12-28T20:33:55"},"committer_date":{"kind":"timestamp","value":"2014-12-28T20:33:55","string":"2014-12-28T20:33:55"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{
"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from django.conf.urls import patterns, include, url\n\nfrom shopping.views import ShoppingHome, ShoppingProducts, ShoppingProductDetail\n\nurlpatterns = patterns('',\n url(r'^$', ShoppingHome.as_view(), name='shopping.home'),\n url(r'^(?P[\\w ]+)/$', ShoppingProducts.as_view(), name='shopping.product'),\n url(r'^(?P[\\w ]+)/(?P[\\w ]+)/$', ShoppingProductDetail.as_view(), name='shopping.product.detail'),\n)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1588,"cells":{"__id__":{"kind":"number","value":8933531987992,"string":"8,933,531,987,992"},"blob_id":{"kind":"string","value":"459963c132a2f9df7125c4159bfd9c4475e62638"},"directory_id":{"kind":"string","value":"b4b56221f6f3bcf2b6eed39ffcd9a1489d8271da"},"path":{"kind":"string","value":"/psidialogs/examples/easygui_test.py"},"content_id":{"kind":"string","value":"215d23fea4333057441cbeaec5ce54e84e346138"},"detected_licenses":{"kind":"list like","value":["BSD-2-Clause"],"string":"[\n 
\"BSD-2-Clause\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"gregwjacobs/psidialogs"},"repo_url":{"kind":"string","value":"https://github.com/gregwjacobs/psidialogs"},"snapshot_id":{"kind":"string","value":"4d0b80cdc13212df3c6b26130ffc6dc377a9e031"},"revision_id":{"kind":"string","value":"4d61bd0e996b6e88c1241f21aca0ed3f05a76ba7"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-18T10:42:04.804589","string":"2021-01-18T10:42:04.804589"},"revision_date":{"kind":"timestamp","value":"2012-09-13T15:02:06","string":"2012-09-13T15:02:06"},"committer_date":{"kind":"timestamp","value":"2012-09-13T15:02:06","string":"2012-09-13T15:02:06"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#\"\"\"\n#\"\"\"\n#\n#from psidialogs.easygui_api import *\n#import psidialogs\n#\n#def main(backend = ''):\n# if not backend:\n# backend = psidialogs.choice(psidialogs.all_backends(), 'Select backend!')\n# psidialogs.set_backend( force_backend=backend ) \n# _test()\n# \n#TkVersion=''\n#EasyGuiRevisionInfo = ''\n# \n#def _test():\n# \"\"\"\n# copy from easygui.py\n# \"\"\" \n# # simple way to clear the console\n# print \"\\n\" * 100\n# # START DEMONSTRATION DATA ===================================================\n# choices_abc = [\"This is choice 1\", \"And this is choice 2\"]\n# message = \"Pick one! 
This is a huge choice, and you've got to make the right one \" \\\n# \"or you will surely mess up the rest of your life, and the lives of your \" \\\n# \"friends and neighbors!\"\n# title = \"\"\n#\n# # ============================= define a code snippet =========================\n# code_snippet = (\"dafsdfa dasflkj pp[oadsij asdfp;ij asdfpjkop asdfpok asdfpok asdfpok\"*3) +\"\\n\"+\\\n#\"\"\"# here is some dummy Python code\n#for someItem in myListOfStuff:\n# do something(someItem)\n# do something()\n# do something()\n# if somethingElse(someItem):\n# doSomethingEvenMoreInteresting()\n#\n#\"\"\"*16\n# #======================== end of code snippet ==============================\n#\n# #================================= some text ===========================\n# text_snippet = ((\\\n#\"\"\"It was the best of times, and it was the worst of times. The rich ate cake, and the poor had cake recommended to them, but wished only for enough cash to buy bread. The time was ripe for revolution! \"\"\" \\\n#*5)+\"\\n\\n\")*10\n#\n# #===========================end of text ================================\n#\n# intro_message = (\"Pick the kind of box that you wish to demo.\\n\\n\"\n# + \"In EasyGui, all GUI interactions are invoked by simple function calls.\\n\\n\" +\n# \"EasyGui is different from other GUIs in that it is NOT event-driven. It allows\" +\n# \" you to program in a traditional linear fashion, and to put up dialogs for simple\" +\n# \" input and output when you need to. If you are new to the event-driven paradigm\" +\n# \" for GUIs, EasyGui will allow you to be productive with very basic tasks\" +\n# \" immediately. 
Later, if you wish to make the transition to an event-driven GUI\" +\n# \" paradigm, you can move to an event-driven style with a more powerful GUI package\" +\n# \"such as anygui, PythonCard, Tkinter, wxPython, etc.\"\n# + \"\\n\\nEasyGui is running Tk version: \" + str(TkVersion)\n# )\n#\n# #========================================== END DEMONSTRATION DATA\n#\n#\n# while 1: # do forever\n# choices = [\n# \"msgbox\",\n# \"buttonbox\",\n# \"choicebox\",\n# \"multchoicebox\",\n# \"textbox\",\n# \"ynbox\",\n# \"ccbox\",\n# \"enterbox\",\n# \"codebox\",\n# \"integerbox\",\n# \"boolbox\",\n# \"indexbox\",\n# \"filesavebox\",\n# \"fileopenbox\",\n# \"passwordbox\",\n# \"multenterbox\",\n# \"multpasswordbox\",\n# \"diropenbox\"\n#\n# ]\n# choice = choicebox(intro_message, \"EasyGui \" + EasyGuiRevisionInfo, choices)\n#\n# if choice == None: return\n#\n# reply = choice.split()\n#\n# if reply[0] == \"msgbox\":\n# reply = msgbox(\"short message\", \"This is a long title\")\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"buttonbox\":\n# reply = buttonbox()\n# print \"Reply was:\", reply\n#\n# reply = buttonbox(message, \"Demo of Buttonbox with many, many buttons!\", choices)\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"boolbox\":\n# reply = boolbox()\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"integerbox\":\n# reply = integerbox(\n# \"Enter a number between 3 and 333\",\n# \"Demo: integerbox WITH a default value\",\n# 222, 3, 333)\n# print \"Reply was:\", reply\n#\n# reply = integerbox(\n# \"Enter a number between 0 and 99\",\n# \"Demo: integerbox WITHOUT a default value\"\n# )\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"diropenbox\":\n# title = \"Demo of diropenbox\"\n# msg = \"This is a test of the diropenbox.\\n\\nPick the directory that you wish to open.\"\n# d = diropenbox(msg, title)\n# print \"You chose directory...:\", d\n#\n# elif reply[0] == \"fileopenbox\":\n# f = fileopenbox()\n# print \"You chose to open file:\", 
f\n#\n# elif reply[0] == \"filesavebox\":\n# f = filesavebox()\n# print \"You chose to save file:\", f\n#\n# elif reply[0] == \"indexbox\":\n# title = reply[0]\n# msg = \"Demo of \" + reply[0]\n# choices = [\"Choice1\", \"Choice2\", \"Choice3\", \"Choice4\"]\n# reply = indexbox(msg, title, choices)\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"passwordbox\":\n# reply = passwordbox(\"Demo of password box WITHOUT default\"\n# + \"\\n\\nEnter your secret password\", \"Member Logon\")\n# print \"Reply was:\", str(reply)\n#\n# reply = passwordbox(\"Demo of password box WITH default\"\n# + \"\\n\\nEnter your secret password\", \"Member Logon\", \"alfie\")\n# print \"Reply was:\", str(reply)\n#\n# elif reply[0] == \"enterbox\":\n# reply = enterbox(\"Enter the name of your best friend:\", \"Love!\", \"Suzy Smith\")\n# print \"Reply was:\", str(reply)\n#\n# reply = enterbox(\"Enter the name of your worst enemy:\", \"Hate!\")\n# print \"Reply was:\", str(reply)\n#\n# elif reply[0] == \"multenterbox\":\n# msg = \"Enter your personal information\"\n# title = \"Credit Card Application\"\n# fieldNames = [\"Name\",\"Street Address\",\"City\",\"State\",\"ZipCode\"]\n# fieldValues = [] # we start with blanks for the values\n# fieldValues = multenterbox(msg,title, fieldNames)\n#\n# # make sure that none of the fields was left blank\n# while 1:\n# if fieldValues == None: break\n# errmsg = \"\"\n# for i in range(len(fieldNames)):\n# if fieldValues[i].strip() == \"\":\n# errmsg = errmsg + ('\"%s\" is a required field.\\n\\n' % fieldNames[i])\n# if errmsg == \"\": break # no problems found\n# fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)\n#\n# print \"Reply was:\", fieldValues\n#\n# elif reply[0] == \"multpasswordbox\":\n# msg = \"Enter logon information\"\n# title = \"Demo of multpasswordbox\"\n# fieldNames = [\"Server ID\", \"User ID\", \"Password\"]\n# fieldValues = [] # we start with blanks for the values\n# fieldValues = multpasswordbox(msg,title, 
fieldNames)\n#\n# # make sure that none of the fields was left blank\n# while 1:\n# if fieldValues == None: break\n# errmsg = \"\"\n# for i in range(len(fieldNames)):\n# if fieldValues[i].strip() == \"\":\n# errmsg = errmsg + ('\"%s\" is a required field.\\n\\n' % fieldNames[i])\n# if errmsg == \"\": break # no problems found\n# fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)\n#\n# print \"Reply was:\", fieldValues\n#\n#\n# elif reply[0] == \"ynbox\":\n# reply = ynbox(message, title)\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"ccbox\":\n# reply = ccbox(message)\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"choicebox\":\n# longchoice = \"This is an example of a very long option which you may or may not wish to choose.\"*2\n# listChoices = [\"nnn\", \"ddd\", \"eee\", \"fff\", \"aaa\", longchoice\n# , \"aaa\", \"bbb\", \"ccc\", \"ggg\", \"hhh\", \"iii\", \"jjj\", \"kkk\", \"LLL\", \"mmm\" , \"nnn\", \"ooo\", \"ppp\", \"qqq\", \"rrr\", \"sss\", \"ttt\", \"uuu\", \"vvv\"]\n#\n# message = \"Pick something. \" + (\"A wrapable sentence of text ?! \"*30) + \"\\nA separate line of text.\"*6\n# reply = choicebox(message, None, listChoices)\n# print \"Reply was:\", reply\n#\n# message = \"Pick something. \"\n# reply = choicebox(message, None, listChoices)\n# print \"Reply was:\", reply\n#\n# message = \"Pick something. \"\n# reply = choicebox(\"The list of choices is empty!\", None, [])\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"multchoicebox\":\n# listChoices = [\"aaa\", \"bbb\", \"ccc\", \"ggg\", \"hhh\", \"iii\", \"jjj\", \"kkk\"\n# , \"LLL\", \"mmm\" , \"nnn\", \"ooo\", \"ppp\", \"qqq\"\n# , \"rrr\", \"sss\", \"ttt\", \"uuu\", \"vvv\"]\n#\n# message = \"Pick as many choices as you wish.\"\n# reply = multchoicebox(message,\"DEMO OF multchoicebox\", listChoices)\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"textbox\":\n# message = \"Here is some sample text. 
\" * 16\n# reply = textbox(message, \"Text Sample\", text_snippet)\n# print \"Reply was:\", reply\n#\n# elif reply[0] == \"codebox\":\n# message = \"Here is some sample code. \" * 16\n# reply = codebox(message, \"Code Sample\", code_snippet)\n# print \"Reply was:\", reply\n#\n# else:\n# msgbox(\"Choice\\n\\n\" + choice + \"\\n\\nis not recognized\", \"Program Logic Error\")\n# return\n#\n#\n###if __name__ == '__main__':\n### _test()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1589,"cells":{"__id__":{"kind":"number","value":3959959853134,"string":"3,959,959,853,134"},"blob_id":{"kind":"string","value":"72ab469361ab4a3f2f765677a0e75713b9c12958"},"directory_id":{"kind":"string","value":"e1e99f896bde6fe558e6ec160a1ab4c587c99b7a"},"path":{"kind":"string","value":"/helpers/gpodder-0.11.3-hacked/build/lib/gpodder/util.py"},"content_id":{"kind":"string","value":"b433d190b7d72b40303656aac3b18831b2c9bea6"},"detected_licenses":{"kind":"list like","value":["RPL-1.5","LicenseRef-scancode-unknown-license-reference","GPL-1.0-or-later","GPL-2.0-only","GPL-3.0-only","GPL-3.0-or-later"],"string":"[\n \"RPL-1.5\",\n \"LicenseRef-scancode-unknown-license-reference\",\n \"GPL-1.0-or-later\",\n \"GPL-2.0-only\",\n \"GPL-3.0-only\",\n 
\"GPL-3.0-or-later\"\n]"},"license_type":{"kind":"string","value":"non_permissive"},"repo_name":{"kind":"string","value":"uberchicgeekchick/alacast"},"repo_url":{"kind":"string","value":"https://github.com/uberchicgeekchick/alacast"},"snapshot_id":{"kind":"string","value":"321cff569c6fdefafaaa8ab4b85cf69491163f84"},"revision_id":{"kind":"string","value":"246ec411f00f1aed730cf1d008e8162c80672ef6"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-04T20:52:26.881516","string":"2020-05-04T20:52:26.881516"},"revision_date":{"kind":"timestamp","value":"2011-06-27T07:39:59","string":"2011-06-27T07:39:59"},"committer_date":{"kind":"timestamp","value":"2011-06-27T07:39:59","string":"2011-06-27T07:39:59"},"github_id":{"kind":"number","value":51056,"string":"51,056"},"star_events_count":{"kind":"number","value":4,"string":"4"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\n#\n# gPodder - A media aggregator and podcast client\n# Copyright (c) 2005-2008 Thomas Perl and the gPodder Team\n#\n# gPodder is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# gPodder is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n\n#\n# util.py -- Misc utility functions\n# Thomas Perl 2007-08-04\n#\n\n\"\"\"Miscellaneous helper functions for gPodder\n\nThis module provides helper and utility functions for gPodder that \nare not tied to any specific part of gPodder.\n\n\"\"\"\n\nimport gpodder\nfrom gpodder.liblogger import log\n\nimport gtk\nimport gobject\n\nimport os\nimport os.path\nimport glob\nimport stat\n\nimport re\nimport subprocess\nfrom htmlentitydefs import entitydefs\nimport time\nimport locale\nimport gzip\nimport datetime\nimport threading\n\nimport urlparse\nimport urllib\nimport urllib2\nimport httplib\nimport webbrowser\n\nimport feedparser\n\nimport StringIO\nimport xml.dom.minidom\n\n\nif gpodder.interface == gpodder.GUI:\n ICON_UNPLAYED=gtk.STOCK_YES\n ICON_LOCKED='emblem-nowrite'\nelif gpodder.interface == gpodder.MAEMO:\n ICON_UNPLAYED='qgn_list_gene_favor'\n ICON_LOCKED='qgn_indi_KeypadLk_lock'\n\ndef make_directory( path):\n \"\"\"\n Tries to create a directory if it does not exist already.\n Returns True if the directory exists after the function \n call, False otherwise.\n \"\"\"\n if os.path.isdir( path):\n return True\n\n try:\n os.makedirs( path)\n except:\n log( 'Could not create directory: %s', path)\n return False\n\n return True\n\n\ndef normalize_feed_url( url):\n \"\"\"\n Converts any URL to http:// or ftp:// so that it can be \n used with \"wget\". 
If the URL cannot be converted (invalid\n or unknown scheme), \"None\" is returned.\n\n This will also normalize feed:// and itpc:// to http://\n Also supported are phobos.apple.com links (iTunes podcast)\n and itms:// links (iTunes podcast direct link).\n \"\"\"\n\n if not url or len( url) < 8:\n return None\n\n if url.startswith('itms://'):\n url=parse_itunes_xml(url)\n\n # Links to \"phobos.apple.com\"\n url=itunes_discover_rss(url)\n if url is None:\n return None\n \n if url.startswith( 'http://') or url.startswith( 'https://') or url.startswith( 'ftp://'):\n return url\n\n if url.startswith('feed://') or url.startswith('itpc://'):\n return 'http://' + url[7:]\n\n return None\n\n\ndef username_password_from_url( url):\n \"\"\"\n Returns a tuple (username,password) containing authentication\n data from the specified URL or (None,None) if no authentication\n data can be found in the URL.\n \"\"\"\n (username, password)=(None, None)\n\n (scheme, netloc, path, params, query, fragment)=urlparse.urlparse( url)\n\n if '@' in netloc:\n (authentication, netloc)=netloc.rsplit('@', 1)\n if ':' in authentication:\n (username, password)=authentication.split(':', 1)\n username=urllib.unquote(username)\n password=urllib.unquote(password)\n else:\n username=urllib.unquote(authentication)\n\n return (username, password)\n\n\ndef directory_is_writable( path):\n \"\"\"\n Returns True if the specified directory exists and is writable\n by the current user.\n \"\"\"\n return os.path.isdir( path) and os.access( path, os.W_OK)\n\n\ndef calculate_size( path):\n \"\"\"\n Tries to calculate the size of a directory, including any \n subdirectories found. 
The returned value might not be \n correct if the user doesn't have appropriate permissions \n to list all subdirectories of the given path.\n \"\"\"\n if path is None:\n return 0L\n\n if os.path.dirname( path) == '/':\n return 0L\n\n if os.path.isfile( path):\n return os.path.getsize( path)\n\n if os.path.isdir( path) and not os.path.islink( path):\n sum=os.path.getsize( path)\n\n try:\n for item in os.listdir(path):\n try:\n sum += calculate_size(os.path.join(path, item))\n except:\n log('Cannot get size for %s', path)\n except:\n log('Cannot access: %s', path)\n\n return sum\n\n return 0L\n\n\ndef file_modification_datetime(filename):\n \"\"\"\n Returns the modification date of the specified file\n as a datetime.datetime object or None if the modification\n date cannot be determined.\n \"\"\"\n if filename is None:\n return None\n\n if not os.access(filename, os.R_OK):\n return None\n\n try:\n s=os.stat(filename)\n timestamp=s[stat.ST_MTIME]\n return datetime.datetime.fromtimestamp(timestamp)\n except:\n log('Cannot get modification timestamp for %s', filename)\n return None\n\n\ndef file_age_in_days(filename):\n \"\"\"\n Returns the age of the specified filename in days or\n zero if the modification date cannot be determined.\n \"\"\"\n dt=file_modification_datetime(filename)\n if dt is None:\n return 0\n else:\n return (datetime.datetime.now()-dt).days\n\n\ndef file_age_to_string(days):\n \"\"\"\n Converts a \"number of days\" value to a string that\n can be used in the UI to display the file age.\n\n >>> file_age_to_string(0)\n ''\n >>> file_age_to_string(1)\n 'one day ago'\n >>> file_age_to_String(2)\n '2 days ago'\n \"\"\"\n if days == 1:\n return _('one day ago')\n elif days > 1:\n return _('%d days ago') % days\n else:\n return ''\n\n\ndef get_free_disk_space(path):\n \"\"\"\n Calculates the free disk space available to the current user\n on the file system that contains the given path.\n\n If the path (or its parent folder) does not yet exist, this\n 
function returns zero.\n \"\"\"\n\n path=os.path.dirname(path)\n if not os.path.exists(path):\n return 0\n\n s=os.statvfs(path)\n\n return s.f_bavail * s.f_bsize\n\n\ndef format_date(timestamp):\n \"\"\"\n Converts a UNIX timestamp to a date representation. This\n function returns \"Today\", \"Yesterday\", a weekday name or\n the date in %x format, which (according to the Python docs)\n is the \"Locale's appropriate date representation\".\n\n Returns None if there has been an error converting the\n timestamp to a string representation.\n \"\"\"\n seconds_in_a_day=60*60*24\n try:\n diff=int((time.time()+1)/seconds_in_a_day) - int(timestamp/seconds_in_a_day)\n except:\n log('Warning: Cannot convert \"%s\" to date.', timestamp, traceback=True)\n return None\n \n if diff == 0:\n return _('Today')\n elif diff == 1:\n return _('Yesterday')\n elif diff < 7:\n # Weekday name\n return str(datetime.datetime.fromtimestamp(timestamp).strftime('%A'))\n else:\n # Locale's appropriate date representation\n return str(datetime.datetime.fromtimestamp(timestamp).strftime('%x'))\n\n\ndef format_filesize(bytesize, use_si_units=False, digits=2):\n \"\"\"\n Formats the given size in bytes to be human-readable, \n\n Returns a localized \"(unknown)\" string when the bytesize\n has a negative value.\n \"\"\"\n si_units=(\n ( 'kB', 10**3 ),\n ( 'MB', 10**6 ),\n ( 'GB', 10**9 ),\n )\n\n binary_units=(\n ( 'KiB', 2**10 ),\n ( 'MiB', 2**20 ),\n ( 'GiB', 2**30 ),\n )\n\n try:\n bytesize=float( bytesize)\n except:\n return _('(unknown)')\n\n if bytesize < 0:\n return _('(unknown)')\n\n if use_si_units:\n units=si_units\n else:\n units=binary_units\n\n ( used_unit, used_value )=( 'B', bytesize )\n\n for ( unit, value ) in units:\n if bytesize >= value:\n used_value=bytesize / float(value)\n used_unit=unit\n\n return ('%.'+str(digits)+'f %s') % (used_value, used_unit)\n\n\ndef delete_file( path):\n \"\"\"\n Tries to delete the given filename and silently \n ignores deletion errors (if the file 
doesn't exist).\n Also deletes extracted cover files if they exist.\n \"\"\"\n log( 'Trying to delete: %s', path)\n try:\n os.unlink( path)\n # Remove any extracted cover art that might exist\n for cover_file in glob.glob( '%s.cover.*' % ( path, )):\n os.unlink( cover_file)\n\n except:\n pass\n\n\n\ndef remove_html_tags(html):\n \"\"\"\n Remove HTML tags from a string and replace numeric and\n named entities with the corresponding character, so the \n HTML text can be displayed in a simple text view.\n \"\"\"\n # If we would want more speed, we could make these global\n re_strip_tags=re.compile('<[^>]*>')\n re_unicode_entities=re.compile('&#(\\d{2,4});')\n re_html_entities=re.compile('&(.{2,8});')\n\n # Remove all HTML/XML tags from the string\n result=re_strip_tags.sub('', html)\n\n # Convert numeric XML entities to their unicode character\n result=re_unicode_entities.sub(lambda x: unichr(int(x.group(1))), result)\n\n # Convert named HTML entities to their unicode character\n result=re_html_entities.sub(lambda x: unicode(entitydefs.get(x.group(1),''), 'iso-8859-1'), result)\n\n return result.strip()\n\n\ndef torrent_filename( filename):\n \"\"\"\n Checks if a file is a \".torrent\" file by examining its \n contents and searching for the file name of the file \n to be downloaded.\n\n Returns the name of the file the \".torrent\" will download \n or None if no filename is found (the file is no \".torrent\")\n \"\"\"\n if not os.path.exists( filename):\n return None\n\n header=open( filename).readline()\n try:\n header.index( '6:pieces')\n name_length_pos=header.index('4:name') + 6\n\n colon_pos=header.find( ':', name_length_pos)\n name_length=int(header[name_length_pos:colon_pos]) + 1\n name=header[(colon_pos + 1):(colon_pos + name_length)]\n return name\n except:\n return None\n\n\ndef file_extension_from_url( url):\n \"\"\"\n Extracts the (lowercase) file name extension (with dot)\n from a URL, e.g. 
http://server.com/file.MP3?download=yes\n will result in the string \".mp3\" being returned.\n\n This function will also try to best-guess the \"real\" \n extension for a media file (audio, video, torrent) by \n trying to match an extension to these types and recurse\n into the query string to find better matches, if the \n original extension does not resolve to a known type.\n\n http://my.net/redirect.php?my.net/file.ogg => \".ogg\"\n http://server/get.jsp?file=/episode0815.MOV => \".mov\"\n \"\"\"\n (scheme, netloc, path, para, query, fragid)=urlparse.urlparse(url)\n filename=os.path.basename( urllib.unquote(path))\n (filename, extension)=os.path.splitext(filename)\n\n if file_type_by_extension(extension) is not None:\n # We have found a valid extension (audio, video, torrent)\n return extension.lower()\n \n # If the query string looks like a possible URL, try that first\n if len(query.strip()) > 0 and query.find('/') != -1:\n query_url='://'.join((scheme, urllib.unquote(query)))\n query_extension=file_extension_from_url(query_url)\n\n if file_type_by_extension(query_extension) is not None:\n return query_extension\n\n # No exact match found, simply return the original extension\n return extension.lower()\n\n\ndef file_type_by_extension( extension):\n \"\"\"\n Tries to guess the file type by looking up the filename \n extension from a table of known file types. 
Will return \n the type as string (\"audio\", \"video\" or \"torrent\") or \n None if the file type cannot be determined.\n \"\"\"\n types={\n 'audio': [ 'mp3', 'ogg', 'wav', 'wma', 'aac', 'm4a' ],\n 'video': [ 'mp4', 'avi', 'mpg', 'mpeg', 'm4v', 'mov', 'divx', 'flv', 'wmv', '3gp' ],\n 'torrent': [ 'torrent' ],\n }\n\n if extension == '':\n return None\n\n if extension[0] == '.':\n extension=extension[1:]\n\n extension=extension.lower()\n\n for type in types:\n if extension in types[type]:\n return type\n \n return None\n\n\ndef get_tree_icon(icon_name, add_bullet=False, add_padlock=False, icon_cache=None, icon_size=32):\n \"\"\"\n Loads an icon from the current icon theme at the specified\n size, suitable for display in a gtk.TreeView.\n\n Optionally adds a green bullet (the GTK Stock \"Yes\" icon)\n to the Pixbuf returned. Also, a padlock icon can be added.\n\n If an icon_cache parameter is supplied, it has to be a\n dictionary and will be used to store generated icons. \n\n On subsequent calls, icons will be loaded from cache if \n the cache is supplied again and the icon is found in \n the cache.\n \"\"\"\n global ICON_UNPLAYED, ICON_LOCKED\n\n if icon_cache is not None and (icon_name,add_bullet,add_padlock,icon_size) in icon_cache:\n return icon_cache[(icon_name,add_bullet,add_padlock,icon_size)]\n \n icon_theme=gtk.icon_theme_get_default()\n\n try:\n icon=icon_theme.load_icon(icon_name, icon_size, 0)\n except:\n log( '(get_tree_icon) Warning: Cannot load icon with name \"%s\", will use default icon.', icon_name)\n icon=icon_theme.load_icon(gtk.STOCK_DIALOG_QUESTION, icon_size, 0)\n\n if icon and (add_bullet or add_padlock):\n # We'll modify the icon, so use .copy()\n if add_bullet:\n try:\n icon=icon.copy()\n emblem=icon_theme.load_icon(ICON_UNPLAYED, int(float(icon_size)*1.2/3.0), 0)\n (width, height)=(emblem.get_width(), emblem.get_height())\n xpos=icon.get_width() - width\n ypos=icon.get_height() - height\n emblem.composite(icon, xpos, ypos, width, height, 
xpos, ypos, 1, 1, gtk.gdk.INTERP_BILINEAR, 255)\n except:\n log('(get_tree_icon) Error adding emblem to icon \"%s\".', icon_name)\n if add_padlock:\n try:\n icon=icon.copy()\n emblem=icon_theme.load_icon(ICON_LOCKED, int(float(icon_size)/2.0), 0)\n (width, height)=(emblem.get_width(), emblem.get_height())\n emblem.composite(icon, 0, 0, width, height, 0, 0, 1, 1, gtk.gdk.INTERP_BILINEAR, 255)\n except:\n log('(get_tree_icon) Error adding emblem to icon \"%s\".', icon_name)\n\n if icon_cache is not None:\n icon_cache[(icon_name,add_bullet,add_padlock,icon_size)]=icon\n\n return icon\n\n\ndef get_first_line( s):\n \"\"\"\n Returns only the first line of a string, stripped so\n that it doesn't have whitespace before or after.\n \"\"\"\n return s.strip().split('\\n')[0].strip()\n\n\ndef updated_parsed_to_rfc2822( updated_parsed):\n \"\"\"\n Converts a 9-tuple from feedparser's updated_parsed \n field to a C-locale string suitable for further use.\n\n If the updated_parsed field is None or not a 9-tuple,\n this function returns None.\n \"\"\"\n if updated_parsed is None or len(updated_parsed) != 9:\n return None\n\n old_locale=locale.getlocale( locale.LC_TIME)\n locale.setlocale( locale.LC_TIME, 'C')\n result=time.strftime( '%a, %d %b %Y %H:%M:%S GMT', updated_parsed)\n if old_locale != (None, None):\n try:\n locale.setlocale( locale.LC_TIME, old_locale)\n except:\n log('Cannot revert locale to (%s, %s)', *old_locale)\n pass\n return result\n\n\ndef object_string_formatter( s, **kwargs):\n \"\"\"\n Makes attributes of object passed in as keyword \n arguments available as {OBJECTNAME.ATTRNAME} in \n the passed-in string and returns a string with \n the above arguments replaced with the attribute \n values of the corresponding object.\n\n Example:\n\n e=Episode()\n e.title='Hello'\n s='{episode.title} World'\n \n print object_string_formatter( s, episode=e)\n => 'Hello World'\n \"\"\"\n result=s\n for ( key, o ) in kwargs.items():\n matches=re.findall( 
r'\\{%s\\.([^\\}]+)\\}' % key, s)\n for attr in matches:\n if hasattr( o, attr):\n try:\n from_s='{%s.%s}' % ( key, attr )\n to_s=getattr( o, attr)\n result=result.replace( from_s, to_s)\n except:\n log( 'Could not replace attribute \"%s\" in string \"%s\".', attr, s)\n\n return result\n\n\ndef format_desktop_command( command, filename):\n \"\"\"\n Formats a command template from the \"Exec=\" line of a .desktop\n file to a string that can be invoked in a shell.\n\n Handled format strings: %U, %u, %F, %f and a fallback that\n appends the filename as first parameter of the command.\n\n See http://standards.freedesktop.org/desktop-entry-spec/1.0/ar01s06.html\n \"\"\"\n items={\n '%U': 'file://%s' % filename,\n '%u': 'file://%s' % filename,\n '%F': filename,\n '%f': filename,\n }\n\n for key, value in items.items():\n if command.find( key) >= 0:\n return command.replace( key, value)\n\n return '%s \"%s\"' % ( command, filename )\n\n\ndef find_command( command):\n \"\"\"\n Searches the system's PATH for a specific command that is\n executable by the user. 
Returns the first occurence of an\n executable binary in the PATH, or None if the command is \n not available.\n \"\"\"\n\n if 'PATH' not in os.environ:\n return None\n\n for path in os.environ['PATH'].split( os.pathsep):\n command_file=os.path.join( path, command)\n if os.path.isfile( command_file) and os.access( command_file, os.X_OK):\n return command_file\n \n return None\n\n\ndef parse_itunes_xml(url):\n \"\"\"\n Parses an XML document in the \"url\" parameter (this has to be\n a itms:// or http:// URL to a XML doc) and searches all \"\"\n elements for the first occurence of a \"feedURL\"\n element and then continues the search for the string value of\n this key.\n\n This returns the RSS feed URL for Apple iTunes Podcast XML\n documents that are retrieved by itunes_discover_rss().\n \"\"\"\n url=url.replace('itms://', 'http://')\n doc=http_get_and_gunzip(url)\n try:\n d=xml.dom.minidom.parseString(doc)\n except Exception, e:\n log('Error parsing document from itms:// URL: %s', e)\n return None\n last_key=None\n for pairs in d.getElementsByTagName('dict'):\n for node in pairs.childNodes:\n if node.nodeType != node.ELEMENT_NODE:\n continue\n\n if node.tagName == 'key' and node.childNodes.length > 0:\n if node.firstChild.nodeType == node.TEXT_NODE:\n last_key=node.firstChild.data\n\n if last_key != 'feedURL':\n continue\n\n if node.tagName == 'string' and node.childNodes.length > 0:\n if node.firstChild.nodeType == node.TEXT_NODE:\n return node.firstChild.data\n\n return None\n\n\ndef http_get_and_gunzip(uri):\n \"\"\"\n Does a HTTP GET request and tells the server that we accept\n gzip-encoded data. 
This is necessary, because the Apple iTunes\n server will always return gzip-encoded data, regardless of what\n we really request.\n\n Returns the uncompressed document at the given URI.\n \"\"\"\n request=urllib2.Request(uri)\n request.add_header(\"Accept-encoding\", \"gzip\")\n usock=urllib2.urlopen(request)\n data=usock.read()\n if usock.headers.get('content-encoding', None) == 'gzip':\n data=gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()\n return data\n\n\ndef itunes_discover_rss(url):\n \"\"\"\n Takes an iTunes-specific podcast URL and turns it\n into a \"normal\" RSS feed URL. If the given URL is\n not a phobos.apple.com URL, we will simply return\n the URL and assume it's already an RSS feed URL.\n\n Idea from Andrew Clarke's itunes-url-decoder.py\n \"\"\"\n\n if url is None:\n return url\n\n if not 'phobos.apple.com' in url.lower():\n # This doesn't look like an iTunes URL\n return url\n\n try:\n data=http_get_and_gunzip(url)\n (url,)=re.findall(\"itmsOpen\\('([^']*)\", data)\n return parse_itunes_xml(url)\n except:\n return None\n\n\ndef idle_add(func, *args):\n \"\"\"\n This is a wrapper function that does the Right\n Thing depending on if we are running a GTK+ GUI or\n not. If not, we're simply calling the function.\n\n If we are a GUI app, we use gobject.idle_add() to\n call the function later - this is needed for\n threads to be able to modify GTK+ widget data.\n \"\"\"\n if gpodder.interface in (gpodder.GUI, gpodder.MAEMO):\n def x(f, *a):\n f(*a)\n return False\n\n gobject.idle_add(func, *args)\n else:\n func(*args)\n\n\ndef discover_bluetooth_devices():\n \"\"\"\n This is a generator function that returns\n (address, name) tuples of all nearby bluetooth\n devices found.\n\n If the user has python-bluez installed, it will\n be used. 
If not, we're trying to use \"hcitool\".\n\n If neither python-bluez or hcitool are available,\n this function is the empty generator.\n \"\"\"\n try:\n # If the user has python-bluez installed\n import bluetooth\n log('Using python-bluez to find nearby bluetooth devices')\n for name, addr in bluetooth.discover_devices(lookup_names=True):\n yield (name, addr)\n except:\n if find_command('hcitool') is not None:\n log('Using hcitool to find nearby bluetooth devices')\n # If the user has \"hcitool\" installed\n p=subprocess.Popen(['hcitool', 'scan'], stdout=subprocess.PIPE)\n for line in p.stdout:\n match=re.match('^\\t([^\\t]+)\\t([^\\t]+)\\n$', line)\n if match is not None:\n (addr, name)=match.groups()\n yield (name, addr)\n else:\n log('Cannot find either python-bluez or hcitool - no bluetooth?')\n return # <= empty generator\n\n\ndef bluetooth_send_file(filename, device=None, callback_finished=None):\n \"\"\"\n Sends a file via bluetooth using gnome-obex send.\n Optional parameter device is the bluetooth address\n of the device; optional parameter callback_finished\n is a callback function that will be called when the\n sending process has finished - it gets one parameter\n that is either True (when sending succeeded) or False\n when there was some error.\n\n This function tries to use \"bluetooth-sendto\", and if\n it is not available, it also tries \"gnome-obex-send\".\n \"\"\"\n command_line=None\n\n if find_command('bluetooth-sendto'):\n command_line=['bluetooth-sendto']\n if device is not None:\n command_line.append('--device=%s' % device)\n elif find_command('gnome-obex-send'):\n command_line=['gnome-obex-send']\n if device is not None:\n command_line += ['--dest', device]\n\n if command_line is not None:\n command_line.append(filename)\n result=(subprocess.Popen(command_line).wait() == 0)\n if callback_finished is not None:\n callback_finished(result)\n return result\n else:\n log('Cannot send file. 
Please install \"bluetooth-sendto\" or \"gnome-obex-send\".')\n if callback_finished is not None:\n callback_finished(False)\n return False\n \n \ndef format_seconds_to_hour_min_sec(seconds):\n \"\"\"\n Take the number of seconds and format it into a\n human-readable string (duration).\n\n >>> format_seconds_to_hour_min_sec(3834)\n '1 hour, 3 minutes and 54 seconds'\n >>> format_seconds_to_hour_min_sec(2600)\n '1 hour'\n >>> format_seconds_to_hour_min_sec(62)\n '1 minute and 2 seconds'\n \"\"\"\n\n if seconds < 1:\n return _('0 seconds')\n\n result=[]\n\n hours=seconds/3600\n seconds=seconds%3600\n\n minutes=seconds/60\n seconds=seconds%60\n\n if hours == 1:\n result.append(_('1 hour'))\n elif hours > 1:\n result.append(_('%i hours') % hours)\n\n if minutes == 1:\n result.append(_('1 minute'))\n elif minutes > 1:\n result.append(_('%i minutes') % minutes)\n\n if seconds == 1:\n result.append(_('1 second'))\n elif seconds > 1:\n result.append(_('%i seconds') % seconds)\n\n if len(result) > 1:\n return (' '+_('and')+' ').join((', '.join(result[:-1]), result[-1]))\n else:\n return result[0]\n\n\ndef get_episode_info_from_url(url, proxy=None):\n \"\"\"\n Try to get information about a podcast episode by sending\n a HEAD request to the HTTP server and parsing the result.\n\n The return value is a dict containing all fields that \n could be parsed from the URL. This currently contains:\n \n \"length\": The size of the file in bytes\n \"pubdate\": A formatted representation of the pubDate\n\n If the \"proxy\" parameter is used, it has to be the URL \n of the HTTP proxy server to use, e.g. http://proxy:8080/\n \n If there is an error, this function returns {}. 
This will\n only function with http:// and https:// URLs.\n \"\"\"\n if not (url.startswith('http://') or url.startswith('https://')):\n return {}\n\n if proxy is None or proxy.strip() == '':\n (scheme, netloc, path, parms, qry, fragid)=urlparse.urlparse(url)\n conn=httplib.HTTPConnection(netloc)\n start=len(scheme) + len('://') + len(netloc)\n conn.request('HEAD', url[start:])\n else:\n (scheme, netloc, path, parms, qry, fragid)=urlparse.urlparse(proxy)\n conn=httplib.HTTPConnection(netloc)\n conn.request('HEAD', url)\n\n r=conn.getresponse()\n result={}\n\n log('Trying to get metainfo for %s', url)\n\n if 'content-length' in r.msg:\n try:\n length=int(r.msg['content-length'])\n result['length']=length\n except ValueError, e:\n log('Error converting content-length header.')\n\n if 'last-modified' in r.msg:\n try:\n parsed_date=feedparser._parse_date(r.msg['last-modified'])\n pubdate=updated_parsed_to_rfc2822(parsed_date)\n result['pubdate']=pubdate\n except:\n log('Error converting last-modified header.')\n\n return result\n\n\ndef gui_open(filename):\n \"\"\"\n Open a file or folder with the default application set\n by the Desktop environment. This uses \"xdg-open\".\n \"\"\"\n try:\n subprocess.Popen(['xdg-open', filename])\n # FIXME: Win32-specific \"open\" code needed here\n # as fallback when xdg-open not available\n except:\n log('Cannot open file/folder: \"%s\"', folder, sender=self, traceback=True)\n\n\ndef open_website(url):\n \"\"\"\n Opens the specified URL using the default system web\n browser. This uses Python's \"webbrowser\" module, so\n make sure your system is set up correctly.\n \"\"\"\n threading.Thread(target=webbrowser.open, args=(url,)).start()\n\n\ndef sanitize_filename(filename):\n \"\"\"\n Generate a sanitized version of a filename that can\n be written on disk (i.e. remove/replace invalid \n characters and encode in the native language)\n \"\"\"\n # Try to detect OS encoding (by Leonid Ponomarev)\n if 'LANG' in os.environ and '.' 
in os.environ['LANG']:\n lang=os.environ['LANG']\n (language, encoding)=lang.rsplit('.', 1)\n log('Detected encoding: %s', encoding)\n enc=encoding\n else:\n # Using iso-8859-15 here as (hopefully) sane default\n # see http://en.wikipedia.org/wiki/ISO/IEC_8859-1\n log('Using ISO-8859-15 as encoding. If this')\n log('is incorrect, please set your $LANG variable.')\n enc='iso-8859-15'\n\n return re.sub('[/|?*<>:+\\[\\]\\\"\\\\\\]', '_', filename.strip().encode(enc, 'ignore'))\n\n\ndef find_mount_point(directory):\n \"\"\"\n Try to find the mount point for a given directory.\n If the directory is itself a mount point, return\n it. If not, remove the last part of the path and\n re-check if it's a mount point. If the directory\n resides on your root filesystem, \"/\" is returned.\n \"\"\"\n while os.path.split(directory)[0] != '/':\n if os.path.ismount(directory):\n return directory\n else:\n (directory, tail_data)=os.path.split(directory)\n\n return '/'\n\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2011,"string":"2,011"}}},{"rowIdx":1590,"cells":{"__id__":{"kind":"number","value":14766097570036,"string":"14,766,097,570,036"},"blob_id":{"kind":"string","value":"06f149803dbcdcc617125a4fac85da6e7b1c9f75"},"directory_id":{"kind":"string","value":"16cf037467d0263af748860927ec1b860c5c21b6"},"path":{"kind":"string","value":"/src/contentmakeup/markup/markdown.py"},"content_id":{"kind":"string","value":"c713c02b33535ddca1bdd054cf528b62facbf053"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n 
\"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"michalbachowski/pycontentmakeup"},"repo_url":{"kind":"string","value":"https://github.com/michalbachowski/pycontentmakeup"},"snapshot_id":{"kind":"string","value":"322ec1623c049e9237e33a8bb30202bac452c34c"},"revision_id":{"kind":"string","value":"9ccacc8941ff2f11b8fd35704900ee3c9db1aad2"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-01T18:54:42.149993","string":"2021-01-01T18:54:42.149993"},"revision_date":{"kind":"timestamp","value":"2013-11-23T19:01:23","string":"2013-11-23T19:01:23"},"committer_date":{"kind":"timestamp","value":"2013-11-23T19:01:23","string":"2013-11-23T19:01:23"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport markdown2\nfrom contentmakeup.markup import ParserInterface\n\n\nclass Markdown(ParserInterface):\n accepts = ('md', 'markdown')\n\n def parse(self, input_type, output_format, text):\n return 
markdown2.markdown(text)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1591,"cells":{"__id__":{"kind":"number","value":2851858299752,"string":"2,851,858,299,752"},"blob_id":{"kind":"string","value":"af0ecfa90516f9623e6dfde104827ce24aed689b"},"directory_id":{"kind":"string","value":"a91b5726d755d671ec9db2633074df569e0615c6"},"path":{"kind":"string","value":"/botconfig.py"},"content_id":{"kind":"string","value":"fc316bbbdec7f2f73d4a18729d142a47bb29fcaa"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"toplel/-mechmech"},"repo_url":{"kind":"string","value":"https://github.com/toplel/-mechmech"},"snapshot_id":{"kind":"string","value":"0210ff756d8888f6fa7f4430aedadcece5a98bd1"},"revision_id":{"kind":"string","value":"3c8672e674d4d66a0e3d3ed43353da609dad8473"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-15T04:34:35.182045","string":"2020-06-15T04:34:35.182045"},"revision_date":{"kind":"timestamp","value":"2014-03-02T19:44:27","string":"2014-03-02T19:44:27"},"committer_date":{"kind":"timestamp","value":"2014-03-02T19:44:27","string":"2014-03-02T19:44:27"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},
"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"host = \"irc.installgentoo.com\"\nport = 6667\nchannel = \"#2d\"\n\nnickList = [\"Schwarzer|Regen\",\n \"Gundam\",\n \"EVA01\",\n \"Armslave\",\n \"Knightmare\",\n \"TSF\",\n \"Flag\",\n \"Rafale Reload\"]\n\nbuffer_size = 1024\n\ncharset = \"utf-8\"\n\ntell_timeout = 60 * 60 * 24 * 14\n\ncooldown = 60 * 30\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1592,"cells":{"__id__":{"kind":"number","value":3917010221962,"string":"3,917,010,221,962"},"blob_id":{"kind":"string","value":"24cf993f22921d6036f614d3653c627c7deae5d6"},"directory_id":{"kind":"string","value":"de7288e3fdaeb8e6a65d2e45c7bc0097a145dccf"},"path":{"kind":"string","value":"/python/ov.py"},"content_id":{"kind":"string","value":"a1d4dbf5b6b2bce3478cd3e205f17636380031b6"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"magictimelapse/dehumidify"},"repo_url":{"kind":"string","value":"https://github.com/magictimelapse/dehumidify"},"snapshot_id":{"kind":"string","value":"98a98d0e870676494baca9fb810d1214ede3286a"},"revision_id":{"kind":"string","value":"991f5c688092cd84b84b5e29b7ac936334215fed"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T20:54:12.786913","string":"2016-09-05T20:54:12.786913"},"revision_date":{"kind":"timestamp","value":"2014-11-14T21:00:30","string":"2014-11-14T21:00:30"},"committer_date":{"kind":"timestamp","value":"2014-11-14T21:00:30","string":"2014-11-14T21:00:30"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import arduinoControl\n\nac = arduinoControl.ArduinoControl()\n\ndef oo():\n ac.switchOnOff()\n\ndef sb():\n 
ac.switchButton()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1593,"cells":{"__id__":{"kind":"number","value":4123168649509,"string":"4,123,168,649,509"},"blob_id":{"kind":"string","value":"dd4a58e9248c84fbc6e1ea374bb82cd0db197a43"},"directory_id":{"kind":"string","value":"18b9ab4a59a7046fca6d7bda5cb8629956b32c99"},"path":{"kind":"string","value":"/ohloh-app/Project.py"},"content_id":{"kind":"string","value":"874e7553ee81d6a128a61b9bbe82bf9aceb06486"},"detected_licenses":{"kind":"list like","value":["MIT"],"string":"[\n \"MIT\"\n]"},"license_type":{"kind":"string","value":"permissive"},"repo_name":{"kind":"string","value":"4mba/practice"},"repo_url":{"kind":"string","value":"https://github.com/4mba/practice"},"snapshot_id":{"kind":"string","value":"4ceca546606097e81130bbf348483eca18999fed"},"revision_id":{"kind":"string","value":"325abd698fe8b78e9e49a7c70f1898d9f3193021"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-06-12T21:24:31.256547","string":"2020-06-12T21:24:31.256547"},"revision_date":{"kind":"timestamp","value":"2013-07-08T23:39:19","string":"2013-07-08T23:39:19"},"committer_date":{"kind":"timestamp","value":"2013-07-08T23:39:19","string":"2013-07-08T23:39:19"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"ki
nd":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport sys\nimport urllib\nimport elementtree.ElementTree as ET\n\n\nclass Project:\n search = str()\n api_key = \"fUCeo1tkdu5ziEwCTO1A\"\n url = \"http://www.ohloh.net/p.xml?api_key=\"+api_key+\"&query=\"\n license_base = \"https://www.ohloh.net/licenses/\"\n \n \n\n def __init__(self, search):\n \"\"\" 오픈소스 프로젝트 검색어인 'search'를 Project 멤버인 \n self.search에 초기화 한다.\n \"\"\"\n self.search = search\n \n ## Return a list of OSS project information (JSON Type)\n def get_project(self, pageIndex):\n \"\"\" 클래스 생성자에 초기화된 search값의 오픈소스 프로젝트를 검색한다.\n pageIndex는 검색결과의 몇번째 페이지를 보여줄 것인지를 결정한다. \n \"\"\"\n url = self.url + self.search+\"&page=\"+str(pageIndex)\n f = urllib.urlopen(url)\n \n # Parse the response into a structured XML object\n tree = ET.parse(f)\n \n # Did Ohloh return an error?\n elem = tree.getroot()\n error = elem.find(\"error\")\n if error != None:\n print 'Ohloh returned:', ET.tostring(error),\n sys.exit()\n \n # project header\n header = dict()\n project_list = list()\n \n header['items_available'] = elem.find(\"items_available\").text \n header['items_returned'] = elem.find(\"items_returned\").text \n header['first_item_position'] = int(elem.find(\"first_item_position\").text) \n \n # Output all the immediate child properties of an Account\n for projects in elem.findall(\"result/project\"):\n data = dict()\n data['header'] = header\n data['id'] = projects.find(\"id\").text\n data['name'] = projects.find(\"name\").text \n data['homepage_url'] = projects.find(\"homepage_url\").text\n #data['description'] = (projects.find(\"description\").text).replace('\"','')\n data['description'] = \" TEST \"\n\n # 로고 정보를 가지고 있지 않은 프로젝트도 있음 \n if(ET.iselement(projects.find(\"medium_logo_url\"))):\n data['medium_logo_url'] = projects.find(\"medium_logo_url\").text\n data['small_logo_url'] = 
projects.find(\"small_logo_url\").text\n else:\n data['medium_logo_url'] = \"#\"\n data['small_logo_url'] = \"#\"\n \n data['ohloh_url'] = \"https://www.ohloh.net/p/\"+data['id']\n \n licenses = list()\n # Multi-License parsing\n for item in projects.findall(\"licenses/license\"): \n license = dict()\n license['name'] = item.find(\"name\").text\n license['nice_name'] = item.find(\"nice_name\").text\n license['license_url'] = self.license_base+item.find(\"name\").text\n licenses.append(license)\n \n # 라이선스 정보가 아예 없을경우에는 'unknown' 으로 표시함 \n if (len(licenses) == 0):\n item = dict()\n item['name'] = 'unknown'\n item['nice_name'] = 'unknown'\n item['license_url'] = '#'\n licenses.append(item)\n\n data['licenses'] = licenses\n project_list.append(data)\n\n ## Return arrays of JSON type string data\n return project_list\n \n\n def save_result(search_result):\n \"\"\" do save \"\"\"\n return None\n "},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1594,"cells":{"__id__":{"kind":"number","value":3676492029463,"string":"3,676,492,029,463"},"blob_id":{"kind":"string","value":"165c9f9a03e5854f2823252f2d8bf85dd8f76b3b"},"directory_id":{"kind":"string","value":"5443c099e65930c7c2ee70e16b7d8b1c5c4261be"},"path":{"kind":"string","value":"/mysite/events/urls.py"},"content_id":{"kind":"string","value":"c4c328b6c6913670e9c36524917ef05c199068f1"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"vmi356/ajax_calendar"},"repo_url":{"kind":"string","value":"https://github.com/vmi356/ajax_calendar"},"snapshot_id":{"kind":"string","value":"d84cd1623a409af873d6be65444c54531c0f977e"},"revision_id":{"kind":"string","value":"3e8d0b4f77b6de2e9bd28a6fdc7f051cbe352e8a"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2021-01-23T06:44:27.007344","string":"2021-01-23T06:44:27.007344"},"revision_date":{"kind":"timestamp","value":"2013-05-18T03:14:43","string":"2013-05-18T03:14:43"},"committer_date":{"kind":"timestamp","value":"2013-05-18T03:14:43","string":"2013-05-18T03:14:43"},"github_id":{"kind":"null"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import url, patterns, include\nfrom api import EventResource, CategoryResourse\nfrom django.views.generic import TemplateView\nfrom tastypie.api import Api\n\nv1_api = Api(api_name='v1')\nv1_api.register(EventResource())\nv1_api.register(CategoryResourse())\n\nurlpatterns = patterns('',\n url(r'^$', TemplateView.as_view(template_name='month.html'), name='month'),\n url(r'^api/', 
include(v1_api.urls)),\n)\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2013,"string":"2,013"}}},{"rowIdx":1595,"cells":{"__id__":{"kind":"number","value":7533372675551,"string":"7,533,372,675,551"},"blob_id":{"kind":"string","value":"ba38f5923dc9b608f3b0ca1ab64fceef9ee90022"},"directory_id":{"kind":"string","value":"a8fe9201daa185f611b361afe7c60acf559f1323"},"path":{"kind":"string","value":"/apheleia/projection/transform.py"},"content_id":{"kind":"string","value":"db08540be2a7696be02f3ead303090eb9286a9b6"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"lidavidm/apheleia"},"repo_url":{"kind":"string","value":"https://github.com/lidavidm/apheleia"},"snapshot_id":{"kind":"string","value":"6dd02f72f44baef010ec7412ee3cba008c6aa5d1"},"revision_id":{"kind":"string","value":"ffee3d1cc292a6c60eecf82c2a4e1488c9f572e3"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-05T18:49:25.010974","string":"2016-09-05T18:49:25.010974"},"revision_date":{"kind":"timestamp","value":"2012-01-08T17:42:31","string":"2012-01-08T17:42:31"},"committer_date":{"kind":"timestamp","value":"2012-01-08T17:42:31","string":"2012-01-08T17:42:31"},"github_id":{"kind":"number","value":3083185,"string":"3,083,185"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open
_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"import pyglet\nfrom pyglet import gl\n\n\nclass TransformGroup(pyglet.graphics.Group):\n def __init__(self, projection, parent=None):\n super().__init__(parent)\n self.projection = projection\n self.transforms = []\n\n def add(self, transform, **kwargs):\n self.transforms.append((transform, kwargs))\n return kwargs\n\n def set_state(self):\n for t, kwargs in self.transforms:\n t.apply(self.projection, kwargs)\n\n def unset_state(self):\n for t, kwargs in reversed(self.transforms):\n t.remove(self.projection, kwargs)\n\n\nclass transform:\n def __init__(self, func, inverse=lambda p, d, k: None, state={}):\n self.name = func.__name__\n self.func = func\n self.inv = inverse\n self.state = state\n\n def inverse(self, inverse):\n self.inverse = inverse\n return self\n\n def apply(self, projection, kwargs):\n self.func(projection, self.state, kwargs)\n\n def remove(self, projection, kwargs):\n self.inv(projection, self.state, kwargs)\n\n\n@transform\ndef rotate(p, state, kwargs):\n x, y = p.x, p.y\n xp = x + (p.width / 2)\n yp = y + (p.height / 2)\n gl.glTranslatef(xp, yp, 0)\n gl.glRotatef(kwargs['angle'], 0.0, 0.0, 1.0)\n gl.glTranslatef(-xp, -yp, 0)\n\n\n@transform\ndef clearModelview(p, state, kwargs):\n gl.glMatrixMode(gl.GL_MODELVIEW)\n gl.glPushMatrix()\n gl.glLoadIdentity()\n\n\n@clearModelview.inverse\ndef clearModelview(p, state, kwargs):\n 
gl.glPopMatrix()\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1596,"cells":{"__id__":{"kind":"number","value":6648609420702,"string":"6,648,609,420,702"},"blob_id":{"kind":"string","value":"a719ccab6b8b41551f205f5124789f45332e1c9c"},"directory_id":{"kind":"string","value":"3280c217294b0cd57557bb37d61ace6334610d70"},"path":{"kind":"string","value":"/SO11/src/Program.py"},"content_id":{"kind":"string","value":"2eed2f823faa7963ae235829efbba3831944a0e7"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"LeonhardtDavid/so-david-leonhardt"},"repo_url":{"kind":"string","value":"https://github.com/LeonhardtDavid/so-david-leonhardt"},"snapshot_id":{"kind":"string","value":"4f7fa3a64edc4c7dc704cbca773e28385b220129"},"revision_id":{"kind":"string","value":"e5a7724684f2ee2acda9aed4da051a0451fc96cb"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-13T16:19:10.305924","string":"2020-04-13T16:19:10.305924"},"revision_date":{"kind":"timestamp","value":"2012-02-15T18:46:25","string":"2012-02-15T18:46:25"},"committer_date":{"kind":"timestamp","value":"2012-02-15T18:46:25","string":"2012-02-15T18:46:25"},"github_id":{"kind":"number","value":32116864,"string":"32,116,864"},"star_events_count":{"kind":"number","value":0,"string":"0"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null
"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"class Prog:\r\n def __init__(self, priority, name, code=[], vars=[]):\r\n self.name = name\r\n self.priority = priority\r\n self.code = code\r\n self.vars = vars\r\n self.maxBurst = self.calculateMaxBurst()\r\n self.lenght = self.calculateLenght()\r\n \r\n def calculateMaxBurst(self):\r\n maxBurst = 0\r\n for inst in self.code:\r\n if inst.cpuBurst() > maxBurst:\r\n maxBurst = inst.cpuBurst()\r\n return maxBurst\r\n \r\n def calculateLenght(self):\r\n lenght = 0\r\n for inst in self.code:\r\n lenght += inst.burst\r\n lenght += len(self.vars)\r\n return lenght\r\n \r\n def __repr__(self):\r\n return 'Prog \"%s\" %s' % (self.name, self.code)"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1597,"cells":{"__id__":{"kind":"number","value":1554778180256,"string":"1,554,778,180,256"},"blob_id":{"kind":"string","value":"8185bbadac50c164e1609eb867a1a4ff62e7321f"},"directory_id":{"kind":"string","value":"eb2e58926969324de13a7909f8fc165e1b6b4a37"},"path":{"kind":"string","value":"/setup.py"},"content_id":{"kind":"string","value":"1eeea5c571a102625e3986bbfbb01ada6c0b3c14"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"DavidWittman/cloudservers-hostsgen"},"repo_url":{"kind":"string","value":"https://github.com/DavidWittman/cloudservers-hostsgen"},"snapshot_id":{"kind":"string","value":"f4feb227408ef560ea4f3ce564366f02ced061e9"},"revision_id":{"kind":"string","value":"e35b4f2855dd9ce6d46e762b8100162dad9be558"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-05-03T23:15:45.694996","string":"2020-05-03T23:15:45.694996"},"revision_date":{"kind":"timestamp","value":"2014-04-25T22:51:36","string":"2014-04-25T22:51:36"},"committer_date":{"kind":"timestamp","value":"2014-04-25T22:51:36","string":"2014-04-25T22:51:36"},"github_id":{"kind":"number","value":2595770,"string":"2,595,770"},"star_events_count":{"kind":"number","value":2,"string":"2"},"fork_events_count":{"kind":"number","value":2,"string":"2"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"bool","value":false,"string":"false"},"gha_event_created_at":{"kind":"timestamp","value":"2014-04-25T22:48:35","string":"2014-04-25T22:48:35"},"gha_created_at":{"kind":"timestamp","value":"2011-10-17T23:54:37","string":"2011-10-17T23:54:37"},"gha_updated_at":{"kind":"timestamp","value":"2014-04-25T22:48:35","string":"2014-04-25T22:48:35"},"gha_pushed_at":{"kind":"timestamp","value":"2014-04-25T22:48:35","string":"2014-04-25T22:48:35"},"gha_size":{"kind":"number","value":113,"string":"113"},"gha_stargazers_count":{"kind":"number","value":6,"string":"6"},"gha_forks_count":{"kind":"number","value":3,"string":"3"},"gha_open_issues_count":{"kind":"number","value":1,"string":"1"},"gha_language":{"kind":"string","value":"Python"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\n__author__ = \"David Wittman \"\nNAME = 
\"cloudservers-hostsgen\"\nDESC = \"Auto-generates /etc/hosts for Rackspace Cloud Servers\"\nVERSION = \"1.0.1\"\nREQS = [ 'python-novaclient==2.6.0' ]\n\nsetup(name = NAME,\n description = DESC,\n version = VERSION, \n author = \"David Wittman\",\n author_email = \"david@wittman.com\",\n license = \"BSD\",\n install_requires = REQS,\n py_modules = ['hostsgen'],\n entry_points = \"\"\"\n [console_scripts]\n cloudservers-hostsgen = hostsgen:main\n \"\"\"\n )\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}},{"rowIdx":1598,"cells":{"__id__":{"kind":"number","value":10058813433968,"string":"10,058,813,433,968"},"blob_id":{"kind":"string","value":"48f5db5cb27651393ee2c70711a83831d48f769b"},"directory_id":{"kind":"string","value":"7442c510d18b773bfd88570d0fbe1065947c55a1"},"path":{"kind":"string","value":"/solutions/p45.py"},"content_id":{"kind":"string","value":"aad255f69d3c8d6abccbae6e4a9cd78a8797e269"},"detected_licenses":{"kind":"list 
like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"Bryukh/euler"},"repo_url":{"kind":"string","value":"https://github.com/Bryukh/euler"},"snapshot_id":{"kind":"string","value":"dece4fa147eba44622ac940b3b8f303a4c8069cc"},"revision_id":{"kind":"string","value":"5bb0420a52e32758279c5bfb49e008e56ce5b1fc"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2020-04-05T22:50:52.183253","string":"2020-04-05T22:50:52.183253"},"revision_date":{"kind":"timestamp","value":"2012-08-31T09:32:20","string":"2012-08-31T09:32:20"},"committer_date":{"kind":"timestamp","value":"2012-08-31T09:32:20","string":"2012-08-31T09:32:20"},"github_id":{"kind":"number","value":1509830,"string":"1,509,830"},"star_events_count":{"kind":"number","value":3,"string":"3"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"null"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"#-*- encoding: utf8 -*-\n\"\"\"\nTriangle, pentagonal, and hexagonal numbers are generated by the following formulae:\n\nTriangle\t \tTn=n(n+1)/2\t \t1, 3, 6, 10, 15, ...\nPentagonal\t \tPn=n(3n-1)/2\t \t1, 5, 12, 22, 35, ...\nHexagonal\t \tHn=n(2n-1)\t \t1, 6, 15, 28, 45, ...\nIt can be verified that T285 = P165 = H143 = 40755.\n\nFind the next triangle number that is also pentagonal and hexagonal.\n\"\"\"\n\nfrom eulerfunc import triangle, ispentagonal, ishexagonal\n\ndef solution():\n \"\"\"\n Bryukh's solution\n \"\"\"\n for i in xrange(286, 100000):\n tr = triangle(i)\n if ispentagonal(tr) 
and ishexagonal(tr):\n return tr\n return None\n\n\nif __name__ == '__main__':\n pass\n"},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2012,"string":"2,012"}}},{"rowIdx":1599,"cells":{"__id__":{"kind":"number","value":9363028748213,"string":"9,363,028,748,213"},"blob_id":{"kind":"string","value":"61bf2e292f91077cfac3a009072dbc6be8c0ed17"},"directory_id":{"kind":"string","value":"9d767c7df630aa7782264cc51073065e1f5d4c5d"},"path":{"kind":"string","value":"/mlia-examples/src/book/regression/treeregress.py"},"content_id":{"kind":"string","value":"e623432dc69009321762728aac280e27372e6dc5"},"detected_licenses":{"kind":"list like","value":[],"string":"[]"},"license_type":{"kind":"string","value":"no_license"},"repo_name":{"kind":"string","value":"GomesNayagam/workspace"},"repo_url":{"kind":"string","value":"https://github.com/GomesNayagam/workspace"},"snapshot_id":{"kind":"string","value":"497e6eaad2785875a02f870cd384516b72501110"},"revision_id":{"kind":"string","value":"d23e806cbbe0decc8a34bcd61636468a46f439a4"},"branch_name":{"kind":"string","value":"refs/heads/master"},"visit_date":{"kind":"timestamp","value":"2016-09-06T17:45:52.800243","string":"2016-09-06T17:45:52.800243"},"revision_date":{"kind":"timestamp","value":"2014-09-25T13:51:20","string":"2014-09-25T13:51:20"},"committer_date":{"kind":"timestamp","value":"2014-09-25T13:51:20","string":"2014-09-25T13:51:20"},"github_id":{"kind":"number","value":24454554,"string":"24,454,554"},"star_events_count":{"kind":"number","value":1,"string":"1"},"fork_events_count":{"kind":"number","value":0,"string":"0"},"gha_license_id":{"kind":"null"},"gha_fork":{"kind":"null"},"gha_event_created_at":{"kind":"null"},"gha_created_at":{"kind":"null"},"gha_updated_at":{"kind":"null"},"gha_pushed_at":{"kind":"null"},"gha_size":{"kind":"n
ull"},"gha_stargazers_count":{"kind":"null"},"gha_forks_count":{"kind":"null"},"gha_open_issues_count":{"kind":"null"},"gha_language":{"kind":"null"},"gha_archived":{"kind":"null"},"gha_disabled":{"kind":"null"},"content":{"kind":"string","value":"from __future__ import division\nfrom numpy import *\n\nclass TreeNode():\n def __init__(self, feat, val, right, left):\n featureToSplitOn = feat\n valueOfSplit = val\n rightBranch = right\n leftBranch = left\n\ndef loadDataSet(filename):\n data = []\n for line in open(filename).readlines():\n data.append(map(lambda x: float(x), line.strip().split(\"\\t\")))\n return data\n\ndef binSplitDataSet(dataset, feature, value):\n mat0 = dataset[nonzero(dataset[:, feature] > value)[0], :][0]\n mat1 = dataset[nonzero(dataset[:, feature] <= value)[0], :][0]\n return mat0, mat1\n\ndef modelErr(dataSet):\n ws,X,Y = linearSolve(dataSet)\n yHat = X * ws\n return sum(power(Y - yHat,2))\n\ndef regLeaf(dataSet):\n return mean(dataSet[:, -1])\n\ndef regErr(dataSet):\n return var(dataSet[:, -1]) * shape(dataSet)[0]\n\ndef chooseBestSplit(dataSet, leafType=regLeaf, errType=regErr, ops=(1,4)):\n tolS = ops[0]\n tolN = ops[1]\n # if all the target variables are the same value: quit and return value\n if len(set(dataSet[:, -1].T.tolist()[0])) == 1: #exit cond 1\n return None, leafType(dataSet)\n m, n = shape(dataSet)\n # the choice of the best feature is driven by Reduction in RSS error from mean\n S = errType(dataSet)\n bestS = inf\n bestIndex = 0\n bestValue = 0\n for featIndex in range(n-1):\n for splitVal in set(dataSet[:, featIndex]):\n mat0, mat1 = binSplitDataSet(dataSet, featIndex, splitVal)\n if shape(mat0)[0] < tolN or shape(mat1)[0] < tolN:\n continue\n newS = errType(mat0) + errType(mat1)\n if newS < bestS:\n bestIndex = featIndex\n bestValue = splitVal\n bestS = newS\n # if the decrease (S-bestS) is less than a threshold don't do the split\n if (S - bestS) < tolS:\n return None, leafType(dataSet) #exit cond 2\n mat0, mat1 = 
binSplitDataSet(dataSet, bestIndex, bestValue)\n if (shape(mat0)[0] < tolN) or (shape(mat1)[0] < tolN): #exit cond 3\n return None, leafType(dataSet)\n return bestIndex,bestValue #returns the best feature to split on\n #and the value used for that split\n\ndef createTree(dataset, leafType=regLeaf, errType=regErr, ops=(1,4)):\n feat, val = chooseBestSplit(dataset, leafType, errType, ops)\n if feat == None:\n return val\n retTree = {}\n retTree[\"spInd\"] = feat\n retTree[\"spVal\"] = val\n lset, rset = binSplitDataSet(dataset, feat, val)\n retTree[\"left\"]= createTree(lset, leafType, errType, ops)\n retTree[\"right\"]= createTree(rset, leafType, errType, ops)\n return retTree\n\ndef isTree(obj):\n return type(obj).__name__ == \"dict\"\n\ndef getMean(tree):\n if isTree(tree[\"right\"]):\n tree[\"right\"] = getMean(tree[\"right\"])\n if isTree(tree[\"left\"]):\n tree[\"left\"] = getMean(tree[\"left\"])\n return (tree[\"left\"] + tree[\"right\"]) / 2\n\ndef prune(tree, testData):\n if shape(testData)[0] == 0:\n return getMean(tree)\n if isTree(tree['right']) or isTree(tree['left']):\n lSet, rSet = binSplitDataSet(testData, tree[\"spInd\"], tree[\"spVal\"])\n if isTree(tree['left']):\n tree['left'] = prune(tree['left'], lSet)\n if isTree(tree['right']):\n tree['right'] = prune(tree['right'], rSet)\n if not isTree(tree['left']) and not isTree(tree['right']):\n lSet, rSet = binSplitDataSet(testData, tree[\"spInd\"], tree[\"spVal\"])\n errorNoMerge = sum(power(lSet[:,-1] - tree['left'],2)) + \\\n sum(power(rSet[:,-1] - tree['right'],2))\n treeMean = (tree['left'] + tree['right']) / 2.0\n errorMerge = sum(power(testData[:, -1] - treeMean, 2))\n if errorMerge < errorNoMerge:\n print \"merging\"\n return treeMean\n else: return tree\n else:\n return tree\n\ndef linearSolve(data):\n m, n = shape(data)\n X = mat(ones((m, n)))\n Y = mat(ones((m, 1)))\n X[:, 1:n] = data[:, 0:n-1]\n Y = data[:, -1]\n xTx = X.T * X\n if linalg.det(xTx) == 0.0:\n raise NameError(\"singular matrix, 
can't invert, \" +\n \"try increasing second value of ops\")\n ws = xTx.I * (X.T * Y)\n return ws, X, Y\n\ndef modelLeaf(data):\n ws, X, Y = linearSolve(data)\n return ws\n\ndef modelErr(data):\n ws, X, Y = linearSolve(data)\n yHat = X * ws\n return sum(power(Y - yHat, 2))\n\ndef regTreeEval(model, data):\n return float(model)\n\ndef modelTreeEval(model, data):\n n = shape(data)[1]\n X = mat(ones((1, n + 1)))\n X[:, 1:n+1] = data\n return float(X * model)\n\ndef treeForecast(tree, data, modelEval=regTreeEval):\n if not isTree(tree):\n return modelEval(tree, data)\n if data[tree[\"spInd\"]] > tree[\"spVal\"]:\n if isTree(tree[\"left\"]):\n return treeForecast(tree[\"left\"], data, modelEval)\n else:\n return modelEval(tree[\"left\"], data)\n else:\n if isTree(tree[\"right\"]):\n return treeForecast(tree[\"right\"], data, modelEval)\n else:\n return modelEval(tree[\"right\"], data)\n\ndef createForecast(tree, testData, modelEval=regTreeEval):\n m = len(testData)\n yHat = mat(zeros((m, 1)))\n for i in range(0, m):\n yHat[i, 0] = treeForecast(tree, testData[i], modelEval)\n return yHat\n\ndef main():\n #testMat = amat(eye(4))\n #print testMat\n #mat0, mat1 = binSplitDataSet(testMat, 1, 0.5)\n #print \"mat0=\", mat0\n #print \"mat1=\", mat1\n\n #tree = createTree(mat(loadDataSet(\"ex00.txt\")))\n #print tree\n #tree2 = createTree(mat(loadDataSet(\"ex0.txt\")))\n #print tree2\n #tree3 = createTree(mat(loadDataSet(\"ex0.txt\")), ops=[0, 1])\n #print tree3\n\n # first call creates many leaves, second creates 2\n #tree4 = createTree(mat(loadDataSet(\"ex2.txt\")))\n #print tree4\n #tree5 = createTree(mat(loadDataSet(\"ex2.txt\")), ops=[10000, 4])\n #print tree5\n\n #tree6 = createTree(mat(loadDataSet(\"ex2.txt\")), ops=[0, 1])\n #testData = mat(loadDataSet(\"ex2test.txt\"))\n #prune(tree6, testData)\n #print tree6\n\n ## model trees\n #datamatrix = mat(loadDataSet(\"exp2.txt\"))\n #tree7 = createTree(datamatrix, modelLeaf, modelErr, (1, 10))\n #print tree7\n\n ## bike 
speeds\n trainmatrix = mat(loadDataSet(\"bikeSpeedVsIq_train.txt\"))\n testmatrix = mat(loadDataSet(\"bikeSpeedVsIq_test.txt\"))\n # reg tree\n tree = createTree(trainmatrix, ops=(1, 20))\n yHat = createForecast(tree, testmatrix[:, 0])\n print \"r-squared(reg)=\", corrcoef(yHat, testmatrix[:, 1], rowvar=0)[0, 1]\n # model tree\n mtree = createTree(trainmatrix, modelLeaf, modelErr, (1, 20))\n yHat = createForecast(mtree, testmatrix[:, 0], modelTreeEval)\n print \"r-squared(model)=\", corrcoef(yHat, testmatrix[:, 1], rowvar=0)[0, 1]\n # linear solver\n ws, X, Y = linearSolve(trainmatrix)\n for i in range(shape(testmatrix)[0]):\n yHat[i] = testmatrix[i,0] * ws[1,0] + ws[0,0]\n print \"r-squared(lin)=\", corrcoef(yHat, testmatrix[:, 1], rowvar=0)[0, 1]\n\nif __name__ == \"__main__\":\n main()\n "},"src_encoding":{"kind":"string","value":"UTF-8"},"language":{"kind":"string","value":"Python"},"is_vendor":{"kind":"bool","value":false,"string":"false"},"is_generated":{"kind":"bool","value":false,"string":"false"},"year":{"kind":"number","value":2014,"string":"2,014"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":15,"numItemsPerPage":100,"numTotalItems":42509,"offset":1500,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1NjQyNzY5OSwic3ViIjoiL2RhdGFzZXRzL2xvdWJuYWJubC9vbGRfcHl0aG9uIiwiZXhwIjoxNzU2NDMxMjk5LCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.gdIh96RDVXLBAPfd65d1HDWu7kAtKJIffwW5lQQfXiTbRpnrEfQ65-hIGQ8NVsgcWjikOGxsQ6oFdnryPRzIDQ","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
"""
Author: Jatin Shah
Date: 10/02/2010
Description:
"""
import datetime
import goodluck
import goodluck.goodluckdbconnector as my
from PlayerLogin import PlayerLogin
from goodluck.LogMsg import Log
# very important module for encoding python object directly to Javascript object and
# vice versa
import simplejson as json
#Web-Server Script Engine Specific Modueles
#to test module in python interpreter, remove/comment them
try:
from mod_python import apache, psp, Session
except ImportError:
# direct execution from python shell
pass
class DrawMaster:
    """Handles draw-related AJAX requests for the player screens.

    When constructed with a mod_python request object, every public
    method writes its JSON reply straight to the client; when ``req``
    is None (interactive/unit testing) the reply string is returned
    instead.
    """

    def __init__(self, req=None):
        # ``req`` is the mod_python request object, or None for shell testing.
        self.req = req
        self.dt = datetime.datetime(1, 1, 1)
        self.logger = Log("c:\\playerlogin.txt")
        if self.req is not None:
            self.pxlog("\n"+"======================"*10+"\n")
            self.pxlog("DrawMaster Module instantiated")
            self.response = apache.OK
            self.req.content_type = "text/html"

    def pxlog(self, logstr):
        """Append a timestamped line to the trace log when tracing is enabled."""
        if goodluck.TRACELOG:
            self.logger.logmsg("["+goodluck.Now()+"]"+logstr+"\n")

    def getLastDrawResults(self):
        """Reply with the results of the most recent draws.

        responseCode: SUCCESS on success, SESSION_EXPIRED when the
        player session has expired, NOREC when no rows were returned.
        @overlooked, maintained and modified by Ronak Patel
        """
        self.pxlog("Function getLastDrawResults Called")
        response = {}
        sid, cardid = 0, 0  # default sessionId and cardId for database usage
        results = []
        response['result'] = results
        if self.req is not None:
            if not self.req.session.is_new():
                self.pxlog("Session is not new "+self.req.session.id())
            else:
                self.pxlog("Session is new "+self.req.session.id())
                self.pxlog("Sid from client = "+self.req.form['sid'])
                PlayerLogin().logout(self.req.form['sid'])
                # BUGFIX: was ``response[responseCode]`` -- ``responseCode``
                # is an undefined name and raised NameError; the string key
                # was intended.
                response['responseCode'] = goodluck.SESSION_EXPIRED
                self.req.write(json.dumps([response]))
                return  # request does not carry valid session details
        rs = my.proc("proc_ssl_last_draw_results();")
        if rs.recordcount > 0:
            i = rs.recordcount
            self.pxlog("procedure result count="+str(i))
            response['responseCode'] = goodluck.SUCCESS
            while 0 < i:
                results.append({'drawname': rs.Fields['drawname'],
                                'drawnumber': rs.Fields['drawnumber'],
                                'drawticketnumber': rs.Fields['drawticketnumber'],
                                'drawtime': str(rs.Fields['drawtime'])
                                })
                rs.next()
                i -= 1
        else:
            response['responseCode'] = goodluck.NOREC
        reply = json.dumps([response])
        self.pxlog("Function getLastDrawResults ends here")
        if self.req is not None:
            self.req.write(reply)
        else:
            return reply

    def getSessDetails(self):
        """Report the server-side session id and card id for this client."""
        if self.req is not None:
            self.pxlog("Funciton getSessDetails called")
            self.pxlog("Request Parameters => "+str(self.req.the_request))
            self.pxlog("Request Headers =>"+str(self.req.headers_in))
            self.pxlog("Response Headers =>"+str(self.req.headers_out))
            response = {}
            clisid = self.req.form['sid']
            self.pxlog("inquiring sid : "+clisid)
            if not self.req.session.is_new():
                self.pxlog("Session is not new")
                rsid = self.req.session['sid']
                response['responseCode'] = goodluck.SUCCESS
                response['rsid'] = rsid
                response['cardid'] = self.req.session['cardid']
            else:
                self.pxlog("Session is new")
                # NOTE(review): ``clisid`` is a string; under Python 2 a str
                # always compares greater than an int, so this branch is
                # always taken.  ``int(clisid) > 0`` was probably intended --
                # confirm before changing, since logout() may rely on it.
                if clisid > 0:
                    PlayerLogin().logout(int(clisid))
                response['responseCode'] = goodluck.SESSION_EXPIRED
            self.req.write(json.dumps([response]))
            self.pxlog("Function getSessDetails ends here")
        else:
            return "This is not a web request"

    def getAdvDrawList(self):
        """Reply with the advance-draw list for an optional date/time filter.

        The 'addt'/'adtm' form fields hold a date and a time, or the
        literal string "null" to leave the corresponding filter unset.
        """
        response = {}
        sid = 0
        cardid = 0
        conn = my.connect()
        if self.req is not None:
            if self.req.session.is_new():
                response['result'] = goodluck.SESSION_EXPIRED
                self.req.session.invalidate()
                self.req.write(json.dumps([response]))
                my.destroy(conn)  # FIX: release the connection on early exit
                return
            else:
                if self.req.form['addt'] == "null":
                    addt = "null"
                else:
                    addt = "\'"+self.req.form['addt']+"\'"
                if self.req.form['adtm'] == "null":
                    adtm = "null"
                else:
                    adtm = "\'"+self.req.form['adtm']+"\'"
                # NOTE(security): the procedure call is built by string
                # interpolation of client-supplied form fields; this should
                # be parameterized at the database layer.
                rs = my.proc("proc_getAdvDraw(%s,%s)"%(addt,adtm))
                if rs.recordcount > 0:
                    result = rs.Fields['result']
                    response['result'] = result
                    if result == 1:
                        i = 0
                        drawlist = []
                        while i < rs.recordcount:
                            drawlist.append({'id': rs.Fields['drawid'],
                                             'drawname': rs.Fields['drawname'],
                                             'drawtime': str(rs.Fields['drawtime'])
                                             })
                            rs.next()
                            i += 1
                        response['draws'] = drawlist
                my.destroy(rs)
                my.destroy(conn)  # FIX: the connection was never released
                self.req.write(json.dumps([response]))

    def getCurrentDrawList(self):
        """Reply with the current draw list, card balance and countdown.

        Response codes: SUCCESS on success, SESSION_EXPIRED when the
        session has expired (client redirects to the login screen).
        @author Jatin Shah
        @date 10/02/2010
        @overlooked, maintained and modified by Ronak Patel
        """
        response = {}
        sid = 0  # default sessionId for database usage
        cardid = 0
        # FIX: initialise ``data`` up front; previously it was only bound
        # inside the request branch, so the shell-testing path (req is
        # None) raised NameError when building the reply below.
        data = []
        if self.req is not None:
            conn = my.connect()  # FIX: connect only when serving a request
            self.pxlog("Function getCurrentDrawList Called")
            if self.req.form.has_key('sid'):
                try:
                    sid = int(self.req.form['sid'])
                    cardid = self.req.session['cardid']
                except:
                    sid = 0
            self.pxlog("params = sid : "+str(sid)+"cardid : "+str(cardid))
            # check for an existing session in the web request
            if self.req.session.is_new():
                self.pxlog("Session is new "+self.req.session.id())
                PlayerLogin().logout(int(sid))
                response['responseCode'] = goodluck.SESSION_EXPIRED
                self.req.session.invalidate()
                self.req.write(json.dumps([response]))
                my.destroy(conn)  # FIX: release the connection on early exit
                return
            else:
                self.pxlog("Session is not new")
                if cardid > 0:
                    self.pxlog("Calling proc_ssl_get_card_balance")
                    rs = my.proc('proc_ssl_get_card_balance(%d);' % (cardid), conn)
                    if rs.recordcount > 0:
                        self.req.session['balance'] = rs.Fields['balance']
                        self.pxlog("bal="+str(rs.Fields['balance']))
                    rs.nextset()
                response['sid'] = self.req.session['sid']  # database SID for reference on the playerscreen
                response['balance'] = self.req.session['balance']  # account balance for the playerscreen
                # Pick the LOTTERY DRAWLIST data out of the database.
                self.pxlog("proc_ssl_current_draws")
                # NOTE(review): ``rs`` is only bound when cardid > 0; a valid
                # session without a card id would raise NameError here --
                # confirm cardid is always set for live sessions.
                rs.execute("call proc_ssl_current_draws();")
                cdt = datetime.datetime(1, 1, 1).now()
                i = rs.recordcount
                if i > 0:
                    # minutes/seconds remaining until the next draw time
                    seconds = rs.Fields['drawtime'].seconds
                    seconds = seconds - (cdt.hour * 60 * 60) - (cdt.minute * 60) - cdt.second
                    data.append(seconds / 60)
                    data.append(seconds % 60)
                drawlist = []
                while i > 0:
                    drawlist.append({'id': rs.Fields['drawid'],
                                     'drawname': rs.Fields['drawname'],
                                     'drawtime': str(rs.Fields['drawtime'])})
                    rs.next()
                    i -= 1
                data.append(drawlist)
                my.destroy(rs)  # destroy lottery recordset using factory function
                my.destroy(conn)
        response['data'] = data
        response['responseCode'] = goodluck.SUCCESS
        replydata = json.dumps([response])
        self.pxlog("Function getCurrentDrawList ends here")
        if self.req is not None:
            self.req.write(replydata)  # encode Python list to JSON
        else:
            return replydata
if __name__ == '__main__':
"Unit Testing code"
dm = DrawMaster(None)
print dm.getLastDrawResults()
## print dm.getCurrentDrawList()
UTF-8
Python
false
false
2,011
3,822,520,917,898
7f035d9d437d996d26bd8f963d37155ce4818e19
5b41c18972a223b68cb0dd002da51d39fa489cf6
/problem007.py
65986131fa478eb22439757acfb1814b3c55d303
[]
no_license
punchagan/project-euler
https://github.com/punchagan/project-euler
b3e01f01c7136e242b826fa6649cc47578fb2b6d
5cd4f735d2f5eddd2a4279b63ef31b9569402a8c
refs/heads/master
2016-09-06T06:27:26.498863
2011-05-20T11:39:47
2011-05-20T11:39:47
1,776,195
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# Problem 7: What is the 10001st prime number?
import math


def nth_prime(n):
    """Return the n-th prime number (1-indexed) by trial division.

    Only previously found primes no larger than sqrt(candidate) are
    tried as divisors, which is sufficient to establish primality
    (any composite has a prime factor <= its square root).
    """
    primes = [2]
    candidate = 3
    while len(primes) < n:
        limit = math.sqrt(candidate)
        is_prime = True
        for p in primes:
            if p > limit:
                break  # no prime divisor <= sqrt(candidate): it is prime
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(candidate)
        candidate += 2  # even numbers > 2 are never prime
    return primes[-1]


number = 10001
print(nth_prime(number))
UTF-8
Python
false
false
2,011
16,183,436,808,358
73e83cde333e71f438d623407b9eac96b8507161
ee8d390e418679fbbcdddc9c12ab245c1f341857
/customquery/utils.py
9866615420da8bced24fead9a768f680d90d80e6
[]
no_license
lovasb/django-customquery
https://github.com/lovasb/django-customquery
ee11391c90683c73c81a9a31ed4c68fd17b32f08
3acecbecd1ff4fd4312610d2c61853cdf80f3385
refs/heads/master
2015-08-04T15:40:53.201620
2012-11-28T11:56:54
2012-11-28T11:56:54
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
from django.db.models import *
from django.db.models.loading import get_models
from django.db.models.query import ValuesListQuerySet, ValuesQuerySet

# Pull every installed model class into this module's namespace so that the
# user-supplied query strings evaluated below can reference models by bare
# name. NOTE(review): the star import plus dynamic exec pollutes the module
# namespace and executes at import time; only acceptable because this module
# already runs fully trusted query strings (see get_query_result).
# Python 2 `exec` statement syntax.
for m in get_models():
    exec "from %s import %s" % (m.__module__, m.__name__)
def get_query_result(request, queries):
    """Execute a sequence of Python/ORM statement strings; return the result.

    All entries except the last are exec'd (typically assignments building
    up querysets); the last entry is eval'd and must produce a queryset
    created via .values() or .values_list().

    SECURITY: exec/eval of these strings runs arbitrary Python code in this
    process. Only ever expose this to fully trusted users.

    Parameters:
        request -- unused here (kept for the view-calling convention).
        queries -- list of Python source strings.

    Returns (retval, error, row):
        retval -- the final queryset, or None on failure.
        error  -- None, an error description, or (on exception) a 3-tuple
                  (type name, ":", exception). NOTE(review): that is a
                  tuple, not a concatenated string -- confirm consumers
                  expect this shape.
        row    -- index of the failing query (0 if none failed).
    """
    retval, error, row = None, None, 0
    for i in range(0, len(queries)):
        try:
            if i == len(queries) - 1:
                # Last statement: its value is the result.
                retval = eval(queries[i])
            else:
                exec(queries[i])
        except:
            # Bare except is deliberate: any failure in user code is
            # reported back (type + message) rather than propagated.
            typ, value, tb = sys.exc_info()
            error = typ.__name__, ":", value
            row = i
            break
            #print '\n'.join(traceback.format_tb(tb))
    if retval and (not isinstance(retval, (ValuesQuerySet, ValuesListQuerySet))):
        # Reject result types the caller cannot serialize.
        retval = None
        row = len(queries) - 1
        error = "Last query must return ValuesQuerySet [.values()] or ValuesListQuerySet [.values_list()]"
    return retval, error, row
# encoding: utf-8
from zope import component
from bungeni.core.workflows.notification import Notification
from bungeni.core.workflows import interfaces
from bungeni.core import globalsettings as prefs
from bungeni.core.i18n import _
import zope.securitypolicy.interfaces
from bungeni.core.workflows import dbutils, utils
class conditions:
    """Condition callables referenced by the motion workflow definition."""
    @staticmethod
    def is_scheduled(info, context):
        # True when this motion item is already scheduled (delegates to
        # the shared workflow dbutils helper).
        return dbutils.isItemScheduled(context.motion_id)
class actions:
    """Transition-action callables for the motion workflow.

    Each static method is invoked by the workflow engine with
    (info, context) when the corresponding transition fires; most delegate
    to the shared helpers in bungeni.core.workflows.utils.
    """
    @staticmethod
    def denyAllWrites(motion):
        """
        remove all rights to change the question from all involved roles
        """
        # Disabled pending a final permission scheme; kept for reference.
        # rpm = zope.securitypolicy.interfaces.IRolePermissionMap( motion )
        # rpm.denyPermissionToRole( 'bungeni.motion.edit', u'bungeni.Owner' )
        # rpm.denyPermissionToRole( 'bungeni.motion.edit', u'bungeni.Clerk' )
        # rpm.denyPermissionToRole( 'bungeni.motion.edit', u'bungeni.Speaker' )
        # rpm.denyPermissionToRole( 'bungeni.motion.edit', u'bungeni.MP' )
        # rpm.denyPermissionToRole( 'bungeni.motion.delete', u'bungeni.Owner' )
        # rpm.denyPermissionToRole( 'bungeni.motion.delete', u'bungeni.Clerk' )
        # rpm.denyPermissionToRole( 'bungeni.motion.delete', u'bungeni.Speaker' )
        # rpm.denyPermissionToRole( 'bungeni.motion.delete', u'bungeni.MP' )
    @staticmethod
    def create( info, context ):
        # Stamp the owning parliament and owner on the newly created motion.
        utils.setParliamentId(info, context)
        utils.setBungeniOwner(context)
    @staticmethod
    def submit( info, context ):
        utils.setSubmissionDate(info, context)
    @staticmethod
    def received_by_clerk( info, context ):
        # A version snapshot is taken at each significant transition.
        utils.createVersion(info, context)
    @staticmethod
    def require_edit_by_mp( info, context ):
        utils.createVersion(info,context)
    @staticmethod
    def complete( info, context ):
        utils.createVersion(info,context)
        utils.setSubmissionDate(info, context)
    @staticmethod
    def approve( info, context ):
        utils.setApprovalDate(info,context)
    @staticmethod
    def adopt(info, context):
        utils.createVersion(info,context)
    @staticmethod
    def reject(info, context):
        # No side effects; rejection notification is handled by the
        # Notification adapters below.
        pass
    @staticmethod
    def require_amendment( info, context ):
        utils.createVersion(info,context)
    @staticmethod
    def complete_clarify( info, context ):
        utils.createVersion(info,context)
    @staticmethod
    def mp_clarify( info, context ):
        utils.createVersion(info,context)
    @staticmethod
    def schedule( info, context ):
        pass
    @staticmethod
    def reschedule( info, context ):
        pass
    @staticmethod
    def defer( info, context):
        pass
    @staticmethod
    def elapse( info, context ):
        pass
    @staticmethod
    def revert_to_admissible( info, context ):
        pass
    @staticmethod
    def withdraw( info, context ):
        pass
class SendNotificationToMemberUponReceipt(Notification):
    """Email the owning member when their motion is received."""
    component.adapts(interfaces.IMotionReceivedEvent)
    # i18n message id with a default body text.
    body = _('notification_email_to_member_upon_receipt_of_motion',
            default="Motion received")
    @property
    def subject(self):
        return u'Motion received: %s' % self.context.short_name
    @property
    def condition(self):
        # Only send if the member opted in to notifications.
        return self.context.receive_notification
    @property
    def from_address(self):
        return prefs.getClerksOfficeEmail()
class SendNotificationToClerkUponSubmit(Notification):
    """Send notification to Clerk's office upon submit.

    Settings are read from a global registry to determine whether to
    send this notification and where to send it to.
    """
    component.adapts(interfaces.IMotionSubmittedEvent)
    body = _('notification_email_to_clerk_upon_submit_of_motion',
            default="Motion submitted")
    @property
    def subject(self):
        return u'Motion submitted: %s' % self.context.short_name
    @property
    def condition(self):
        # Controlled by the global Clerk's-office preference, not per-member.
        return prefs.getClerksOfficeReceiveNotification()
    @property
    def recipient_address(self):
        return prefs.getClerksOfficeEmail()
class SendNotificationToMemberUponReject(Notification):
    """Issued when a motion was rejected by the speakers office.
    Sends a notice that the Motion was rejected."""
    component.adapts(interfaces.IMotionRejectedEvent)
    body = _('notification_email_to_member_upon_rejection_of_motion',
            default="Motion rejected")
    @property
    def subject(self):
        return u'Motion rejected: %s' % self.context.short_name
    @property
    def condition(self):
        return self.context.receive_notification
    @property
    def from_address(self):
        # Rejection originates from the Speaker's office.
        return prefs.getSpeakersOfficeEmail()
class SendNotificationToMemberUponNeedsClarification(Notification):
    """Issued when a motion needs clarification by the MP;
    sends a notice that the motion needs clarification."""
    component.adapts(interfaces.IMotionClarifyEvent)
    body = _('notification_email_to_member_upon_need_clarification_of_motion',
            default="Your motion needs to be clarified")
    @property
    def subject(self):
        return u'Motion needs clarification: %s' % self.context.short_name
    @property
    def condition(self):
        return self.context.receive_notification
    @property
    def from_address(self):
        return prefs.getClerksOfficeEmail()
class SendNotificationToMemberUponDeferred(Notification):
    """Issued when a motion was deferred by Clerk's office."""
    component.adapts(interfaces.IMotionDeferredEvent)
    body = _('notification_email_to_member_upon_defer_of_motion',
            default="Motion deferred")
    @property
    def subject(self):
        return u'Motion deferred: %s' % self.context.short_name
    @property
    def condition(self):
        return self.context.receive_notification
    @property
    def from_address(self):
        # NOTE(review): class docstring says deferral comes from the
        # Clerk's office, but the sender is the Speaker's office -- confirm
        # which is intended.
        return prefs.getSpeakersOfficeEmail()
class SendNotificationToMemberUponSchedule(Notification):
    """Issued when a motion was scheduled by Speakers office.
    Sends a notice that the motion is scheduled for ... """
    component.adapts(interfaces.IMotionScheduledEvent)
    body = _('notification_email_to_member_upon_schedule_of_motion',
            default="Motion scheduled")
    @property
    def subject(self):
        return u'Motion scheduled: %s' % self.context.short_name
    @property
    def condition(self):
        return self.context.receive_notification
    @property
    def from_address(self):
        # NOTE(review): docstring attributes scheduling to the Speaker's
        # office, but the sender address is the Clerk's office -- confirm.
        return prefs.getClerksOfficeEmail()
''' !+ remove, grep for: SendNotificationToMemberUponPostponed IMotionPostponedEvent
class SendNotificationToMemberUponPostponed(Notification):
"""Issued when a motion was postponed by the speakers office.
sends a notice that the motion could not be debated and was postponed"""
component.adapts(interfaces.IMotionPostponedEvent)
body = _('notification_email_to_member_upon_postpone_of_motion',
default="Motion postponed")
@property
def subject(self):
return u'Motion postponed: %s' % self.context.short_name
@property
def condition(self):
return self.context.receive_notification
@property
def from_address(self):
return prefs.getClerksOfficeEmail()
'''
UTF-8
Python
false
false
2,014
3,762,391,374,537
be805ff86ebea4ea6a5a90ff064bbe7bce664169
ee2a95d72c532c499c9144f2fa4fe7751b81111d
/TopicActiveUsers.py
b6ce4240ad54b3a028ecb173a251337be5a5c199
[]
no_license
Pycero91/Segundo
https://github.com/Pycero91/Segundo
42e0d3f7089c36b3ed2238df527a8f3ae77f8830
46284edab92ec5acc2074a9b5ab94e6465eb204a
refs/heads/master
2021-01-23T16:42:40.588140
2014-08-30T19:12:19
2014-08-30T19:12:19
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
__author__ = 'Pycero91'

import os

from twitter import *

# SECURITY: credentials must never be hardcoded in source. The original file
# held bare "YOUR OAUTH_TOKEN" placeholders (which were not even valid
# Python syntax); the keys are now read from the environment instead.
OAUTH_TOKEN = os.environ.get('OAUTH_TOKEN', '')
OAUTH_SECRET = os.environ.get('OAUTH_SECRET', '')
CONSUMER_KEY = os.environ.get('CONSUMER_KEY', '')
CONSUMER_SECRET = os.environ.get('CONSUMER_SECRET', '')

# Module-level twitter client used by the classes below.
t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
class Tweet(object):
    """Thin wrapper around a raw status dict from the twitter API."""
    def __init__(self, tweet):
        self.tweet = tweet      # raw status dict
        self.username = ''      # cached screen name, filled by getUsername()
        self.hashtags = []      # cached hashtag texts, filled by getHashtags()

    def getUsername(self):
        """Return (and cache) the author's screen name."""
        self.username = self.tweet['user']['screen_name']
        return self.username

    def getText(self):
        """Return (and cache, as self.text) the tweet's text body."""
        self.text = self.tweet['text']
        return self.text

    def getHashtags(self):
        """Return the list of hashtag texts in this tweet.

        Bug fix: the original appended into self.hashtags on every call, so
        calling the method twice duplicated every entry. The cache is now
        rebuilt on each call, making the method idempotent.
        """
        self.hashtags = [entry['text']
                         for entry in self.tweet['entities']['hashtags']]
        return self.hashtags
class User(object):
    """A twitter user, identified by screen name, queried through the
    module-level client `t`."""
    def __init__(self, username):
        self.username = username
        self.tweets = 0         # statuses_count cache, filled by getTweets()
        self.timeline = []      # kept for interface compatibility; unused

    def getUsername(self):
        return self.username

    def getTweets(self):
        """Return the user's total tweet count (statuses_count).

        Bug fix: the original called self.getLastTweet(), which is not
        defined anywhere in this class (guaranteed AttributeError). The most
        recent timeline entry is used instead -- TODO confirm this matches
        the intended "last tweet" semantics.
        """
        latest = Tweet(self.getTimeLine()[0])
        self.tweets = latest.tweet['user']['statuses_count']
        return self.tweets

    def getTimeLine(self):
        """Fetch this user's timeline via the module-level client `t`."""
        tl = t.statuses.user_timeline(screen_name=self.getUsername())
        return tl
class TopicList(object):
    """Fetches the most recent tweets mentioning a topic."""
    def __init__(self, topic):
        self.topic = topic
        self.topic_list = []    # kept for interface compatibility; unused

    def GetTopicList(self):
        '''
        Return the search result containing the 10 most recent tweets in
        which the topic appears.
        '''
        # Bug fix: result_type was misspelled 'recenr'; the twitter search
        # API accepts 'mixed' | 'recent' | 'popular'.
        topic_list = t.search.tweets(q=self.topic, count= 10, result_type='recent')
        return topic_list
class UsersList(object):
    """Collects the distinct authors of a twitter search result."""
    def __init__(self, lista):
        self.lista = lista          # raw search result dict (has 'statuses')
        self.users_list = []        # kept for interface compatibility; unused

    def GetUsersList(self):
        '''
        Return the de-duplicated screen names of the authors of the tweets
        in the received search result, in first-seen order.
        '''
        seen = []
        for status in self.lista['statuses']:
            name = Tweet(status).getUsername()
            if name not in seen:
                seen.append(name)
        return seen
class HashtagsList(object):
    """Gathers every hashtag a user has used across their timeline."""
    def __init__(self, user):
        self.user = user            # screen name to inspect
        self.hashtagslist = []      # kept for interface compatibility; unused

    def GetHashtagsList(self):
        '''
        Return all hashtags used by the user throughout their timeline
        (duplicates preserved, timeline order).
        '''
        collected = []
        for status in User(self.user).getTimeLine():
            collected.extend(Tweet(status).getHashtags())
        return collected
class TimesTopicInTl(object):
    """Counts how often a user mentions a topic in their timeline."""
    def __init__(self, user, topic):
        self.user = user
        self.topic = topic
        self.times = 0              # kept for interface compatibility; unused

    def GetTimesTopicInTl(self):
        '''
        Return how many of the user's timeline tweets contain the topic
        (substring match against the lower-cased tweet text).
        '''
        timeline = User(self.user).getTimeLine()
        return sum(
            1 for status in timeline
            if self.topic in Tweet(status).getText().lower()
        )
def active_users(topic):
    '''
    Return a descending-sorted list of the users most active with the given
    topic in their recent tweets (timeline). Each element is a list
    [times_topic_used, user, other_hashtags_in_timeline].
    (Note: the original docstring listed the element order as
    [user, times, hashtags]; the code has always produced [times, user, hl].)
    '''
    act_users = []
    topl = TopicList(topic)
    tl = topl.GetTopicList()
    userl = UsersList(tl)
    userslist = userl.GetUsersList()
    for user in userslist:
        hashtagsl = HashtagsList(user)
        hl = hashtagsl.GetHashtagsList()
        # Renamed from `t` so the module-level Twitter client is not shadowed.
        counter = TimesTopicInTl(user, topic)
        times = counter.GetTimesTopicInTl()
        # Bug fix: the original tested the undefined name
        # times_topic_in_timeline(user, topic) here (NameError at runtime);
        # the already-computed `times` is what was meant.
        if times != 0:
            act_users.append([times, user, hl])
    return sorted(act_users, reverse=True)
# Smoke test with the topic "betis". Hits the live twitter API on import:
# requires valid credentials and network access. Python 2 print statement.
for el in active_users("betis"):
    print el
UTF-8
Python
false
false
2,014
8,375,186,274,038
89ba040cc808b8ceadb64d4143e6d14b214c85ee
840381ae1a6475bd9b8facacf08b2033df37b553
/webapp/settings.py
48f3b1f1a2f7a993da25f105652fb0a987763e2d
[]
no_license
azzu25/uploader
https://github.com/azzu25/uploader
a7a9af204f88dbd47245c4b20db3a7c9c2fcd2a2
4ab11fd536f4574af839721944969d8d64c196cd
refs/heads/master
2016-09-05T21:18:58.479166
2013-04-11T11:03:12
2013-04-11T11:03:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Django settings for webapp project.
from env_settings.base import *

# Absolute path of the directory containing this settings module.
# (os comes via the star import above -- TODO confirm.)
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
# NOTE(review): leftover debug print; executes on every import of settings.
print PROJECT_ROOT

MEDIA_ROOT = os.path.join(PROJECT_ROOT,'uploaded/files/')

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        # NOTE(review): hardcoded absolute Windows path; breaks on any other
        # machine. Should come from an env var or be relative to PROJECT_ROOT.
        'NAME': 'E:/tokendb.db', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# Redirect the log handlers defined in env_settings.base to local files.
LOGGING['handlers']['error_log_handler'].update({'filename':'errors_log.log'})
LOGGING['handlers']['post_data_handler'].update({'filename':'post_data_log.log'})
UTF-8
Python
false
false
2,013
1,417,339,256,203
ac132fcbeeb17f3a7f3e3985911e0de12eefdc30
a23eba06ad0382d090a306e4c9a9700176b20ad3
/standard/windows/OppSourceWindow.py
2e56e6ea190ee078e44211e035eb5622ff27b359
[]
no_license
koapesrl/segupak
https://github.com/koapesrl/segupak
08df882b4ae4ca10845f5fc778c760bdfcbcd5ac
5dae4b193b8d0ff5ea6e0e1f0e6932b074f4382b
refs/heads/master
2016-06-02T15:04:01.516940
2013-08-17T17:04:36
2013-08-17T17:04:36
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#encoding: utf-8
from OpenOrange import *

# Resolve the framework-level parent class for this window ("OppSourceWindow"
# specializing "MasterWindow") via OpenOrange's SuperClass factory.
ParentOppSourceWindow = SuperClass("OppSourceWindow", "MasterWindow", __file__)

class OppSourceWindow(ParentOppSourceWindow):
    # Nothing is overridden; the class exists so the framework can resolve a
    # concrete window class for this name.
    pass
#encoding: utf-8
import unittest
import zmq
from wsgid.core.wsgid import Wsgid
from wsgid.core.message import Message
from wsgid.core.parser import parse_options
import wsgid.conf as conf
import sys
from mock import patch, Mock, MagicMock
class WsgidTest(unittest.TestCase):
    """Tests for Wsgid._create_wsgi_environ (mongrel2 headers -> WSGI
    environ) and for the mongrel2 async-upload handling in
    Wsgid._call_wsgi_app."""

    def setUp(self):
        self.wsgid = Wsgid()
        # Representative mongrel2 request headers; individual tests tweak
        # single keys before building the environ.
        self.sample_headers = {
            'METHOD': 'GET',
            'VERSION': 'HTTP/1.1',
            'PATTERN': '/root',
            'URI': '/more/path/',
            'PATH': '/more/path',
            'QUERY': 'a=1&b=4&d=4',
            'host': 'localhost',
            'content-length': '42',
            'content-type': 'text/plain',
            'x-forwarded-for': '127.0.0.1'
        }
        # Reset command-line options so each test starts from defaults.
        sys.argv[1:] = []
        parse_options()

    def tearDown(self):
        self.sample_headers = {}
        conf.settings = None

    '''
    Creates the SCRIPT_NAME header from the mongrel2 PATTERN header.
    SCRIPT_NAME should be the PATTERN without any regex parts.
    '''
    def test_script_name_header_simple_path(self):
        self.sample_headers['PATTERN'] = "/py"
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals("/py", environ['SCRIPT_NAME'])

    def test_environ_script_name_header_more_comples_header(self):
        # Trailing slash must be stripped from SCRIPT_NAME.
        self.sample_headers['PATTERN'] = '/some/more/path/'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals("/some/more/path", environ['SCRIPT_NAME'])

    def test_environ_script_name_header_root(self):
        self.sample_headers['PATTERN'] = '/'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals("", environ['SCRIPT_NAME'])

    '''
    PATH_INFO comes from (URI - SCRIPT_NAME) or (PATH - SCRIPT_NAME)
    '''
    def test_environ_path_info(self):
        self.sample_headers['PATTERN'] = '/py'
        self.sample_headers['PATH'] = '/py/some/py/path'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals("/some/py/path", environ['PATH_INFO'])

    def test_environ_path_info_app_root(self):
        self.sample_headers['PATTERN'] = '/py'
        self.sample_headers['PATH'] = '/py'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals("", environ['PATH_INFO'])

    def test_environ_unquoted_path_info(self):
        # Percent-encoded characters must be unquoted in PATH_INFO.
        self.sample_headers['PATTERN'] = '/py/'
        self.sample_headers['PATH'] = '/py/so%20me/special%3f/user%40path'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('/so me/special?/user@path', environ['PATH_INFO'])

    '''
    Generates de REQUEST_METHOD variable
    '''
    def test_environ_request_method(self):
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertTrue(environ.has_key('REQUEST_METHOD'))
        self.assertEquals('GET', environ['REQUEST_METHOD'])

    def test_environ_query_string(self):
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals("a=1&b=4&d=4", environ['QUERY_STRING'])

    def test_environ_no_query_string(self):
        #Not always we have a QUERY_STRING
        del self.sample_headers['QUERY']
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals("", environ['QUERY_STRING'])

    def test_environ_server_port(self):
        self.sample_headers['host'] = 'localhost:443'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('443', environ['SERVER_PORT'])

    def test_environ_server_port_default_port(self):
        # No explicit port in the host header -> default HTTP port.
        self.sample_headers['host'] = 'localhost'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('80', environ['SERVER_PORT'])

    def test_environ_server_name(self):
        self.sample_headers['host'] = 'localhost:8080'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('localhost', environ['SERVER_NAME'])

    def test_environ_server_name_default_port(self):
        self.sample_headers['host'] = 'someserver'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('someserver', environ['SERVER_NAME'])

    '''
    HTTP_HOST must inclue the port, if present.
    '''
    def test_environ_http_host(self):
        self.sample_headers['host'] = 'localhost:8080'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('localhost:8080', environ['HTTP_HOST'])

    def test_environ_content_type(self):
        self.sample_headers['content-type'] = 'application/xml'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('application/xml', environ['CONTENT_TYPE'])

    def test_environ_no_content_type(self):
        del self.sample_headers['content-type']
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('', environ['CONTENT_TYPE'])

    def test_environ_content_length(self):
        self.sample_headers['content-length'] = '42'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('42', environ['CONTENT_LENGTH'])

    def test_environ_no_content_length(self):
        del self.sample_headers['content-length']
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('', environ['CONTENT_LENGTH'])

    '''
    Comes from mongrel2 VERSION header
    '''
    def test_environ_server_protocol(self):
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertTrue(environ.has_key('SERVER_PROTOCOL'))
        self.assertEquals('HTTP/1.1', environ['SERVER_PROTOCOL'])

    def test_eviron_remote_addr(self):
        # REMOTE_ADDR comes from the x-forwarded-for header.
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('127.0.0.1', environ['REMOTE_ADDR'])

    '''
    Non Standard headers (X-) are passed untouched
    '''
    def test_environ_non_standart_headers(self):
        self.sample_headers['X-Some-Header'] = 'some-value'
        self.sample_headers['x-other-header'] = 'other-value'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('some-value', environ['X-Some-Header'])
        self.assertEquals('other-value', environ['x-other-header'])

    def test_environ_http_host_header(self):
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('localhost', environ['HTTP_HOST'])

    '''
    All headers (but HTTP common headers and X- headers) must be HTTP_ suffixed
    '''
    def test_environ_other_headers(self):
        self.sample_headers['my_header'] = 'some-value'
        self.sample_headers['OTHER_HEADER'] = 'other-value'
        self.sample_headers['X-Some-Header'] = 'x-header'
        self.sample_headers['Accept'] = '*/*'
        self.sample_headers['Referer'] = 'http://www.someserver.com'
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals('some-value', environ['HTTP_MY_HEADER'])
        self.assertEquals('other-value', environ['HTTP_OTHER_HEADER'])
        self.assertEquals('x-header', environ['X-Some-Header'])
        self.assertEquals('*/*', environ['HTTP_ACCEPT'])
        self.assertEquals('http://www.someserver.com', environ['HTTP_REFERER'])

    '''
    Test a complete request, with all typed of headers.
    '''
    def test_eviron_complete_request(self):
        request = {
            'METHOD': 'GET',
            'VERSION': 'HTTP/1.1',
            'PATTERN': '/py',
            'URI': '/py/some/path',
            'PATH': '/py/some/path',
            'QUERY': 'a=1&b=4&d=4',
            'host': 'localhost',
            'Accept': '*/*',
            'CUSTOM_HEADER': 'value',
            'User-Agent': 'some user agent/1.0',
            'content-length': '42',
            'content-type': 'text/plain',
            'x-forwarded-for': '127.0.0.1'
        }
        environ = self.wsgid._create_wsgi_environ(request)
        self.assertEquals(24, len(environ))
        self.assertEquals('GET', environ['REQUEST_METHOD'])
        self.assertEquals('HTTP/1.1', environ['SERVER_PROTOCOL'])
        self.assertEquals('/py', environ['SCRIPT_NAME'])
        self.assertEquals('a=1&b=4&d=4', environ['QUERY_STRING'])
        self.assertEquals('/some/path', environ['PATH_INFO'])
        self.assertEquals('localhost', environ['SERVER_NAME'])
        self.assertEquals('80', environ['SERVER_PORT'])
        self.assertEquals('value', environ['HTTP_CUSTOM_HEADER'])
        self.assertEquals('*/*', environ['HTTP_ACCEPT'])
        self.assertEquals('some user agent/1.0', environ['HTTP_USER-AGENT'])
        # content-length/content-type are exposed both CGI-style and raw.
        self.assertEquals('42', environ['CONTENT_LENGTH'])
        self.assertEquals('42', environ['content-length'])
        self.assertEquals('text/plain', environ['CONTENT_TYPE'])
        self.assertEquals('text/plain', environ['content-type'])
        self.assertEquals('localhost', environ['HTTP_HOST'])
        self.assertEquals('127.0.0.1', environ['REMOTE_ADDR'])

    '''
    Some values are fixed:
     * wsgi.multithread = False
     * wsgi.multiprocess = True
     * wsgi.run_once = True
     * wsgi.version = (1,0)
    '''
    def test_environ_fixed_values(self):
        environ = self.wsgid._create_wsgi_environ(self.sample_headers)
        self.assertEquals(False, environ['wsgi.multithread'])
        self.assertEquals(True, environ['wsgi.multiprocess'])
        self.assertEquals(True, environ['wsgi.run_once'])
        self.assertEquals((1,0), environ['wsgi.version'])
        self.assertEquals("http", environ['wsgi.url_scheme'])
        self.assertEquals(sys.stderr, environ['wsgi.errors'])

    def test_join_m2_chroot_to_async_upload_path(self):
        # The value in x-mongrel2-upload-{start,done} should be prepended with the
        # value of --m2-chroot, passed on the command line
        with patch('zmq.Context'):
            # NOTE(review): the nested helper closes over `message` from the
            # enclosing scope instead of using its m2message parameter --
            # works because both are the same object, but confirm intent.
            def _serve_request(wsgid, m2message, expected_final_path):
                with patch.object(wsgid, '_create_wsgi_environ'):
                    wsgid._create_wsgi_environ.return_value = {}
                    with patch("__builtin__.open") as mock_open:
                        with patch('os.unlink'):
                            wsgid._call_wsgi_app(message, Mock())
                    self.assertEquals(1, mock_open.call_count)
                    mock_open.assert_called_with(expected_final_path)
            self._reparse_options('--mongrel2-chroot=/var/mongrel2')
            wsgid = Wsgid(app = Mock(return_value=['body response']))
            message = self._create_fake_m2message('/uploads/m2.84Yet4')
            _serve_request(wsgid, message, '/var/mongrel2/uploads/m2.84Yet4')
            # Without --mongrel2-chroot the path is used as-is.
            self._reparse_options()
            _serve_request(wsgid, message, '/uploads/m2.84Yet4')

    def test_remove_async_file_after_request_finishes_ok(self):
        # Since mongrel2 does not remove the originial temp file, wsgid
        # must remove it after the request was successfully (or not) handled.
        with patch('zmq.Context'):
            with patch('os.unlink') as mock_unlink:
                def _serve_request(wsgid, m2message):
                    with patch.object(wsgid, '_create_wsgi_environ'):
                        wsgid._create_wsgi_environ.return_value = {}
                        with patch("__builtin__.open") as mock_open:
                            wsgid._call_wsgi_app(message, Mock())
                wsgid = Wsgid(app = Mock(return_value=['body response']))
                message = self._create_fake_m2message('/uploads/m2.84Yet4')
                _serve_request(wsgid, message)
                mock_unlink.assert_called_with('/uploads/m2.84Yet4')

    def test_remove_async_file_after_failed_request(self):
        # Even if the request failed, wsgid must remove the temporary file.
        with patch('zmq.Context'):
            with patch('os.unlink') as mock_unlink:
                def _serve_request(wsgid, m2message):
                    with patch.object(wsgid, '_create_wsgi_environ'):
                        wsgid._create_wsgi_environ.return_value = {}
                        with patch("__builtin__.open") as mock_open:
                            wsgid._call_wsgi_app(message, Mock())
                wsgid = Wsgid(app = Mock(side_effect = Exception("Failed")))
                wsgid.log = Mock()
                message = self._create_fake_m2message('/uploads/m2.84Yet4')
                _serve_request(wsgid, message)
                mock_unlink.assert_called_with('/uploads/m2.84Yet4')

    def test_protect_against_exception_on_file_removal(self):
        # An OSError from os.unlink must be logged, not propagated.
        with patch('zmq.Context'):
            with patch('os.unlink') as mock_unlink:
                mock_unlink.side_effect = OSError("File does not exist")
                def _serve_request(wsgid, m2message):
                    with patch.object(wsgid, '_create_wsgi_environ'):
                        wsgid._create_wsgi_environ.return_value = {}
                        with patch("__builtin__.open") as mock_open:
                            wsgid._call_wsgi_app(message, Mock())
                wsgid = Wsgid(app = Mock(return_value = ['body response']))
                wsgid.log = Mock()
                message = self._create_fake_m2message('/uploads/m2.84Yet4')
                _serve_request(wsgid, message)
                self.assertEquals(1, wsgid.log.exception.call_count)

    def test_do_not_try_to_remove_if_not_upload_request(self):
        with patch('zmq.Context'):
            with patch('os.unlink') as mock_unlink:
                def _serve_request(wsgid, m2message):
                    with patch.object(wsgid, '_create_wsgi_environ'):
                        wsgid._create_wsgi_environ.return_value = {}
                        with patch("__builtin__.open") as mock_open:
                            wsgid._call_wsgi_app(message, Mock())
                wsgid = Wsgid(app = Mock(return_value = ['body response']))
                wsgid.log = Mock()
                message = Mock()
                message.headers = [] #It's not an upload message
                message.client_id = 'uuid'
                message.server_id = '1'
                message.is_upload_done.return_value = False
                _serve_request(wsgid, message)
                self.assertEquals(0, mock_unlink.call_count)

    def _reparse_options(self, *args):
        # Helper: re-run option parsing with the given fake argv tail.
        sys.argv[1:] = args
        conf.settings = None
        parse_options()

    def _create_fake_m2message(self, async_upload_path):
        # Helper: a Mock mongrel2 message flagged as an async upload.
        message = Mock()
        message.headers = {'x-mongrel2-upload-start': async_upload_path,
                           'x-mongrel2-upload-done': async_upload_path}
        message.async_upload_path = async_upload_path
        message.server_id = 'uuid'
        message.client_id = '42'
        return message
class WsgidReplyTest(unittest.TestCase):
    """Tests for the raw mongrel2 reply frames produced by Wsgid._reply
    ("<uuid> <len>:<conn_id>, <http response>")."""

    def setUp(self):
        self.wsgid = Wsgid()
        self.sample_uuid = 'bb3ce668-4528-11e0-94e3-001fe149503a'
        self.sample_conn_id = '42'

    def test_reply_no_headers(self):
        m2msg = self.wsgid._reply(self.sample_uuid, self.sample_conn_id, '200 OK', body='Hello World\n')
        resp = "%s 2:42, HTTP/1.1 200 OK\r\nContent-Length: 12\r\n\r\nHello World\n" % (self.sample_uuid)
        self.assertEquals(resp, m2msg)

    def test_reply_no_body(self):
        headers = [('Header', 'Value'), ('X-Other-Header', 'Other-Value')]
        m2msg = self.wsgid._reply(self.sample_uuid, self.sample_conn_id, '200 OK', headers=headers)
        # The backslash-continued string lines intentionally start at column
        # zero so no stray indentation leaks into the expected wire format.
        resp = "%s 2:42, HTTP/1.1 200 OK\r\n\
Header: Value\r\n\
X-Other-Header: Other-Value\r\n\
Content-Length: 0\r\n\r\n" % (self.sample_uuid)
        self.assertEquals(resp, m2msg)

    def test_reply_with_body_andheaders(self):
        headers = [('Header', 'Value'), ('X-Other-Header', 'Other-Value')]
        body = "Hello World\n"
        m2msg = self.wsgid._reply(self.sample_uuid, self.sample_conn_id, '200 OK', headers=headers, body=body)
        resp = "%s 2:42, HTTP/1.1 200 OK\r\n\
Header: Value\r\n\
X-Other-Header: Other-Value\r\n\
Content-Length: 12\r\n\r\n\
Hello World\n" % (self.sample_uuid)
        self.assertEquals(resp, m2msg)
UTF-8
Python
false
false
2,012
7,138,235,685,643
62f0bc32d2347b7c197b25a9d49a5d709531d82a
1c45f82d87aceab5c994c3ab00ef3f9ca7cdad07
/menu_noblock_test.py
a0c57c0922af8477e379587701db6517d0b21ae6
[]
no_license
kuninagakura/pyStdDraw
https://github.com/kuninagakura/pyStdDraw
212517eb4d663a9ef3e584682f260149a4e4998b
8b28ab551ade9152f14c7140d7c42114391ff48c
refs/heads/master
2021-01-01T19:52:13.285088
2014-07-23T15:33:22
2014-07-23T15:33:22
22,156,382
2
4
null
null
null
null
null
null
null
null
null
null
null
null
null
import os
import sys
import pygame
from pygame.locals import *

# Make the bundled 'gamelib' package importable relative to this script.
progname = sys.argv[0]
progdir = os.path.dirname(progname)
sys.path.append(os.path.join(progdir,'gamelib'))
from popup_menu import NonBlockingPopupMenu
import stddrawpygame as stddraw

# Drawing surface: a unit square centered on the origin.
stddraw.createWindow()
stddraw.setXscale(-1.0, 1.0)
stddraw.setYscale(-1.0, 1.0)

# Bouncing-ball state: position (rx, ry), velocity (vx, vy), radius;
# dt is the frame delay (ms) passed to stddraw.show().
rx = .480
ry = .860
vx = .015
vy = .023
radius = .05
dt = 20

## Menu data and functions.
# NOTE(review): `global` at module level is a no-op; these two statements
# have no effect.
global menu_data
global _surface
menu_data = (
    'Main',
    'Save',
    'Quit',
)
menu = NonBlockingPopupMenu(menu_data)
def handle_menu(e):
    """Handle a popup-menu USEREVENT: hide the menu, save, or quit.

    Python 2 print statements throughout.
    """
    global menu
    print 'Menu event: %s.%d: %s' % (e.name,e.item_id,e.text)
    if e.name is None:
        # A None name signals the menu was dismissed.
        print 'Hide menu'
        menu.hide()
    elif e.name == 'Main':
        print 'I am in the menu'
        if e.text == 'Save':
            print 'I hit save'
            # NOTE(review): _surface is declared `global` above but never
            # assigned anywhere in this script, so this branch would raise
            # NameError -- confirm where the surface is meant to come from.
            stddraw.save(_surface)
        elif e.text == 'Quit':
            quit()
## Main loop.
while 1:
    # Update ball position and draw it there.
    if abs(rx + vx) + radius > 1.0:
        vx = -vx
    if abs(ry + vy) + radius > 1.0:
        vy = -vy
    rx = rx + vx
    ry = ry + vy
    #stddraw.clear()
    # Repaint the background each frame, then the ball.
    stddraw.setPenColor(stddraw.GRAY)
    stddraw.filledSquare(0, 0, 1.0)
    stddraw.setPenColor(stddraw.BLACK)
    stddraw.filledCircle(rx, ry, radius)
    stddraw.show(dt)
    # If the menu is visible it will be drawn.
    menu.draw()
    pygame.display.flip()
    # Pass them through the menu. If the menu is visible it will consume mouse
    # events and return any unhandled events; else it will return them all.
    # Process the unhandled events returned by the menu. Function handle_menu()
    # processes only events posted by the menu.
    for e in menu.handle_events(pygame.event.get()):
        if e.type == KEYDOWN:
            print 'Key pressed:',pygame.key.name(e.key)
        elif e.type == MOUSEBUTTONUP:
            print 'Show menu'
            menu.show()
        elif e.type == USEREVENT:
            if e.code == 'MENU':
                print 'handle menu is called'
                handle_menu(e)
UTF-8
Python
false
false
2,014
19,078,244,761,951
b8b0c5f65a3e2fa3ed0e980ff811b8cef51b2143
0dfe2907eda086a9e9ebce87d997f94994ced51e
/handlers/signup.py
2a7f69977df80244769e75cff24207f4a646ddfd
[]
no_license
suharshs/CU-Bucket
https://github.com/suharshs/CU-Bucket
559426b26378e546ae52194a563e4e3de170f05d
1e835a69774c647e270fd032edbb61ef7002c236
refs/heads/master
2021-01-22T01:28:13.910376
2012-12-05T06:42:47
2012-12-05T06:42:47
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from base import BaseHandler
import simplejson as json
class SignupHandler(BaseHandler):
    """Serves the signup page and creates new user accounts."""

    def get(self):
        # Render the signup form.
        self.render('signup.html')

    def post(self):
        """Create the account; on success set the session cookie and reply
        with {"passed": "true"/"false"} as JSON."""
        username = self.get_argument('username', '')
        password = self.get_argument('password', '')
        created = self.create_user(username, password)
        if created:
            self.set_secure_cookie('username', username) # set the session
            self.write(json.dumps({"passed": "true"}))
            self.finish()
        else:
            self.write(json.dumps({"passed": "false"}))
            self.finish()

    def create_user(self, username, password):
        """Insert a new User row; return False if the name is already taken.

        SECURITY fix: the original interpolated username/password directly
        into the SQL string (classic SQL injection). Parameterized queries
        are used instead (torndb-style %s placeholders -- TODO confirm the
        db wrapper's placeholder syntax).
        SECURITY NOTE(review): the password is still stored in plaintext;
        it should be hashed (e.g. bcrypt/PBKDF2) before insertion.
        """
        sql = "SELECT name FROM User WHERE name = %s"
        results = self.application.db.query(sql, username)
        if (len(results) > 0):
            return False
        sql = "INSERT INTO User (name, password) VALUES (%s, %s)"
        self.application.db.execute(sql, username, password)
        return True
UTF-8
Python
false
false
2,012
19,473,381,744,512
a5713ea3575e8e5d446cb395b77e8237b013fc70
1edaca5ed0f3696b873c65f1189f01015f6c17bd
/shift_file.py
c40094d046b5a153dc5ba37c66e974c48cb0c0c0
[]
no_license
Lactor/UROP_Comp
https://github.com/Lactor/UROP_Comp
cf1016193a63354f141c0ebfb3861c7c5d132561
b277ee9bab1641cdb08495d0fabe104ddd8520ab
refs/heads/master
2020-04-17T20:53:42.004471
2014-06-01T03:33:00
2014-06-01T03:33:00
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys

''' Script used to generate an offset (shifted) SED file. '''

# Column index of the intensity value when an input line is split on ' '.
FILE_SECOND_COLUMN = 4


def shift_wavelengths(wavelengths, shift_factor):
    """Return a copy of `wavelengths` where every point except the last
    (which has no successor) is moved `shift_factor` of the way toward the
    next point: w[i] + shift_factor * (w[i+1] - w[i])."""
    shifted = list(wavelengths)
    for i in range(len(shifted) - 1):
        shifted[i] = shifted[i] + shift_factor * (shifted[i + 1] - shifted[i])
    return shifted


def main():
    """Read 'wavelength ... intensity' rows from argv[1], shift the
    wavelength grid by float(argv[3]) and write tab-separated rows to
    argv[2]."""
    input_file_name = sys.argv[1]
    output_file_name = sys.argv[2]
    shift_factor = float(sys.argv[3])

    data = [[], []]  # first list: wavelengths, second list: intensities
    # Bug fix: both file handles are now closed deterministically via
    # `with` (the original never closed either of them).
    with open(input_file_name, 'r') as input_file:
        for line in input_file:
            values = line.split(' ')
            print(values)
            data[0].append(float(values[0]))
            data[1].append(float(values[FILE_SECOND_COLUMN]))

    # Data in logspace
    shifted = shift_wavelengths(data[0], shift_factor)

    with open(output_file_name, 'w') as output_file:
        # NOTE(review): the final sample is dropped here exactly as in the
        # original (it cannot be shifted, having no successor) -- confirm
        # that losing the last row is intended.
        for i in range(len(shifted) - 1):
            output_file.write(str(shifted[i]) + " \t " + str(data[1][i]) + " \n")


if __name__ == '__main__':
    main()
UTF-8
Python
false
false
2,014
19,378,892,464,554
9919a93c399cf0a55996c6d090edebe56bbd469e
b7cbbe8dd8c2a293679818b8dacaf9b171a0bf84
/models/slda/test/generate_data.py
5d388ef66fafd65db40d3cf77e51c73d89c2af16
[]
no_license
kenoskynci/mad_topic_model
https://github.com/kenoskynci/mad_topic_model
4887c92139fa892d4480778c90f170d4da3ada86
49ca46d6f6988a788f017deba0aa3baf89d8747f
refs/heads/master
2020-03-19T09:38:01.970894
2014-05-14T15:44:08
2014-05-14T15:44:08
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""
Generates artificial data to test SLDA model
Outputs:
+ n_topics files starting with given prefix:
prefix_1
prefix_2
prefix_...
prefix_n
where n = n_topics
+ a label file:
prefix_label
"""
import argparse
import numpy as np
from collections import defaultdict
def generate_documents(n_authors, n_topics, n_docs, n_words,
                       n_words_per_doc, divider=1.0):
    """
    Implements the generative process for LDA.

    Generates documents for each of the n_authors, where each author has
    a Dirichlet-parameterized distribution over n_topics and each topic
    has a multinomial distribution over n_words.

    :param n_authors: number of authors to simulate
    :param n_topics: number of latent topics
    :param n_docs: base documents per author (author a gets n_docs + a
        documents, so per-author counts are deliberately uneven)
    :param n_words: vocabulary size
    :param n_words_per_doc: mean document length (Poisson draw)
    :param divider: scales down the author Dirichlet parameters
    :return: flat list of documents; each document is a defaultdict
        mapping {word id: count} plus the key 'AUTHOR' -> author index.
        (The original comment claimed a nested per-author list; the code
        has always produced a flat list, which the label writer relies on.)
    """
    # Generate author Dirichlet parameter vectors over topics.
    author_p = []
    for _ in range(n_authors):
        x = 0.01 + np.random.rand(n_topics) / divider
        author_p.append(x)
        print(x)
    # Generate topic multinomial distributions over words
    # (drawn from a Dirichlet with random concentration parameters).
    x = np.random.rand(n_words)
    topic_p = np.random.dirichlet(x, n_topics)
    docs = []
    for a in range(n_authors):
        # +a to get an uneven number of documents per author.
        for d in range(n_docs + a):
            doc = defaultdict(lambda: 0)
            doc['AUTHOR'] = a
            # FIX: removed `word_counts = np.zeros(n_words)` — it was
            # assigned and never used.
            words_in_doc = np.random.poisson(n_words_per_doc)
            # This document's per-word multinomial distributions over topics.
            doc_topic_dist = np.random.dirichlet(author_p[a], words_in_doc)
            # Number of words drawn from each topic in this document.
            doc_topics = np.array([np.random.multinomial(1, dist)
                                   for dist in doc_topic_dist]).sum(axis=0)
            for topic, count in enumerate(doc_topics):
                for i in range(count):
                    word = np.random.multinomial(1, topic_p[topic]).nonzero()[0][0]
                    doc[word] += 1
            docs.append(doc)
    return docs
def save_docs(docs, prefix, topic_type):
    """
    Write documents to a file as "<n_words> word:count word:count ...".

    docs: flat list of {word id: count} dicts (with an 'AUTHOR' key)
        as produced by generate_documents
    prefix: output prefix; the output file is $(prefix)_$(topic_type)
    topic_type: word type id (integer)
    """
    with open(prefix + "_" + str(topic_type), 'w') as f:
        for doc in docs:
            # Number of word entries, excluding the 'AUTHOR' marker key.
            l = len(doc)
            if "AUTHOR" in doc:
                l -= 1
            f.write(str(l) + " ")
            for k in doc:
                # FIX: original used `k is "AUTHOR"` — an identity
                # comparison that only works via CPython string interning.
                # Use equality.
                if k == "AUTHOR":
                    continue
                f.write("{}:{} ".format(k, doc[k]))
            f.write("\n")
if __name__ == "__main__":
    # Command-line driver: one corpus file per ngram type, plus a shared
    # author-label file written from the last generated corpus.
    parser = argparse.ArgumentParser(
        description='Generate artificial data to test SLDA model')
    parser.add_argument('--prefix', help='Prefix for output files')
    parser.add_argument('--n_authors', help='Number of authors to generate',
                        default=2, type=int)
    parser.add_argument('--n_docs',
                        help='Number of documents per author to generate',
                        default=20, type=int)
    parser.add_argument('--n_topics', help='Number of topics',
                        default=2, type=int)
    parser.add_argument('--n_types', help='Number of ngram types',
                        default=3, type=int)
    parser.add_argument('--n_words', help='vocabulary size',
                        default=1500, type=int)
    parser.add_argument('--n_words_per_doc',
                        help='Mean number of words per doc',
                        default=1000, type=int)
    parser.add_argument('--divisor', help='Divisor for dirichlet parameters',
                        default=1.0, type=float)
    args = parser.parse_args()

    # Write a corpus file for each ngram type.
    type_docs = None
    for type_id in range(args.n_types):
        type_docs = generate_documents(args.n_authors, args.n_topics,
                                       args.n_docs, args.n_words,
                                       args.n_words_per_doc,
                                       divider=args.divisor)
        save_docs(type_docs, args.prefix, type_id)

    # Go through the last corpus once and write the label file.
    with open(args.prefix + "_labels", 'w') as f:
        for doc in type_docs:
            f.write(str(doc['AUTHOR']) + "\n")
UTF-8
Python
false
false
2,014
1,692,217,129,487
d01fd6423c2f37e96227a76157bb82f6288df3c1
3148826c0f32d19eeb176c38b3aa46c6c34d7d03
/game/management/commands/initdb.py
8c3933f927ef8016f8da3ccd4b8abe07c1b6f8ea
[]
no_license
Nagasaki45/Audio-Game
https://github.com/Nagasaki45/Audio-Game
cf5a9cb70a9cad1243a4e8bb2eb4754ca4c21d61
cde0e8c601a626583aec21552a9481254fc3e081
refs/heads/master
2016-09-01T16:55:00.420889
2014-03-05T13:16:47
2014-03-05T13:16:47
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from django.core.management.base import BaseCommand, CommandError
from ...models import Game
class Command(BaseCommand):
    # Management command that seeds the database with the single game room
    # the app expects to exist.
    help = 'Run it to create game room #1'

    def handle(self, *args, **options):
        # Idempotent: get_or_create only inserts when slug=1 is missing.
        Game.objects.get_or_create(slug=1)
UTF-8
Python
false
false
2,014
13,443,247,644,666
e63f471b55271ffada51fd95485f48a9c07efe4a
834d1ac3728b3d29856a6099420ba88939bc05d3
/apps/images/views.py
7ad94acbbd051d8a0846dfdeeba293639ec367cf
[]
no_license
zhangchunlin/homemm
https://github.com/zhangchunlin/homemm
048ff19cff30811df8d7d7b55a618cfbcbb39508
f833961418dad289bae586520352464a1b5eb75d
refs/heads/master
2016-09-07T10:45:21.317695
2013-11-07T01:16:14
2013-11-07T01:16:14
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#coding=utf-8
import os
from datetime import datetime
import mimetypes
import StringIO
from uliweb import expose, NotFound,settings
from uliweb.orm import get_model
from werkzeug import Response, wrap_file
import Image
@expose('/')
def index():
    # Front page: empty template context.
    return {}
def _scanimages():
    """Walk all configured root paths and sync found .jpg files into the DB.

    Returns an HTML summary (lines joined with <br />) of directory and
    image counts and how many were newly added. Python 2 code (has_key).
    """
    msgs = []
    ImageRootPaths = get_model("imagerootpaths")
    # Ensure every path from settings.HMMIMAGES.ROOTPATHS has a DB row.
    irp = {}
    for p in list(ImageRootPaths.all()):
        irp[p.path] = True
    for path in settings.HMMIMAGES.ROOTPATHS:
        if not irp.has_key(path):
            rp = ImageRootPaths(path = path)
            rp.save()
    ImageDirPaths = get_model("imagedirpaths")
    Images = get_model("images")
    def get_idp_dict(rp):
        # Map relative dir path -> ImageDirPaths row for one root.
        idp_dict = {}
        for p in list(ImageDirPaths.filter(ImageDirPaths.c.rootpath==rp.id)):
            idp_dict[p.relpath]=p
        return idp_dict
    def get_image_dict(idp):
        # Map filename -> Images row for one directory.
        image_dict = {}
        for img in list(Images.filter(Images.c.dpath==idp.id)):
            image_dict[img.filename]=img
        return image_dict
    def scanapath(rp):
        # Walk one root path; directory rows are created lazily (only once
        # a .jpg is actually found) and image rows for unknown files.
        idp_dict = get_idp_dict(rp)
        for root,dirs,files in os.walk(rp.path):
            relp = os.path.relpath(root,rp.path)
            database_havenot = not idp_dict.has_key(relp)
            if database_havenot:
                image_dict = {}
            else:
                idp = idp_dict[relp]
                image_dict = get_image_dict(idp)
            for fn in files:
                if fn.lower().endswith(".jpg"):
                    if database_havenot:
                        idp = ImageDirPaths(rootpath=rp.id,relpath = relp)
                        idp.save()
                        idp_dict[idp.relpath]=idp
                        database_havenot = False
                        image_dict = get_image_dict(idp)
                    if not image_dict.has_key(fn):
                        img = Images(dpath=(idp.id),filename=fn)
                        img.save()
    # Record before/after counts so the summary can report additions.
    oldnumi = Images.all().count()
    oldnumd = ImageDirPaths.all().count()
    for rp in list(ImageRootPaths.all()):
        scanapath(rp)
    newnumi = Images.all().count()
    newnumd = ImageDirPaths.all().count()
    s ="dir num:%d"%(newnumd)
    if newnumd>oldnumd:
        s += ",add %d dir"%(newnumd-oldnumd)
    msgs.append(s)
    s ="images num:%d"%(newnumi)
    if newnumi>oldnumi:
        s += ",add %d images"%(newnumi-oldnumi)
    msgs.append(s)
    return ("<br />").join(msgs)
@expose('/scanimages')
def scanimages():
    # Trigger a full rescan and surface the summary via a flash message.
    summary = _scanimages()
    flash(summary)
    return "Scan all images OK!"
@expose('/imgdir/<int:id>')
def imgdir(id):
    # Directory view: template receives the directory id to list its images.
    return {"dirid":id}
def _getthumbnaildata(fp,width=0):
    # Build a JPEG thumbnail of the image at `fp` and return its raw bytes.
    # width=0 means "auto": 6% of the original width. NOTE(review): the
    # size arithmetic relies on Python 2 integer division — confirm before
    # porting to Python 3.
    img = Image.open(fp)
    if width==0:
        width = img.size[0] * 6 / 100
    height = img.size[1] * width / (img.size[0])
    img.thumbnail((width,height),Image.ANTIALIAS)
    timgoutput = StringIO.StringIO()
    img.save(timgoutput,format="JPEG",quality = 95)
    return timgoutput.getvalue()
@expose('/imgthumbnail/<int:id>')
def imgthumbnail(id):
    # Serve a thumbnail for image `id`; optional ?width= query parameter.
    width = int(request.GET.get('width',0))
    Images = get_model("images")
    img = Images.get(id)
    idp = img.dpath
    irp = idp.rootpath
    filename = os.path.join(irp.path,idp.relpath,img.filename)
    # NOTE(review): Content-Type is guessed from the source filename, but
    # the body is always JPEG — confirm whether that mismatch is intended.
    guessed_type = mimetypes.guess_type(filename)
    mime_type = guessed_type[0] or 'application/octet-stream'
    headers = []
    headers.append(('Content-Type', mime_type))
    return Response(_getthumbnaildata(filename,width), status=200, headers=headers,
        direct_passthrough=True)
@expose('/imgview/<int:id>')
def imgview(id):
    # Placeholder: single-image view not implemented yet.
    pass
def _opener(filename):
if not os.path.exists(filename):
raise NotFound
return (
open(filename, 'rb'),
datetime.utcfromtimestamp(os.path.getmtime(filename)),
int(os.path.getsize(filename))
)
@expose('/img/<int:id>')
def img(id):
    # Serve the original image file for image `id` with a guessed MIME type.
    Images = get_model("images")
    img = Images.get(id)
    idp = img.dpath
    irp = idp.rootpath
    filename = os.path.join(irp.path,idp.relpath,img.filename)
    guessed_type = mimetypes.guess_type(filename)
    mime_type = guessed_type[0] or 'application/octet-stream'
    headers = []
    headers.append(('Content-Type', mime_type))
    # _opener raises NotFound for missing files; mtime is currently unused.
    f, mtime, file_size = _opener(filename)
    return Response(wrap_file(request.environ, f), status=200, headers=headers,
        direct_passthrough=True)
@expose('/api/imgs')
def api_imgs():
    # JSON API: page `cpage` of image ids in directory `dirid`, plus the
    # total count for that directory. Python 2 (print statement).
    dirid = int(request.GET.get("dirid",1))
    num1page = int(request.GET.get("num1page",10))
    page = int(request.GET.get("cpage",0))
    Images = get_model("images")
    fl = Images.filter(Images.c.dpath==dirid).offset(page*num1page).limit(num1page)
    tnum = Images.filter(Images.c.dpath==dirid).count()
    # Debug output; the query below is executed a second time for the body.
    print [img.id for img in list(fl)]
    return json({'tnum':tnum,'imgs':[img.id for img in list(fl)]})
UTF-8
Python
false
false
2,013
11,716,670,792,094
2f3e4008a77c5c82e3025ad63c8b980103d40537
f6d6c39af4e933ab33016bfcad7b57d33b04cdc9
/nb.py
3edda012140c1f9795c1c97febb6be4837cd9cfb
[]
no_license
maniteja123/naivebayes
https://github.com/maniteja123/naivebayes
70d69238d6623b2e60520f578ada2b3503dbfe34
e22f196dd8ad964672d652d65650589928155bc8
refs/heads/master
2021-01-15T13:48:22.874169
2014-12-19T08:20:25
2014-12-19T08:20:25
28,220,409
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from collections import Counter as C
import math,sys
class Vocab:
    """Word-count vocabulary built from per-class Counter-dump files.

    Each input file is expected to contain a Counter-style repr:
    comma-separated "'word': count" chunks.
    """
    def __repr__(self):
        return 'The vocabulary'
    def __init__(self,docs):
        # docs: iterable of (filename, target label, document count) triples.
        self.vocabs = {}   # per-target Counter of word counts
        self.vocab=C()     # global Counter across all targets
        self.sizes = {}    # per-target total word occurrences
        self.number = {}   # per-target document count
        for doc,target,num in docs:
            self.number[target] = num
            f = open(doc,'r')
            l=f.read()
            w=l.split(',')
            self.vocabs[target] = C()
            for i in w:
                # Each chunk looks like "'word': count": the word sits
                # between the single quotes, the count is the last token.
                self.vocabs[target].update({i.split("\'")[1]:int(i.split()[-1])})
            self.sizes[target]=sum(self.vocabs[target].values())
            self.vocab.update(self.vocabs[target])
            f.close()
        self.size = len(self.vocab.keys())      # distinct words overall
        self.total = sum(self.number.values())  # total documents
class Naive_Bayes:
    """Multinomial naive Bayes classifier with add-one (Laplace)
    smoothing, trained from a prebuilt Vocab. Python 2 code."""
    def __init__(self,vocabulary,targets):
        self.vocab = vocabulary.vocab        # global word Counter
        self.vocabsize = vocabulary.size     # number of distinct words
        self.vocabs = vocabulary.vocabs      # per-target Counters
        self.count = vocabulary.sizes        # per-target word totals
        self.targets = targets
        self.targ_prob = {}                  # P(target)
        self.word_prob = {}                  # P(word | target)
        self.numbers = vocabulary.number     # per-target document counts
        self.total = vocabulary.total        # total document count
        self.build_naive_bayes()
    def build_naive_bayes(self):
        # Estimate priors and smoothed conditional word probabilities.
        for target in self.targets:
            self.targ_prob[target]=float(self.numbers[target])/self.total
            self.word_prob[target]={}
            for word in self.vocab.keys():
                try:
                    self.word_prob[target][word] = float(self.vocabs[target][word]+1)/(self.count[target]+self.vocabsize)
                except KeyError:
                    # Word never seen for this target: smoothed minimum.
                    self.word_prob[target][word] = float(1)/(self.count[target]+self.vocabsize)
            print len(self.word_prob[target])
    def test_naive_bayes(self,doc):
        # Classify the whitespace-tokenized contents of file `doc` by
        # maximum log posterior; prints per-word debug lines and the winner.
        with open(doc,'r') as f:
            w = f.read().split()
        output = None
        final = -1*sys.maxint
        for target in self.targets:
            prob = 0
            for word in w:
                if word in self.vocab.keys():
                    print target , word , self.word_prob[target][word],prob
                    prob += math.log(self.word_prob[target][word])
            prob += math.log(self.targ_prob[target])
            #print prob
            if prob > final:
                output = target
                final = prob
        print output
# Ad-hoc driver: build the vocabulary from two class dumps, dump it for
# inspection, train the classifier, and classify one test document.
v = Vocab([('manisports.txt','s',350),('manipolitics.txt','p',450)])
print v.size
print v.sizes
f = open('testing1.txt','w+')
f.write(str(v.vocab))
n = Naive_Bayes(v,['s','p'])
n.test_naive_bayes('test.txt')
print n.word_prob['s']['test']
UTF-8
Python
false
false
2,014
15,899,968,970,248
d0117efbb57723bf8f39bd84706362cff2b83c33
f907c5fa0b987d168b90defecfbb634bf343a556
/dogeapp/models.py
1bcbae0cb253620f641e0314ed37e54c42afce25
[]
no_license
hbd/dogefact
https://github.com/hbd/dogefact
b7fff121beec05f79a1f8d776114132f4ab01e91
2790bfce2ea8643489ee99e96fe55441d5b3bfb7
refs/heads/master
2021-01-18T06:34:37.899487
2014-05-10T19:24:08
2014-05-10T19:24:08
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import string
from django.db import models
from django.forms import ModelForm
from django.core.exceptions import ValidationError
from django.core.validators import MaxLengthValidator
class User(models.Model):
phone_number = models.CharField(max_length=500)
start_date = models.DateTimeField('subscription start date', auto_now=True)
current_message = models.PositiveSmallIntegerField(default=0)
confirmation_code = models.CharField(max_length=255)
is_active = models.BooleanField(default=False)
def __unicode__(self):
return self.phone_number
class Meta:
ordering = ['start_date']
verbose_name_plural = "Users"
class Message(models.Model):
content = models.TextField(validators=[MaxLengthValidator(160)])
pub_date = models.DateTimeField('date published', auto_now_add=True,)
def __unicode__(self):
return self.content
class Meta:
ordering = ['pub_date']
verbose_name_plural = "Messages"
class UserForm(ModelForm):
class Meta:
model = User
fields = ['phone_number']
def clean_phone_number(self):
input_number = self['phone_number'].value()
if not input_number:
raise ValidationError("You didn't type a phone number!")
phone_number = ""
for c in input_number:
if c in string.digits:
phone_number += c
elif c not in string.punctuation:
raise ValidationError("The number you entered was invalid. Please try again.")
if len(phone_number) < 10:
raise ValidationError("The phone number you entered was too short. Please try again.")
if len(phone_number) > 15:
raise ValidationError("The phone number you entered was too long. Please try again.")
return input_number
class DeleteUserForm(ModelForm):
class Meta:
model = User
fields = ['phone_number']
UTF-8
Python
false
false
2,014
4,037,269,309,428
ff7c080b644b35c53fde42955f54de393edf0d45
7198006b6a3b209c35b52455c147921fc0b7ad8c
/quotes/api.py
d8fd0c8bc8c2e52792d6d5af7a10243db2ebeec3
[]
no_license
jonkomperda/pyFi
https://github.com/jonkomperda/pyFi
28bfc99dca82e13b619f2b54ad8811971ca5b12e
69a14eb5112d637c8a9678313663fcd16ccdd149
refs/heads/master
2016-09-05T11:02:34.970856
2014-05-02T19:28:23
2014-05-02T19:28:23
19,328,563
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
## @package pyFi.quotes.api
# Methods for determining option prices and interest rates
import urllib
import re
## Gets quote for a symbol from Google Finance
#  @param symbol String name of stock symbol. For example 'GOOG' would be Google
#
#  Uses urllib to get quote from http://finance.google.com/finance?q=
#  Falls back to interactive entry when the page cannot be parsed.
def get_quote_google(symbol):
    base_url = 'http://finance.google.com/finance?q='
    content = urllib.urlopen(base_url + symbol).read()
    # The last-trade price lives in an element whose id is "ref_<id>_l".
    m = re.search('id="ref_.*?_l".*?>(.*?)<', content)
    if m:
        quote = m.group(1)
    else:
        print 'No quote available for: ' + symbol
        quote = float(raw_input('Please enter symbol quote: $'))
    return float(quote)
## Gets quote for a symbol from Yahoo Finance
#  @param symbol String name of stock symbol. For example 'GOOG' would be Google
#
#  Uses urllib to get quote from http://finance.yahoo.com/q?s=
#  Falls back to interactive entry when the page cannot be parsed.
def get_quote_yahoo(symbol):
    base_url = 'http://finance.yahoo.com/q?s='
    content = urllib.urlopen(base_url + symbol).read()
    # The quote sits in an element whose id starts with "yfs_l84_".
    m = re.search('id="yfs_l84_.*?>(.*?)<',content)
    if m:
        quote = m.group(1)
    else:
        print 'No quote available for: ' + symbol
        quote = float(raw_input('Please enter symbol quote: $'))
    return float(quote)
## Gets the risk free rate
#  Uses the 1-year Treasury Note rate scraped from bankrate.com; falls
#  back to interactive entry when the page cannot be parsed.
def get_risk_free():
    base_url = 'http://www.bankrate.com/rates/interest-rates/1-year-treasury-rate.aspx'
    content = urllib.urlopen(base_url).read()
    # The rate cell renders like class="tabledataoddnew">0X.YZ< — the
    # leading 0 is consumed by the pattern, the remainder is captured.
    m = re.search('class="tabledataoddnew">0(.*?)<',content)
    if m:
        quote = float(m.group(1))/100.0
    else:
        print 'No T-Note data available...'
        quote = float(raw_input('Please enter risk free interest rate: '))
    return quote
if __name__ == '__main__':
    # Smoke test: fetch and display the current risk-free rate.
    #print get_quote_google('GOOG')
    #print get_quote_yahoo('AAPL')
    print get_risk_free()
#import string
import fileinput
# Input file of poker games: one game per line, ten space-separated cards
# (first five are Player 1's hand, last five Player 2's).
GAMELIST_FILENAME = "noise.txt"
#GAMELIST_FILENAME = "test.txt"
# Splits one line from the games file into the two players' hands.
def make_player_hands(line):
    """Return (player1_hand, player2_hand) — five cards each."""
    cards = line.replace(' ', ',').split(',')
    return cards[:5], cards[5:]
def seperate_properties(hand):
    """Split a five-card hand into (ranks, suits).

    Ranks are converted to ints via convert_rank and returned sorted
    high-to-low; suits keep the hand's original card order.
    """
    rank = []
    suit = []
    # Separate the rank character and the suit character of each card.
    for card in hand:
        rank.append(card[0])
        suit.append(card[1])
    for idx, val in enumerate(rank):
        rank[idx] = convert_rank(val)
    rank.sort(reverse=True)
    return rank, suit
# Input: list of 5 rank ints. Output: (is_straight, ranks). The "plus"
# refers to swapping the ace's 14 for a 1 in the wheel (A-2-3-4-5).
def is_straight_plus(hand):
    """Return (True/False, ranks sorted descending); sorts `hand` in place."""
    ranks = hand
    ranks.sort(reverse=True)
    # Every adjacent pair below the top card must descend by exactly 1.
    for lo in range(len(ranks) - 2, 0, -1):
        if ranks[lo] - 1 != ranks[lo + 1]:
            return False, ranks
    if ranks[0] - 1 == ranks[1]:
        return True, ranks
    if min(ranks) == 2 and ranks.count(14) == 1:
        # Wheel: the ace plays low — replace the 14 with a 1.
        ranks.remove(14)
        ranks.append(1)
        return True, ranks
    return False, ranks
#print is_straight_plus([14,5,2,4,3])
def is_flush(suits):
    """True when every card shares a single suit."""
    first = suits[0]
    return all(s == first for s in suits[1:])
# Input: list of rank ints. Output: string of two-digit zero-padded ranks.
def add_zeroes(rank):
    """Concatenate ranks as zero-padded two-digit strings ('05', '13', ...)."""
    return ''.join('%02d' % num for num in rank)
# Reorders a rank list by poker significance: higher-multiplicity ranks
# first (quads, trips, pairs, singles), high-to-low within each group.
def rank_resort(rank, countList):
    """countList[j] is how often rank[j] appears in the hand."""
    ordered = []
    for mult in range(4, 0, -1):
        group = sorted(
            (r for r, cnt in zip(rank, countList) if cnt == mult),
            reverse=True)
        ordered.extend(group)
    return ordered
def hand_score(rank, suit):
    """Score a five-card hand as a string: the first character is the hand
    category ('0' high card ... '8' straight flush) and the rest are
    zero-padded tiebreaker ranks, so scores compare lexicographically."""
    r = []
    # Count how many repetitions of each rank (aligned with `rank`).
    for c in rank:
        r.append(rank.count(c))
    rank = rank_resort(rank,r)
    score = add_zeroes(rank)
    # Highest multiplicity minus one: 0 = no pair, 1 = pair, 2 = trips, 3 = quads.
    handRank = max(r)-1
    if handRank == 0:
        # No repeated ranks: straight / flush / straight flush / high card.
        # is_straight_plus also resorts the wheel, switching the 14 to a 1.
        s = is_straight_plus(rank)
        isStraight = s[0]
        straightRank = s[1]
        if is_flush(suit) and isStraight:
            return '8' + add_zeroes(straightRank)
        elif isStraight:
            return '4' + add_zeroes(straightRank)
        elif is_flush(suit):
            return '5' + score
        else:
            return '0' + score
    # One pair or two pair (top multiplicity is 2).
    elif handRank == 1:
        if r.count(2) == 4:
            # Four cards participate in a pair -> two pair.
            return '2' + score
        else:
            return '1' + score
    # Trips or full house (top multiplicity is 3).
    elif handRank == 2:
        if r.count(2) > 0:
            # Trips plus a pair -> full house.
            return '6' + score
        else:
            return '3' + score
    elif handRank == 3:
        return '7' + score
    else:
        # Unreachable for a valid 5-card hand; sentinel error value.
        return -666
#print hand_score([5,4,5,2,13],['S','C','D','H','H'])
def p54():
    """Project Euler 54: count the games Player 1 wins across all games in
    GAMELIST_FILENAME. NOTE(review): relies on a `winner` helper that is
    not defined in this file — confirm it is provided elsewhere."""
    results = []
    for line in fileinput.input([GAMELIST_FILENAME]):
        # print line
        win = winner(line)
        # print "Player", win, "wins"
        results.append(win)
    # print results
    return results.count(1)

print p54()
UTF-8
Python
false
false
2,013
16,415,365,031,535
5b992c85e0dbf264326d39be6cf2251a90e55586
4bcdba23d2459b585de85fc58bd91ad3111d5386
/boosting_feature.py
5b2b0b0c690b66fb390758aae2aac5039627f49d
[]
no_license
xiaoyao1991/hmmpy
https://github.com/xiaoyao1991/hmmpy
d128182410000bf5ffc85692380352c8a0b72633
c5766306cb847bf003a3edd44a380bf9ce30dd04
refs/heads/master
2020-05-18T11:05:12.808236
2013-10-03T11:50:44
2013-10-03T11:50:44
14,082,636
1
1
null
null
null
null
null
null
null
null
null
null
null
null
null
from feature import *
# Names of the per-token feature methods run in parallel during the second
# boosting iteration; the order defines the feature-vector layout.
PARALLEL_PIPELINE = [
    'f_is_capitalized',
    'f_is_all_upper',
    'f_is_english',
    'f_has_both_char_and_digit',
    'f_is_delimiter',
    'f_is_punctuation',
    'f_is_sequential_punctuation',
    'f_has_digit',
    'f_is_all_digit',
    'f_is_possible_year',
    'f_is_in_namelist',
    'f_is_in_venuelist',
    'f_is_fname_abbrev',
    'f_is_preceeded_by_delimiter',
    'f_is_followed_by_delimiter',
    'f_is_possible_page_number',
    'f_is_an_and_between_two_names',
    'f_is_punctuation_in_name',
    'f_is_followed_by_year',
    'f_is_possible_new_notion',
    # Optional features, parallel
    'f_is_repeated_name_token',
    'f_is_repeated_delimiter_token',
    'f_is_repeated_title_token',
    'f_is_repeated_venue_token',
]
# This is the feature set used during the second iteration, inherited from
# FeatureGenerator.
# @param: the BGM params carry the segmentation results from the first iteration
class BoostingFeatureGenerator(FeatureGenerator):
    """Second-iteration feature generator adding background-model (BGM)
    token features derived from the first iteration's segmentation."""

    def __init__(self, token_BGM, pattern_BGM):
        super(BoostingFeatureGenerator, self).__init__()
        # token_BGM maps token -> list of (label, count) pairs with the
        # most frequent label first (index [0][0] is relied on below).
        self.token_BGM = token_BGM
        self.pattern_BGM = pattern_BGM
        self.pipeline = PARALLEL_PIPELINE

    # ================================= Token level, parallel features. =================================
    def _repeated_token_feature(self, idx, labels, min_total):
        # Shared implementation of the four f_is_repeated_*_token features
        # (the originals were copy-pasted, differing only in label set and
        # occurrence threshold): 1 when the token at `idx` occurs at least
        # `min_total` times overall and its most frequent label is in
        # `labels`, else 0.
        token = self.tokens[idx]
        if not self.token_BGM.has_key(token):
            return 0
        total_occurance = 0
        for k, v in self.token_BGM[token]:
            total_occurance += v
        return int(total_occurance >= min_total and
                   self.token_BGM[token][0][0] in labels)

    def f_is_repeated_name_token(self, idx):
        return self._repeated_token_feature(idx, ['0', '1'], 5)

    def f_is_repeated_delimiter_token(self, idx):
        return self._repeated_token_feature(idx, ['2'], 5)

    def f_is_repeated_title_token(self, idx):
        # Titles repeat less often, hence the lower threshold.
        return self._repeated_token_feature(idx, ['3'], 2)

    def f_is_repeated_venue_token(self, idx):
        return self._repeated_token_feature(idx, ['4'], 5)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from distribute_setup import use_setuptools; use_setuptools()
from setuptools import setup, find_packages
# Path helper: resolve `args` relative to this setup.py's own directory.
rel_file = lambda *args: os.path.join(os.path.dirname(os.path.abspath(__file__)), *args)
def read_from(filename):
    """Return the full text contents of `filename`."""
    with open(filename) as fp:
        return fp.read()
def get_version():
    """Extract __version__ from src/djqmixin/__init__.py (single source
    of truth for the package version)."""
    source = read_from(rel_file('src', 'djqmixin', '__init__.py'))
    return re.search(r"__version__ = '([^']+)'", source).group(1)
# Package metadata; the version is read from the package source so it is
# defined in exactly one place.
setup(
    name = 'django-qmixin',
    version = get_version(),
    author = "Zachary Voase",
    author_email = "[email protected]",
    url = 'http://github.com/zacharyvoase/django-qmixin',
    description = "A Django app for extending managers and the querysets they produce.",
    packages = find_packages(where='src'),
    package_dir = {'': 'src'},
)
UTF-8
Python
false
false
2,010
2,877,628,095,679
4498d0ffde03918f755b2aa7df565030062cc86b
331f942824352e9f98a3224f7430e9cec926fdc3
/minimum-depth-of-binary-tree.py
d18865c20fd26e955b7f9af7260f5416628b3a87
[]
no_license
adam-barnett/LeetCode
https://github.com/adam-barnett/LeetCode
8776b64e8ca7802f4b788abd07e7f32cbeb653b3
e2a03669b2116d7906ebb9d886c601c604bc1a74
refs/heads/master
2021-01-03T13:20:20.917508
2014-06-05T15:18:32
2014-06-05T15:18:32
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""
Given a binary tree, find its minimum depth. The minimum depth
is the number of nodes along the shortest path from the root
node down to the nearest leaf node. Problem found here:
http://oj.leetcode.com/problems/minimum-depth-of-binary-tree/
"""
# Definition for a binary tree node: a value plus left/right child links.
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
    # @param root, a tree node
    # @return an integer
    def minDepth(self, root, depth=1):
        """Depth of the shallowest leaf; `depth` counts nodes so far.

        Returns depth - 1 (i.e. 0 at the top level) for an empty subtree.
        """
        if root == None:
            return depth - 1
        left, right = root.left, root.right
        if left == None and right == None:
            return depth
        if left == None:
            return self.minDepth(right, depth + 1)
        if right == None:
            return self.minDepth(left, depth + 1)
        return min(self.minDepth(left, depth + 1),
                   self.minDepth(right, depth + 1))
#test
# Build a degenerate left-only chain of n+1 nodes and check minDepth,
# then add a right child at the root so the shallowest leaf is at depth 2.
sol = Solution()
root = TreeNode(0)
current_node = root
n = 5
for i in xrange(n):
    current_node.left = TreeNode(i)
    current_node = current_node.left
print 'expected value: ', n + 1, " returned value: ", sol.minDepth(root)
root.right = TreeNode(1)
print 'expected value: ', 2, " returned value: ", sol.minDepth(root)
UTF-8
Python
false
false
2,014
13,443,247,651,431
c38903219c14c91aeeff7568b9d7361c034d6f09
385a3e94da7f982ebe043bed7025ef692e7e9083
/Classifyem/visualize_hidden.py
66969d94d2deba35a9bf269fdb45454c2dbaef26
[]
no_license
hhamin/DeepBeliefBird
https://github.com/hhamin/DeepBeliefBird
4af2725b04e940c272ebed9f55fefdf21acc5a49
a08f2f010af244f46a029a0295475789e02076b9
refs/heads/master
2021-01-17T15:04:34.428678
2013-03-28T11:58:02
2013-03-28T11:58:02
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import cPickle
import theano
import numpy as np
import pylab as pl
import sys
import time
from sklearn import linear_model
sys.path.insert(0, '//home/timom/git/DeepBeliefBird')
import SongFeatures.birdsongUtil as bsu
import SongFeatures.preprocessing as pp
import Classifyem.logisticBirds as lb
def calcSimilarityMatrix(coef,n=5,plotting=True):
    '''
    Calculates a similarity matrix from regression coefficients: since the
    hidden representation is higher order, slightly different inputs should
    activate the same hidden units, so two classes are similar when they
    share entries among their n largest and n smallest coefficients.
    in:
        coef - regression coefficients [numClasses * numHidden]
        n - number of most positive and most negative coefficients to compare
        plotting - show the matrix with matplotlib when True
    out:
        SM - Similarity Matrix (diagonal left at 0)
    '''
    num_classes = coef.shape[0]
    SM = np.zeros((num_classes, num_classes))
    order = np.argsort(coef, axis=1)
    for row in range(num_classes):
        for col in range(num_classes):
            if row == col:
                continue
            # Shared indices among the n largest plus the n smallest.
            shared = np.sum(np.in1d(order[row, -n:], order[col, -n:], True))
            shared += np.sum(np.in1d(order[row, :n], order[col, :n], True))
            SM[row, col] = shared / (n * 2.0)
    if plotting:
        pl.figure()
        pl.imshow(SM,interpolation='nearest',cmap='Greys')
        pl.title('Similarity Matrix with Diagonal set to 0 || %i out of %i hidden units compared' %(n*2,coef.shape[1]))
        pl.colorbar()
        pl.xlabel('Class')
        pl.ylabel('Class')
        pl.show()
    return SM
def visual_frames(data,targets,C=1,showCoef=False):
    '''
    Visualizes for each individual frame some interesting hidden neurons.
    interesting: large regression coefficients (absolute terms)
    in:
        data: [numFrames*numHidden]
        targets: [numFrames] with class (Syllable) labels corresponding to data
        C: inverse regularization strength for the logistic regression
        showCoef: also plot the coefficient matrices when True
    out:
        nuescht (figures only)
    '''
    # 0 is reserved for "no syllable".
    converter={'0':0,'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7}
    # FIX: the original line ended in a stray line-continuation backslash,
    # which glued it onto the next statement and broke parsing.
    inv_converter=dict([(v,k) for (k,v) in converter.items()])
    lg = linear_model.LogisticRegression(penalty='l1', C=C)
    lg.fit(data, targets)
    numClasses=int(np.max(targets)+1) #+1 for 0 class
    calcSimilarityMatrix(lg.coef_)
    if showCoef:
        # Compare coefficients at the requested C against C=1.
        pl.figure()
        pl.subplot(211)
        pl.imshow(lg.coef_,interpolation='nearest')
        pl.colorbar()
        lg2 = linear_model.LogisticRegression(penalty='l1', C=1)
        lg2.fit(data, targets)
        pl.subplot(212)
        pl.imshow(lg2.coef_,interpolation='nearest')
        pl.colorbar()
        pl.show()
    numInteresting= 1
    # Hidden units sorted by coefficient value, per class.
    int_hidden=np.argsort(lg.coef_,axis=1)
    hidden_ex= np.zeros((numInteresting,numClasses))
    hidden_inh= np.zeros((numInteresting,numClasses))
    pl.ion()
    pl.figure()
    pl.title('Hidden Neurons with large regression coefficients')
    pl.hold(False)
    for i in range(targets.shape[0]):
        for j in range(numClasses):
            # Most excitatory / most inhibitory units for class j at frame i.
            hidden_ex[:,j]=data[i,int_hidden[j,-1:-numInteresting-1:-1]]
            hidden_inh[:,j]=data[i,int_hidden[j,:numInteresting]]
        pl.subplot(121)
        pl.imshow(hidden_ex.T,interpolation='nearest',cmap='Greys')
        pl.xticks(range(numInteresting),range(numInteresting))
        pl.ylabel('Classes')
        pl.xlabel('"excitatory" hidden units')
        #=======================================================================
        # if i == 0:
        #     pl.colorbar()
        #=======================================================================
        pl.text(-1.5,int(targets[i])+0.3,inv_converter[int(targets[i])] , bbox=dict(boxstyle="round", fc="0.8",facecolor='white', alpha=0.7),fontsize=30)
        pl.subplot(122)
        pl.imshow(hidden_inh.T,interpolation='nearest',cmap='Greys')
        pl.xticks(range(numInteresting),range(numInteresting))
        #pl.ylabel('Classes')
        pl.yticks([])
        pl.xlabel('"Inhibitory" hidden units')
        pl.draw()
        time.sleep(0.03)
if __name__ == '__main__':
    # NOTE(review): hardcoded absolute paths — adjust per machine.
    path= '/home/timom/git/DeepBeliefBird/SongFeatures/Motifs/1189/'
    tadbn_file='//home/timom/git/DeepBeliefBird/deep/trained_models/1024_25_300_0.05_FB_1189.pkl'
    #===========================================================================
    # data,targets=lb.createData(path,tadbn_file =tadbn_file ,method='tadbn',nfft=1024,hopF=2,batchsize=1,filterbank=True)
    # out = open('datatargets.pkl', 'wb')
    # cPickle.dump([data,targets],out)
    # out.close()
    #===========================================================================
    # Load the cached (data, targets) pair produced by the block above.
    # NOTE(review): unpickling is only safe for files you created yourself.
    pkl = open('datatargets.pkl', 'rb')
    temp=cPickle.load(pkl)
    data=temp[0]
    targets=temp[1]
    visual_frames(data,targets,C=0.05,showCoef=True)
    #visualize_frames(songpath)
    #visualize(songpath)
UTF-8
Python
false
false
2,013
1,116,691,543,747
0d8b026a3ff66c5b20eae55ee0b827c2a1a35cca
a51fb5bcf1e8309721b07ecb0164cda373cefb13
/task054.py
148f986c3d7e84608b61e9bb95c56b0ac03cb056
[]
no_license
laqie/euler
https://github.com/laqie/euler
e0193b54008f303d800f237038867fa076de6eaf
24136a95bf62a208d374f89c04390bc81952c6b7
refs/heads/master
2016-08-02T23:42:18.262509
2013-08-28T03:33:16
2013-08-28T03:33:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from collections import Counter
# Card ranks low-to-high, then hand categories low-to-high; RANKS maps each
# symbol to its index, so both individual cards and hand types compare
# numerically through one dictionary.
CARDS = ['2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K', 'A']
HAND_RANK = ['HC', 'OP', 'TP', 'TK', 'ST', 'FL', 'FH', 'FK', 'SF', 'RF']
RANKS = dict((card, index) for index, card in enumerate(CARDS + HAND_RANK))
def parse_hand(hand):
    # Split 'RS' card strings into rank characters (sorted ascending by
    # RANKS order — uses the Python 2 `cmp` argument) and sorted suits.
    cards = sorted([card[0] for card in hand], cmp=lambda x, y: RANKS[x] - RANKS[y])
    suits = sorted([card[1] for card in hand])
    return cards, suits
def is_flush(_, suits):
    # Suits arrive sorted, so a single suit iff first equals last.
    return suits[0] == suits[-1]
def is_straight(cards, _):
    """True for five consecutive ranks, including the wheel (2-3-4-5-A)."""
    top = CARDS.index(cards[-1])
    if CARDS[top] == CARDS[-1] and cards[:-1] == CARDS[:4]:
        # Ace-high with 2,3,4,5 below it: the wheel.
        return True
    return cards == CARDS[top - 4:top + 1]
def is_royal(cards, suits):
    """Royal flush: a ten-to-ace straight in one suit."""
    return (is_flush(cards, suits) and is_straight(cards, suits)
            and cards[-1] == 'A' and cards[0] == 'T')
def is_straightflush(cards, suits):
    """Straight flush: any straight whose cards share one suit."""
    return is_flush(cards, suits) and is_straight(cards, suits)
def get_score(hand):
    """Score a five-card hand as a number: the integer part is the hand
    category (via RANKS) and the fractional digits encode tiebreaker
    ranks, so scores compare directly. Python 2 (`cmp` in sorted)."""
    cards, suits = parse_hand(hand)
    counter = Counter(cards)
    length = len(counter)     # number of distinct ranks
    high_card = cards[-1]     # cards are sorted ascending by rank
    if is_royal(cards, suits):
        return RANKS['RF']
    if is_straightflush(cards, suits):
        return RANKS['SF'] + RANKS[high_card] * 0.01
    # Four of a kind: two distinct ranks, one appearing four times.
    if length == 2 and counter.most_common(1)[0][1] == 4:
        if counter.most_common(1)[0][0] == high_card:
            high_card1 = counter.most_common(2)[1][0]
        else:
            high_card1 = high_card
        high_card = counter.most_common(1)[0][0]
        return RANKS['FK'] + RANKS[high_card] * 0.01 + RANKS[high_card1] * 0.001
    # Full house: two distinct ranks, one appearing three times.
    if length == 2 and counter.most_common(1)[0][1] == 3:
        if counter.most_common(1)[0][0] == high_card:
            high_card1 = counter.most_common(2)[1][0]
        else:
            high_card1 = high_card
        high_card = counter.most_common(1)[0][0]
        return RANKS['FH'] + RANKS[high_card] * 0.01 + RANKS[high_card1] * 0.001
    if is_flush(cards, suits):
        return RANKS['FL'] + RANKS[high_card] * 0.01
    if is_straight(cards, suits):
        if high_card == 'A':
            # Wheel: the ace plays low, so the five is the real high card.
            high_card = '5'
        return RANKS['ST'] + RANKS[high_card] * 0.01
    # Three of a kind: three distinct ranks, one appearing three times.
    if length == 3 and counter.most_common(1)[0][1] == 3:
        hand_card = counter.most_common(1)[0][0]
        if hand_card == high_card:
            high_card = sorted(
                [counter.most_common(3)[1][0], counter.most_common(3)[2][0]],
                cmp=lambda x, y: RANKS[x] - RANKS[y])[-1]
        return RANKS['TK'] + RANKS[hand_card] * 0.01 + RANKS[high_card] * 0.001
    # Two pair: three distinct ranks, top multiplicity two.
    if length == 3 and counter.most_common(1)[0][1] == 2:
        hand_card1 = counter.most_common(1)[0][0]
        hand_card2 = counter.most_common(2)[1][0]
        if high_card == hand_card1 or high_card == hand_card2:
            high_card = counter.most_common(3)[2][0]
        return RANKS['TP'] + (RANKS[hand_card1] + RANKS[hand_card2]) * 0.01 + RANKS[high_card] * 0.001
    # One pair: four distinct ranks.
    if length == 4:
        hand_card = counter.most_common(1)[0][0]
        if hand_card == high_card:
            # NOTE(review): `[2][0]` appears twice below; it looks like a
            # typo for `[3][0]` — preserved as-is pending confirmation.
            high_card = sorted(
                [counter.most_common(4)[1][0], counter.most_common(4)[2][0], counter.most_common(4)[2][0]],
                cmp=lambda x, y: RANKS[x] - RANKS[y])[-1]
        return RANKS['OP'] + RANKS[hand_card] * 0.01 + RANKS[high_card] * 0.001
    # High card only.
    return RANKS[high_card] * 0.01
# Count the games in which Player 1 (first five cards) outscores Player 2.
# Python 2: map/filter return lists and print is a statement.
with open('poker.txt') as f:
    hands = map(str.split, f.readlines())
print len(filter(lambda game: get_score(game[:5]) > get_score(game[5:]), hands))
#!/usr/bin/env python
# encoding: utf-8
"""
staff.py
Created by Francesco Porcari on 2009-12-31.
Copyright (c) 2009 __MyCompanyName__. All rights reserved.
"""
import sys
import os
def main():
    # Stub entry point — no behavior implemented yet.
    pass

if __name__ == '__main__':
    main()
UTF-8
Python
false
false
2,011
5,557,687,703,353
3f13f452d4086967e8fb2eaa5f5b5325ed3d51a7
a5ea8f9c161f6e6bae6ef81c6f0d7cb72451a27a
/week0/simple/24.py
d92a09b6b6335fe96057937fd0cc99f57a31ef28
[]
no_license
hconkov/hackbulgaria
https://github.com/hconkov/hackbulgaria
8225268bc4009d2f1ff9dadac1b958ac03026f70
1c6af7e48dfc63370fb6aab86a454b999c0353e3
refs/heads/master
2020-04-16T10:12:10.460179
2014-12-05T16:23:15
2014-12-05T16:23:15
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def iterations_of_nan_expand(expanded):
    """Count how many expansion steps produced `expanded`.

    Returns 0 for the empty string, False when the text does not contain
    exactly one occurrence of "NaN", and otherwise the number of
    "Not a " prefixes (one per expansion iteration).
    """
    if expanded == '':
        return 0
    if expanded.count("NaN") != 1:
        return False
    return expanded.count("Not a ")


print(iterations_of_nan_expand(""))
#!/usr/bin/env python
# Part of the psychopy_ext library
# Copyright 2010-2013 Jonas Kubilius
# The program is distributed under the terms of the GNU General Public License,
# either version 3 of the License, or (at your option) any later version.
"""
A wrapper of PyMVPA2 for simple fMRI analyses using SPM preprocessing.
Currently only signal, SVM, and correlational analyzes are stable. Other
features are not as extensively tested.
.. warning:: This library has not been thoroughly tested yet!
"""
import os, sys, glob, shutil, warnings
import cPickle as pickle
import numpy as np
import pandas
import mvpa2.suite
import nibabel as nb
# some modules are only available in Python 2.6
try:
from collections import OrderedDict
except:
from exp import OrderedDict
# stuff from psychopy_ext
import plot, stats
class Analysis(object):
"""
For conducting functional magnetic resonance imaging analyses.
Assumptions:
1. For beta- and t-values, analyses were done is SPM.
2. Functional runs named as `func_<runNo>`, anatomicals named as
`struct<optional extra stuff>`
3. Beta-values model every condition, including fixation. But
t-values are computed as each conditions versus a fixation.
4. Conditions are in the 'cond' column of data files, trial
duration is in 'dur'
:Args:
paths (dict of str:str pairs)
A dictionary of paths where data is stored. Expected to have at
least the following keys:
- 'analysis' (for storing analysis outputs),
- 'data_behav' (behavioral data with condition labels),
- 'data_fmri',
- 'rec' (for ROIs from surface reconstruction in Caret or so),
- 'data_rois' (for storing the extracted signals in these ROIs),
:Kwargs:
- extraInfo (dict, default: None)
All parameters related to participant information
- runParams (dict, default: None)
All runtime parameters that you want to be able to access from GUI
or CLI. Expected to have at least:
- noOutput
- verbose
- force
- tr (int, default: None)
Time of repetition of your fMRI scans. This information is not
reliably coded in NIfTI files, so you need to define it yourself.
- fmri_prefix (str, default: 'swa*')
Prefix of SPM output functional scans that you want to analyze.
- rois (list of str)
A list of ROIs to analyze. See :func:`make_roi_pattern` for
accepted formats.
"""
def __init__(self,
             paths,
             tr,
             extraInfo=None,
             runParams=None,
             fmri_prefix='swa*',
             rois=None,
             ):
    """
    Merge user-supplied info/params into the defaults and resolve ROIs.

    See the class docstring for the meaning of each argument.
    """
    # minimal parameters that Analysis expects in extraInfo and runParams;
    # user-supplied values override these defaults below.
    # (The old code first assigned the raw arguments to self.extraInfo /
    # self.runParams and immediately overwrote them -- dead code removed.)
    self.extraInfo = OrderedDict([
        ('subjID', 'subj'),
        ('runType', 'main'),
        ])
    self.runParams = OrderedDict([
        ('method', 'timecourse'),
        ('values', 'raw'),
        ('noOutput', False),
        ('debug', False),
        ('verbose', True),
        ('visualize', False),
        ('force', False),
        ('dry', False)
        ])
    if extraInfo is not None:
        self.extraInfo.update(extraInfo)
    if runParams is not None:
        self.runParams.update(runParams)
    self.paths = paths
    self.tr = tr
    self.fmri_prefix = fmri_prefix
    # BUG FIX: this used to read runParams['rois'] directly, which
    # (a) raised TypeError when runParams was left at its None default,
    # (b) raised KeyError when 'rois' was not passed, and (c) silently
    # ignored the `rois` keyword argument; fall back to the `rois`
    # parameter when runParams does not override it.
    self.rois = make_roi_pattern(self.runParams.get('rois', rois))
def run(self):
    """
    A wrapper for running an analysis specified in `self.runParams`.
    Steps:
        - Try to load a saved analysis, unless a `force` flag is given
        - Otherwise, either generate synthetic data (values = `sim`) or
          extract it from the real data using :func:`run_method`.
        - Save a `pandas.DataFrame` in the analysis folder with the
          filename like `df_<method>_<values>.pkl`
    :Returns:
        A DataFrame with the output of a particular analysis in the
        `subjResp` column, and a file name where that data is stored.
    """
    df, df_fname = self.get_df()
    # BUG FIX: 'plot' is not among the default runParams keys set in
    # __init__, so direct indexing raised KeyError unless every caller
    # supplied it; a missing key now means "don't plot".
    if self.runParams.get('plot', False):
        self.plot(df)
    return df, df_fname
def get_df(self):
    """
    Load a cached analysis DataFrame, or recompute and cache it.

    Looks for `df_<method>_<values>.pkl` in the analysis folder.  A
    `force` flag (or any failure to load) triggers recomputation via
    :func:`run_method`; the fresh result is pickled back unless
    `noOutput` is set.

    :Returns:
        (df, df_fname) -- the results DataFrame and the pickle file name
        it is cached under.
    """
    df_fname = (self.paths['analysis']+'df_%s_%s.pkl' %
        (self.runParams['method'], self.runParams['values']))
    try:
        if self.runParams['force']:
            # the bare `raise` (with no active exception) fails and is
            # caught by the bare `except` below -- it is used as a goto
            # into the recompute branch
            raise # redo the analysis
        else:
            df = pickle.load(open(df_fname,'rb'))
            if self.runParams['verbose']:
                print ("loaded stored dataset of %s %s results" %
                    (self.runParams['values'], self.runParams['method']))
    except:
        res_fname = self.paths['analysis']+'%s_%s_%s.pkl'
        # generate some fake data to check a particular hypothesis
        if self.runParams['values'] == 'sim':
            simds = self.genFakeData()
        else:
            simds = None
        # NOTE(review): self.offset and self.dur are not assigned anywhere
        # in the visible part of this class -- presumably subclasses set
        # them; confirm before calling this on a bare Analysis instance
        header, results = self.run_method(self.extraInfo['subjID'],
            self.extraInfo['runType'], self.rois, offset=self.offset,
            dur=self.dur, filename=res_fname,
            method=self.runParams['method'], values=self.runParams['values'],
            simds=simds)
        df = pandas.DataFrame(results, columns=header)
        if not self.runParams['noOutput']:
            pickle.dump(df, open(df_fname,'wb'))
            if self.runParams['verbose']:
                print ("saved dataset of %s %s results to %s" %
                    (self.runParams['values'], self.runParams['method'],
                    df_fname))
    return df, df_fname
def plot(self, df, plt=None):
    """
    Plot analysis results, either as time courses or as bar plots per ROI.

    :Args:
        df (pandas.DataFrame)
            Results as produced by :func:`get_df`.
    :Kwargs:
        plt (:class:`plot.Plot`, default: None)
            An existing plot to draw into; a new 3x2 grid is created when
            omitted.
    """
    if plt is None:
        plt = plot.Plot(nrows_ncols=(3,2), figsize=(10, 12))
    if self.runParams['method'] == 'timecourse':
        # NOTE(review): plot_timecourse is not defined in the visible part
        # of this module -- confirm where it is imported from
        plot_timecourse(df, plt=plt,
            cols=['stim1.cond', 'name'])
    else:
        # NOTE(review): get_data() expects a filename, but here it is
        # given a DataFrame -- possibly a different aggregation helper was
        # intended; confirm
        agg = self.get_data(df)
        order = [r[1] for r in self.rois]
        plt.plot(agg, subplots='ROI', subplots_order=order, kind='bar')
        plt.tight_layout()
    # BUG FIX: 'saveplot' is not among the default runParams keys, so
    # direct indexing raised KeyError; a missing key now means "don't save".
    if self.runParams.get('saveplot', False) and not self.runParams['noOutput']:
        plt.savefig(self.paths['analysis']+'%s_%s.png' %
            (self.runParams['method'],
            self.runParams['values'])
            )
    plt.show()
def run_method(self, subjIDs, runType, rois, method='svm', values='raw',
offset=None, dur=None, filename = 'RENAME.pkl', simds=None):
"""
A wrapper for running a specified analysis.
Process:
1. Attempt to load stored results from the analysis that was done
before. (stored in teh analysis folder in a file
`<method>_<values>_<subjID>.pkl`
2. If that fails, it's probably because the analysis has
not been performed yet or, in rare cases, because the data
file is corrupt or missing. So a new analysis is initiated.
1. First, Regions of Interest (ROIs) are loaded from ``PATHS['data_rois']``
2. If that is not possible, then ROIs are extracted from
anatomical data using functional localizer data from SPM.
3. Extracted ROIs are stored in ``PATHS['data_rois']``.
4. Finally, the specified analysis is performed.
:Args:
- subjIDs (str of list of str)
Which participants should be analyzed
- runType (str)
Which run type should be taken. Usually you have a few runs,
such as main experimental runs and localizer runs. They should
have be labeled data file
:Kwargs:
- method: {'timecourse', 'univariate', 'signal', 'corr', 'svm'} (default: 'svm'}
Method to analyze data.
- values: {'raw', 'beta', 't'}
fMRI signal values to use. If 'raw', you have to pass offset
and duration. If you intend to try a few different parameters
for 'raw', e.g. a duration of 1 TR and 3 TRs, you may indicate
that in the value parameter like ``values='raw_3'`` which will
be useful in order not to confuse output files (they get
prefixes based on the value name).
e.g.:
offset = {'V1': 4, 'V2': 4, 'V3': 4, 'LO': 3, 'pFs': 3}
dur = 1
"""
if type(subjIDs) not in [list, tuple]:
subjIDs = [subjIDs]
results = []
#loadable = []
## quick way to see if we need to import mvpa2.suite
#for sNo, subjID in enumerate(subjIDs):
#try:
#filename_full = filename % (method, values, subjID)
#except:
#pass
#loadable.append(os.path.isfile(filename_full))
#import pdb; pdb.set_trace()
#if not np.all(loadable):
for subjID in subjIDs:
print subjID,
try:
out_fname = filename % (method, values, subjID)
except:
pass
loaded = False
if method in ['corr', 'svm']:
try:
header, result = pickle.load(open(out_fname,'rb'))
results.extend(result)
# result = pickle.load(open(filename,'rb'))
# header = [i[0] for i in result[0]]
# for res in result:
# results.append([r[1] for r in res])
print ': loaded stored %s results' % values
loaded = True
except:
print
print "Could not load or find the results file %s" % out_fname
print "Will proceed to do %s analysis from scratch" % method
if not loaded:
temp_res = []
for r, ROI_list in enumerate(rois):
print ROI_list[1],
if simds is not None:
values = 'sim'
else:
ds = self.extract_samples(subjID, runType, ROI_list,
values=values)
if values.startswith('raw'):
ds = self.detrend(ds)
if type(offset) == dict: # different offsets for ROIs
off = offset[ROI_list[1]]
else:
off = offset
ds = self.nan_to_num(ds, value=0)
evds = self.ds2evds(ds, offset=off, dur=dur)
elif values in ['t', 'beta', 'sim']:
# SPM sets certain voxels to NaNs
# we just gonna convert them to 0
evds = self.nan_to_num(ds)
if method == 'timecourse':
header, result = self.get_timecourse(evds)
elif method in ['signal', 'univariate']:
header, result = self.get_signal(evds, values)
elif method == 'corr':
evds = evds[evds.sa.targets != 0]
header, result = self.get_correlation(evds, nIter=100)
elif method == 'svm':
evds = evds[evds.sa.targets != 0]
header, result = self.get_svm(evds, nIter=100)
else:
raise NotImplementedError('Analysis for %s values is not '
'implemented')
header.extend(['subjID', 'ROI'])
for line in result:
line.extend([subjID, ROI_list[1]])
temp_res.append(line)
print
results.extend(temp_res)
# import pdb; pdb.set_trace()
if not self.noOutput and method in ['corr', 'svm']:
# mvpa2.suite.h5save(rp.o, results)
try:
os.makedirs(self.paths['analysis'])
except:
pass
pickle.dump([header,temp_res], open(out_fname,'wb'))
return header, results
#def time_course(self):
#ds = self.extract_samples(subjID, runType, ROI_list,
#values=values)
#return thisloop
#@loop
# def time_course(self, subjID, runType, ROI_list):
# ds = self.extract_samples(subjID, runType, ROI_list)
# ds = self.detrend(ds)
# evds = self.ds2evds(ds, offset=0, dur=8)
# # mvpamod.plotChunks(ds,evds,chunks=[0])
# return self.get_psc(evds)
# def univariate(self, subjID, runType, ROI_list):
# ds = self.extract_samples(subjID, runType, ROI_list)
# ds = self.detrend(ds)
# evds = self.ds2evds(ds, offset=3, dur=3)
# # mvpamod.plotChunks(ds,evds,chunks=[0])
# return self.psc_diff(evds)
# #@loop
# def mvpa(self, subjID, runType, ROI_list, offset, dur):
# """Performs an SVM classification.
# **Parameters**
# clf: 'SMLR', 'LinearCSVMC', 'LinearNuSVMC', 'RbfNuSVMC', or 'RbfCSVMC', or a list of them
# A name of the classifier to be used
# """
# ds = self.extract_samples(subjID, runType, ROI_list)
# ds = self.detrend(ds)
# evds = self.ds2evds(ds, offset=offset, dur=dur)
# evds = evds[evds.sa.targets != 0]
# return self.svm(evds)
def get_data(self, filename):
    """
    Get MRI data with the affine transformation (world coordinates) applied.
    :Args:
        filename (str)
            A filename of data to load
    """
    img = nb.load(filename)
    # reorient the raw array using the affine stored in the header
    orientation = nb.io_orientation(img.get_affine())
    oriented = nb.apply_orientation(img.get_data(), orientation)
    # drop singular dimensions (useful for ROIs)
    return np.squeeze(oriented)
def extract_samples(self,
        subjID,
        # runNo,
        runType,
        ROIs,
        values='raw'
        ):
    """
    Produces a detrended dataset with info for classifiers.
    :Args:
        - subjID (str)
            participant ID
        - runType (str)
            run type (useful if, for example, you also have
            localizer runs which you probably want to analyze separately from
            the experimental runs)
        - ROIs (list)
            A pattern of ROI file patterns to be combined into one ROI;
            entry [1] is the ROI name, entry [2] the list of file patterns
    :Kwargs:
        values (str, default: 'raw')
            What kind of values should be used. Usually you
            have 'raw', 'beta', and 't'.
    :Returns:
        ds (Dataset)
    """
    # reuse a previously extracted & saved ROI dataset when present
    reuse = True
    if values.startswith('raw'):
        add = ''
    else:
        add = '_' + values
    suffix = ROIs[1] + add + '.gz.hdf5'
    roiname = self.paths['data_rois'] %subjID + suffix
    if reuse and os.path.isfile(roiname):
        ds = mvpa2.suite.h5load(roiname)
        print '(loaded)',
        return ds
    # else
    # make a mask by combining all ROIs
    allROIs = []
    for ROI in ROIs[2]:
        theseROIs = glob.glob((self.paths['rec'] + ROI + '.nii') %subjID)
        allROIs.extend(theseROIs)
    # add together all ROIs -- and they should not overlap too much
    thisMask = sum([self.get_data(roi) for roi in allROIs])
    if values.startswith('raw'):
        # find all functional runs of a given runType
        allImg = glob.glob((self.paths['data_fmri'] + self.fmri_prefix + \
            runType + '.nii') % subjID)
        data_path = self.paths['data_behav']+'data_%02d_%s.csv'
        labels = self.extract_labels(allImg, data_path, subjID, runType)
        ds = self.fmri_dataset(allImg, labels, thisMask)
    elif values == 'beta':
        data_path = self.paths['data_behav'] + 'data_*_%s.csv'
        behav_data = self.read_csvs(data_path %(subjID, runType))
        # condition labels live in 'stim1.cond' or, failing that, 'cond'
        try:
            labels = np.unique(behav_data['stim1.cond']).tolist()
        except:
            labels = np.unique(behav_data['cond']).tolist()
        numRuns = len(np.unique(behav_data['runNo']))
        analysis_path = self.paths['spm_analysis'] % subjID + runType + '/'
        betaval = np.array(sorted(glob.glob(analysis_path + 'beta_*.img')))
        # SPM writes per run: one beta per condition + 6 motion regressors,
        # plus one constant per run at the very end
        if len(betaval) != (len(labels) + 6) * numRuns + numRuns:
            raise Exception('Number of beta value files is incorrect '
                'for participant %s' % subjID)
        # keep only the condition betas, dropping motion and constants
        select = [True]*len(labels) + [False]*6
        select = np.array(select*numRuns + [False]*numRuns)
        allImg = betaval[select]
        ds = []
        nLabels = len(labels)
        for runNo in range(numRuns):
            ds.append( mvpa2.suite.fmri_dataset(
                samples = allImg[runNo*nLabels:(runNo+1)*nLabels].tolist(),
                targets = labels,
                chunks = runNo,
                mask = thisMask
                ))
        ds = mvpa2.suite.vstack(ds)
    elif values == 't':
        data_path = self.paths['data_behav'] + 'data_*_%s.csv'
        behav_data = self.read_csvs(data_path %(subjID, runType))
        try:
            labels = np.unique(behav_data['stim1.cond']).tolist()
        except:
            labels = np.unique(behav_data['cond']).tolist()
        # t-values did not model all > fixation, so we skip it now
        labels = labels[1:]
        numRuns = len(np.unique(behav_data['runNo']))
        analysis_path = self.paths['spm_analysis'] % subjID + runType + '/'
        tval = np.array(sorted(glob.glob(analysis_path + 'spmT_*.img')))
        if len(tval) != (numRuns + 1) * len(labels):
            raise Exception('Number of t value files is incorrect '
                'for participant %s' % subjID)
        # drop every (numRuns+1)-th file: the across-runs contrast
        allImg = tval[np.arange(len(tval)) % (numRuns+1) != numRuns]
        ds = mvpa2.suite.fmri_dataset(
            samples = allImg.tolist(),
            targets = np.repeat(labels, numRuns).tolist(),
            chunks = np.tile(np.arange(numRuns), len(labels)).tolist(),
            mask = thisMask
            )
    else:
        raise Exception('values %s are not recognized' % values)
    if not self.runParams['noOutput']: # save the extracted data
        try:
            os.makedirs(self.paths['data_rois'] %subjID)
        except:
            pass
        mvpa2.suite.h5save(roiname, ds, compression=9)
    return ds
def extract_labels(self, img_fnames, data_path, subjID, runType):
    """
    Extracts data labels (targets) from behavioral data files.

    :Args:
        - img_fnames: functional image file names; the run number is
          parsed from the second-to-last '_'-separated token
        - data_path: template with three slots (subjID, runNo, runType)
    :Returns:
        A list (one entry per image) of per-TR condition labels.

    .. note:: Assumes that each block/condition is a multiple of TR.
    """
    labels = []
    for img_fname in img_fnames:
        # run number is encoded in the file name as ..._<runNo>_<runType>
        runNo = int(img_fname.split('_')[-2])
        behav_data = self.read_csvs(data_path %(subjID, runNo, runType))
        # indicate which condition was present for each acquisition
        # FIX: !!!ASSUMES!!! that each block/condition is a multiple of TR
        run_labels = []
        for lineNo, line in behav_data.iterrows():
            # how many TRs per block or condition
            repeat = int(line['dur'] / self.tr) # FIX
            run_labels.extend( [line['cond']] * repeat ) #FIX
        labels.append(run_labels)
    return labels
def fmri_dataset(self, samples, labels, thisMask=None):
    """
    Create a dataset from an fMRI timeseries image.
    Overrides `mvpa2.datasets.mri.fmri_dataset` which has a buggy multiple
    images reading.

    :Args:
        - samples: list of image file names, one per run
        - labels: per-run target sequences, parallel to `samples`
    :Kwargs:
        thisMask: mask forwarded to `mvpa2.suite.fmri_dataset`
    :Returns:
        One stacked dataset; chunk i corresponds to run i.
    """
    # Load in data for all runs and all ROIs
    chunkCount = 0
    first = True
    # NOTE(review): with empty `samples`, `ds` is never bound and the
    # final return raises UnboundLocalError
    for thisImg, thisLabel in zip(samples,labels):
        # load the appropriate func file with a mask
        tempNim = mvpa2.suite.fmri_dataset(
            samples = thisImg,
            targets = thisLabel,
            chunks = chunkCount,
            mask = thisMask
            )
        # combine all functional runs into one massive NIfTI Dataset
        if first:
            ds = tempNim
            first = False
        else:
            ds = mvpa2.suite.vstack((ds,tempNim))
        chunkCount += 1
    return ds
def detrend(self, ds):
    """
    Second-order detrending of data per chunk with the mean added back for
    a convenient percent signal change calculation.
    """
    # remember the grand mean before detrending removes it
    grand_mean = np.mean(ds.samples)
    mvpa2.suite.poly_detrend(ds, polyord=2, chunks_attr='chunks')
    # restore the mean so PSC can be computed downstream
    ds.samples += grand_mean
    return ds
def ds2evds(self, ds, offset=2, dur=2):
    """
    Converts a dataset to an event-related dataset.
    :Args:
        ds
    :Kwargs:
        - offset (int, default: 2)
            How much labels should be shifted due to the hemodynamic lag. A
            good practice is to first plot data to see where the peaks are
        - dur (int, default: 2)
            How many timepoints per condition.
    """
    # convert to an event-related design
    events = mvpa2.suite.find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
    # Remove the first and the last fixation period of each block
    # We don't want any overlap between chunks
    events_temp = []
    for evNo, ev in enumerate(events):
        # keep only events whose neighbors on both sides belong to the
        # same chunk, i.e. drop each chunk's first and last event
        if evNo != 0 and evNo != len(events)-1:
            if ev['chunks'] == events[evNo-1]['chunks'] and \
            ev['chunks'] == events[evNo+1]['chunks']:
                events_temp.append(ev)
    events = events_temp
    for ev in events:
        ev['onset'] += offset # offset since the peak is at 6-8 sec
        ev['duration'] = dur # use two time points as peaks since they are both high
    evds = mvpa2.suite.eventrelated_dataset(ds, events=events)
    # NOTE(review): self.visualize is read here, but the flag is stored as
    # runParams['visualize'] in __init__ -- confirm which attribute is set
    if self.visualize: self.plotChunks(ds, evds, chunks=[0], shiftTp=0)
    return evds
def plotChunks(self, ds, evds, chunks = None, shiftTp = 0):
    """
    Plot the mean (across voxels) time series per chunk, with each
    event's condition shown as a colored band and event onsets as
    vertical lines.

    :Kwargs:
        - chunks: which chunks to display (default: all of ds.UC)
        - shiftTp (int): how many time points the condition bands are
          shifted, for eyeballing the hemodynamic lag

    .. note:: NOTE(review): `mpl` and `plt` are not imported in the
        visible header of this module -- presumably matplotlib is
        imported elsewhere; confirm.
    """
    events = mvpa2.suite.find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
    # which chunks to display
    if chunks == None: chunks = ds.UC
    # get colors and assign them to targets
    numColors = len(ds.UT)
    cmap = mpl.cm.get_cmap('Paired')
    norm = mpl.colors.Normalize(0, 1)
    # sample the colormap away from its extremes
    z = np.linspace(0, 1, numColors + 2)
    z = z[1:-1]
    colors_tmp = cmap(norm(z))
    colors = {}
    for target, color in zip(ds.UT,colors_tmp): colors[target] = color
    chunkLen = ds.shape[0] / len(ds.UC)
    #
    eventDur = evds.a.mapper[1].boxlength
    # evdsFlat = evds.a.mapper[2].reverse(evds)
    # ds = evds.a.mapper[1].reverse(evdsFlat)
    for chunkNo, chunk in enumerate(chunks):
        plt.subplot( len(chunks), 1, chunkNo+1 )
        plt.title('Runs with conditions shifted by %d' %shiftTp)
        sel = np.array([i==chunk for i in evds.sa.chunks])
        sel_ds = np.array([i==chunk for i in ds.sa.chunks])
        meanPerChunk = np.mean(ds[sel_ds],1) # mean across voxels
        plt.plot(meanPerChunk.T, '.')
        # one colored band per event in this chunk
        for onset, target in zip(evds[sel].sa.event_onsetidx,
            evds[sel].sa.targets):
            plt.axvspan(
                xmin = onset + shiftTp - .5,
                xmax = onset + eventDur + shiftTp - .5,
                facecolor = colors[target],
                alpha=0.5)
        # vertical line at each raw event onset
        for ev in events:
            if ev['chunks'] == chunk:
                plt.axvline(x=ev['onset']%chunkLen + shiftTp)
                # xmin = ev['onset']%chunkLen + shiftTp,
                # xmax = ev['onset']%chunkLen + ev['duration'] + shiftTp,
                # facecolor = colors[ev['targets']],
                # alpha=0.5)
        plt.plot(meanPerChunk.T)
    plt.show()
def get_timecourse(self, evds):
    """
    For each condition, extracts all timepoints as specified in the evds
    window, and averages across voxels.

    :Returns:
        (header, results) with rows [cond, time in seconds, percent
        signal change relative to the fixation (target 0) baseline].
    """
    # baseline = mean fixation signal per time point
    baseline = evds[evds.sa.targets == 0].samples
    baseline = evds.a.mapper[-1].reverse(baseline)
    # average across all voxels and all blocks
    baseline = np.mean(np.mean(baseline,2),0)
    if np.any(baseline<0):
        warnings.warn('Some baseline values are negative')
    # now plot the mean timeseries and standard error
    header = ['cond', 'time', 'subjResp']
    results = []
    for cond in evds.UT:
        if cond != 0:
            evdsMean = evds[np.array([t == cond for t in evds.sa.targets])].samples
            # recover 3D evds structure: measurements x time points x voxels
            evdsMean = evds.a.mapper[-1].reverse(evdsMean)
            # average across all voxels and measurements
            evdsMean = np.mean(np.mean(evdsMean,2),0)
            # percent signal change against fixation
            thispsc = (evdsMean - baseline) / baseline * 100
            #time = np.arange(len(thispsc))*self.tr
            for pno, p in enumerate(thispsc):
                results.append([cond, pno*self.tr, p])
    return header, results
def get_signal(self, evds, values):
    """
    Extracts fMRI signal.
    .. note:: Assumes the condition 0 is the fixation condition, which
        will be used in percent signal change computations of raw
        values
    .. warning:: must be reviewed
    :Args:
        - evds (event-related mvpa dataset)
        - values {'raw', 'beta', 't'}
    :Returns:
        fMRI signal for each condition (against the fixation condition)
    """
    header = ['cond', 'subjResp']
    results = []
    # calculate the mean per target per chunk (across trials)
    run_averager = mvpa2.suite.mean_group_sample(['targets','chunks'])
    evds_avg = evds.get_mapped(run_averager)
    # calculate mean across conditions per chunk per voxel
    target_averager = mvpa2.suite.mean_group_sample(['chunks'])
    mean = evds_avg.get_mapped(target_averager)
    mean = np.mean(mean, 1) # mean across voxels
    # raw and beta values are expressed relative to fixation (target 0);
    # t-values are already contrasts, so no baseline is needed
    if values.startswith('raw') or values == 'beta':
        baseline = mean[mean.sa.targets == 0].samples
        #baseline = np.mean(baseline)
    for cond in mean.UT:
        if cond != 0:
            sel = np.array([t == cond for t in mean.sa.targets])
            mean_cond = mean[sel].samples
            #evdsMean = np.mean(evds_cond)
            if values.startswith('raw'):
                mean_cond = (mean_cond - baseline) / baseline * 100
            elif values == 'beta':
                mean_cond = mean_cond - baseline
            evdsMean = np.mean(mean_cond)
            results.append([cond, evdsMean])
    return header, results
def get_univariate(self, evds, values):
    """Alias for :func:`get_signal`.

    Kept so callers can request the 'univariate' method by name.
    """
    return self.get_signal(evds, values)
def correlation(self, evds, nIter=100):
    """
    Computes a correlation between multiple splits in half of the data.
    Reported as one minus a correlation to provide a dissimilarity measure
    as in svm.
    :Args:
        evds (event-related mvpa dataset)
    :Kwargs:
        nIter (int, default: 100)
            Number of random splits in half of the entire dataset.
    :Returns:
        A header and a results matrix with four columns:
            - iter: iteration number
            - stim1.cond: first condition
            - stim2.cond: second condition
            - subjResp: one minus the correlation value

    .. note:: NOTE(review): run_method calls `self.get_correlation`, but
        this method is named `correlation` -- confirm whether an alias is
        defined elsewhere or this is a naming bug.
    """
    # calculate the mean per target per chunk (across trials)
    run_averager = mvpa2.suite.mean_group_sample(['targets','chunks'])
    evds_avg = evds.get_mapped(run_averager)
    numT = len(evds_avg.UT)
    # calculate mean across conditions per chunk per voxel
    target_averager = mvpa2.suite.mean_group_sample(['chunks'])
    mean = evds_avg.get_mapped(target_averager)
    # subtract the mean chunk-wise
    evds_avg.samples -= np.repeat(mean, numT, 0)
    #results = np.zeros((nIter,numT,numT))
    # alternate chunks between the two splits; with an odd number of
    # chunks the leftover one is tagged -1 and excluded
    runtype = [0,1] * (len(evds_avg.UC)/2) + \
        [-1] * (len(evds_avg.UC)%2)
    # for odd number of chunks (will get rid of one)
    targets = evds_avg.UT
    header = ['iter', 'stim1.cond', 'stim2.cond', 'subjResp']
    results = []
    for n in range(nIter):
        np.random.shuffle(runtype)
        evds_avg.sa['runtype'] = np.repeat(runtype,numT)
        # split 1: all chunks tagged 0, averaged per target
        evds_split1 = evds_avg[np.array([i==0 for i in evds_avg.sa.runtype])]
        run_averager = mvpa2.suite.mean_group_sample(['targets'])
        evds_split1 = evds_split1.get_mapped(run_averager)
        # split 2: all chunks tagged 1, averaged per target
        evds_split2 = evds_avg[np.array([i==1 for i in evds_avg.sa.runtype])]
        run_averager = mvpa2.suite.mean_group_sample(['targets'])
        evds_split2 = evds_split2.get_mapped(run_averager)
        result = mvpa2.clfs.distance.one_minus_correlation(evds_split1.samples, evds_split2.samples)/2
        # unroll the numT x numT dissimilarity matrix into rows
        for i in range(0, numT):
            for j in range(0, numT):
                results.append([n, targets[i], targets[j], result[i,j]])
    return header, results
def svm(self, evds, nIter=100, clf=mvpa2.suite.LinearNuSVMC()):
"""
Runs a support vector machine pairwise.
.. note: Might be not the most efficient implementation of SVM, but
it is at least intuitive.
Process:
- Split data into a training set (about 75% of all values) and a testing
set (about 25% of values).
- For each pair of conditions, train the classifier.
- Then test on the average of the testing set, i.e., only on two
samples. This trick usually boosts the performance (credit:
Hans P. Op de Beeck)
:Args:
evds (event-related mvpa dataset)
:Kwargs:
- nIter (int, default: 100)
Number of random splits into a training and testing sets.
- clf (mvpa classfier, default: Linear Nu SVM)
:Returns:
A header and a results matrix with four columns:
- iter: iteration number
- stim1.cond: first condition
- stim2.cond: second condition
- subjResp: one minus the correlation value
"""
# calculate the mean per target per chunk (across trials)
run_averager = mvpa2.suite.mean_group_sample(['targets','chunks'])
evds_avg = evds.get_mapped(run_averager)
numT = len(evds_avg.UT)
# subtract the mean across voxels (per target per chunk)
evds_avg.samples -= np.tile(np.mean(evds_avg, 1), (evds_avg.shape[1],1) ).T
# and divide by standard deviation across voxels
evds_avg.samples /= np.tile(np.std(evds_avg, axis=1, ddof=1),
(evds_avg.shape[1],1) ).T
## NEW
if len(evds_avg.UC)%2:
runtype = [0]*(len(evds_avg.UC)-9) + [1]*8 + [-1]
# for odd number of chunks (will get rid of one)
else:
runtype = [0]*(len(evds_avg.UC)-8) + [1]*8
###
## OLD
# if len(evds_avg.UC)%2:
# runtype = [0]*(len(evds_avg.UC)-3) + [1]*2 + [-1]
# # for odd number of chunks (will get rid of one)
# else:
# runtype = [0]*(len(evds_avg.UC)-2) + [1]*2
###
#targets = evds_avg.UT
header = ['iter', 'stim1.cond', 'stim2.cond', 'subjResp']
results = []
for n in range(nIter):
print n,
np.random.shuffle(runtype)
evds_avg.sa['runtype'] = np.repeat(runtype,numT)
evds_train = evds_avg[np.array([i==0 for i in evds_avg.sa.runtype])]
evds_test = evds_avg[np.array([i==1 for i in evds_avg.sa.runtype])]
## NEW
# boost results by averaging test patterns over chunks
run_averager = mvpa2.suite.mean_group_sample(['targets'])
evds_test = evds_test.get_mapped(run_averager)
###
for i in range(0, numT):
for j in range(0, numT):
targets = (evds_train.UT[i], evds_train.UT[j])
if i==j:
pred = None
else:
ind_train = np.array([k in targets for k in evds_train.sa.targets])
evds_train_ij = evds_train[ind_train]
ind_test = np.array([k in targets for k in evds_test.sa.targets])
# keep = np.logical_not(np.isnan(evds_test))
evds_test_ij = evds_test[ind_test]
# evds_test_ij = evds_test_ij[:,keep]
# fsel = mvpa2.suite.StaticFeatureSelection(keep)
# clf = mvpa2.suite.LinearNuSVMC()
# clf = mvpa2.suite.FeatureSelectionClassifier(clf, fsel)
clf.train(evds_train_ij)
#fsel = mvpa2.suite.SensitivityBasedFeatureSelection(
#mvpa2.suite.OneWayAnova(),
#mvpa2.suite.FractionTailSelector(0.05, mode='select', tail='upper'))
#fclf = mvpa2.suite.FeatureSelectionClassifier(clf, fsel)
#fclf.train(evds_train_ij)
# sensana = clf.get_sensitivity_analyzer()
# sens = sensana(evds_train_ij)
# inds = np.argsort(np.abs(sens.samples))
# inds = np.squeeze(inds)
# evds_train_ij.samples = evds_train_ij.samples[:,inds>=len(inds)-100]
# #import pdb; pdb.set_trace()
# clf.train(evds_train_ij)
# test_samp = evds_test_ij.samples[:,inds>=len(inds)-100]
# predictions = clf.predict(test_samp)
predictions = clf.predict(evds_test_ij.samples)
pred = np.mean(predictions == evds_test_ij.sa.targets)
results.append([n, targets[0], targets[1], pred])
print
return header, results
def dissimilarity(self,
        evds,
        method = 'svm',
        nIter = 10, # how many iterations; have more for SVM
        meanFunc = 'across voxels',
        ):
    """
    DEPRECATED.
    Computes a dissimilarity (0 - very similar, 1 - very dissimilar) between
    two splits of data over multiple iterations. If method is correlation,
    dataset is split in half. If svm, leave-one-chunk.

    :Returns:
        A numT x numT matrix of dissimilarities averaged across folds.
    """
    numT = len(evds.UT)
    results = np.zeros((nIter,numT,numT))
    # prepare split of data
    # runtype is either 0 (train data or split1) or 1 (test data or split2)
    if method=='corr': # splitHalf
        # if odd, then one of the runs will be spared (value -1)
        runtype = [0,1] * (len(evds.UC)/2) + [-1] * (len(evds.UC)%2)
    elif method=='svm': # nFold split
        if len(evds.UC)%2:
            runtype = [0]*(len(evds.UC)-3) + [1,1] + [-1] # for odd
        else:
            runtype = [0]*(len(evds.UC)-2) + [1,1]
    for n in range(nIter):
        print n,
        # we want each iteration to have a different (random) split
        np.random.shuffle(runtype)
        # for each datapoint within a chunk, assign the same runtype
        evds.sa['runtype'] = np.repeat(runtype,len(evds.sa.chunks)/len(evds.UC))
        # create an average per target per chunk (per voxel)
        run_averager = mvpa2.suite.mean_group_sample(['targets','chunks'])
        evds_avg = evds.get_mapped(run_averager)
        # calculate mean and standard deviation across conditions per voxel
        ds_split_train = evds_avg[np.array([i==0 for i in evds_avg.sa.runtype])]
        mean_train = np.mean(ds_split_train,0) # mean per voxel
        sd_train = np.std(ds_split_train, axis=0, ddof=1)
        ds_split_test = evds_avg[np.array([i==1 for i in evds_avg.sa.runtype])]
        mean_test = np.mean(ds_split_test,0)
        sd_test = np.std(ds_split_test, axis=0, ddof=1)
        targets = ds_split_train.UT
        if np.sum(targets != ds_split_test.UT)>0:
            sys.exit("Targets on the two splits don't match. Unbalanced design?")
        # filling in the results matrix
        for index,value in np.ndenumerate(results[n]):
            # target pair for that particular matrix cell
            indexT = (targets[index[0]], targets[index[1]])
            # z-score each split with its own mean/sd before comparing
            ind_train = np.array([i in indexT for i in ds_split_train.sa.targets])
            ds_train = ds_split_train[ind_train]
            ds_train.samples -= mean_train
            ds_train.samples /= sd_train
            ind_test = np.array([i in indexT for i in ds_split_test.sa.targets])
            ds_test = ds_split_test[ind_test]
            ds_test.samples -= mean_test
            ds_test.samples /= sd_test
            if method=='corr':
                cr = mvpa2.clfs.distance.one_minus_correlation(ds_train.samples,ds_test.samples)
                # if one target then there's one correlation only
                if index[0] == index[1]: acc = cr
                else: acc = np.mean([ cr[0,1], cr[1,0] ])
                results[n,index[0],index[1]] = acc
            elif method=='svm':
                if index[0] == index[1]: # can't do svm, so assume
                    results[n,index[0],index[1]] = 1
                else:
                    clf = mvpa2.suite.LinearNuSVMC()
                    clf.train(ds_train)
                    predictions = clf.predict(ds_test.samples)
                    results[n,index[0],index[1]] = np.mean(predictions == ds_test.sa.targets)
    print
    # NOTE(review): self.visualize is read here, but the flag is stored as
    # runParams['visualize'] in __init__; also `plt` is not imported in
    # the visible header -- confirm both
    if self.visualize:
        meanPerIter = np.mean(np.mean(results, 2), 1)
        cumMean = np.cumsum(meanPerIter)/range(1, len(meanPerIter)+1)
        plt.plot(cumMean)
        plt.show()
    return np.mean(results,0) # mean across folds
def searchlight(self, ds):
""" Basic searchlight analysis
"""
run_averager = mvpa2.suite.mean_group_sample(['targets', 'chunks'])
ds = ds.get_mapped(run_averager)
clf = mvpa2.suite.LinearNuSVMC()
cvte = mvpa2.suite.CrossValidation(clf, mvpa2.suite.NFoldPartitioner(),
errorfx = lambda p, t: np.mean(p == t),enable_ca=['stats'])
sl = mvpa2.suite.sphere_searchlight(cvte, radius=3, postproc=mvpa2.suite.mean_sample())
pairs = [
[(1,2),(1,3),(2,3)],
[(4,5),(4,6),(5,6)],
[(7,8),(7,9),(8,9)],
[(10,11),(10,12),(11,12)]
]
chance_level = .5
for pair in pairs:
thisDS = ds[np.array([i in pair for i in ds.sa.targets])]
res = sl(ds)
resOrig = res.a.mapper.reverse(res.samples)
print res_orig.shape
fig = plt.figure()
fig.subplot(221)
plt.imshow(np.mean(resOrig.samples,0), interpolation='nearest')
fig.subplot(222)
plt.imshow(np.mean(resOrig.samples,1), interpolation='nearest')
fig.subplot(223)
plt.imshow(np.mean(resOrig.samples,2), interpolation='nearest')
plt.show()
sphere_errors = res.samples[0]
res_mean = np.mean(res)
res_std = np.std(res)
import pdb; pdb.set_trace()
sphere_errors < chance_level - 2 * res_std
mri_args = {
'background' : os.path.join(datapath, 'anat.nii.gz'),
#'background_mask' : os.path.join(datapath, 'mask_brain.nii.gz'),
#'overlay_mask' : os.path.join(datapath, 'mask_gray.nii.gz'),
'cmap_bg' : 'gray',
'cmap_overlay' : 'autumn', # YlOrRd_r # pl.cm.autumn
'interactive' : cfg.getboolean('examples', 'interactive', True),
}
fig = plot_lightbox(overlay=map2nifti(dataset, sens),
vlim=(0, None), slices=18, **mri_args)
def _plot_slice(self, volume_path, rois=None, coords=None, fig=None):
    """
    Plots a slice from the three sides.
    .. note:: ROIs (masks) are averaged across all slices so that you
        would definitely get to see the ROIs independent of the plotted slice.
    :Args:
        volume_path (str)
            Path to the volume you want to plot (may be a glob pattern;
            every matching volume is plotted).
    :Kwargs:
        - rois (list of (anything, path) pairs; default: None)
            ROI mask volumes; the file path is read from element [1] of
            each entry and all masks are summed together.
        - coords (tuple of 3 or 4 ints; default: None)
            Slice indices (x, y, z[, t]); defaults to the volume middle.
        - fig (:class:`plot.Plot`; default: None)
            Pass an existing plot if you want to plot in it.
    """
    if fig is None:
        fig = plot.Plot(ncols=3)
        showplot = True
    else:
        showplot = False # the caller probably has more plots to do
    labels = ['parasagittal', 'coronal', 'horizontal']
    allvols = glob.glob(volume_path)
    if len(allvols) == 0:
        raise Exception('Volume not found at %s' % volume_path)
    for vol in allvols:
        data = self.get_data(vol)
        if coords is None or len(coords) <= 2:
            # NOTE(review): m/2 is integer division under Python 2 only;
            # under Python 3 this yields floats and breaks indexing — confirm
            # the target interpreter.
            coords = [m/2 for m in data.shape] # middle
        if data.ndim == 4: # time volume
            # pick one time point (requested or the first)
            if len(coords) == 4:
                data = data[:,:,:,coords[3]]
            else:
                data = data[:,:,:,0]
        for i in range(3):
            # slice along each of the three axes in turn
            if i == 0:
                mf = data[coords[i]]
            elif i == 1:
                mf = data[:, coords[i]]
            else:
                mf = data[:, :, coords[i]]
            ax = fig.next()
            ax.imshow(mf.T, cmap='gray', origin='lower',
                interpolation='nearest')
            ax.set_title('%s at %s' % (labels[i], coords[i]))
            if rois is not None:
                # sum all ROI masks into a single overlay
                mask = sum([self.get_data(roi[1]) for roi in rois])
                mean_mask = np.mean(mask, i).T
                # make it uniform color
                mean_mask[np.nonzero(mean_mask)] = 1. # ROI voxels are 1
                mean_mask[mean_mask==0] = np.nan # non-ROI voxels are nan
                mask_rgba = np.zeros(mean_mask.shape + (4,)) # add transparency
                mask_rgba[:] = np.nan # default is nan
                mask_rgba[:,:,0] = mean_mask # make mask red
                mask_rgba[:,:,3] = mean_mask # transparency should have nans
                ax.imshow(mask_rgba, alpha=.5,
                    origin='lower', interpolation='nearest')
    if showplot:
        fig.show()
def plot_roi(self):
    """
    Plots Regions of Interest (ROIs) on the functional data.

    Finds every ROI file matching self.rois for the current subject,
    plots structural and mean functional slices, then a histogram of the
    extracted ROI sample values.
    """
    subjID = self.extraInfo['subjID']
    if not isinstance(subjID, str):
        raise TypeError('subjID is supposed to be a string, '
            'but got %s instead' % subjID)
    allROIs = []
    for ROIs in self.rois:
        for ROI in ROIs[2]:
            theseROIs = glob.glob((self.paths['rec'] + ROI + '.nii') %subjID)
            allROIs.extend(theseROIs)
    if len(allROIs) == 0:
        raise Exception('Could not find matching ROIS at %s' %
            (self.paths['rec'] %subjID))
    else:
        # (None, joined pretty name, list of found ROI files)
        allROIs = (None, '-'.join([r[1] for r in self.rois]), allROIs)
    fig = plot.Plot(nrows=5, ncols=3, sharex=False, sharey=False)
    self._plot_slice(self.paths['data_struct']
        %subjID + 'wstruct*', fig=fig)
    # NOTE(review): `rois=allROIs[1]` passes the joined NAME string, but
    # _plot_slice indexes roi[1] of each entry expecting file paths —
    # this looks like it should be `rois=[allROIs]`; confirm.
    self._plot_slice(self.paths['data_fmri']
        %subjID + 'swmeanafunc_*.nii', rois=allROIs[1], fig=fig)
    # plot ROI values
    ds = self.extract_samples(subjID, self.extraInfo['runType'],
        allROIs, values=self.runParams['values'])
    if not self.runParams['values'].startswith('raw'):
        # report the percentage of NaN voxels in the title
        nans = np.sum(np.isnan(ds)) * 100. / ds.samples.size
        title = '%d%% of ROI voxels are nans' % nans
    else:
        title = ''
    ax = fig.next()
    ax.hist(ds.samples.ravel(), label=title)
    fig.hide_plots([-2,-1])
    fig.show()
def _calc_nans(self):
    # Placeholder: NaN statistics computation is not implemented yet.
    pass
def genFakeData(self, nChunks = 4):
    """Generate a synthetic mvpa2 dataset of `nChunks` chunks of fake data.

    :Kwargs:
        nChunks (int, default: 4)
            Number of chunks to simulate via mvpa2.suite.multiple_chunks.
    """
    def fake(nConds = 12,nVoxels = 100):
        # NOTE(review): relies on Python 2 semantics — integer division
        # (nVoxels/2, nConds/2) and list-returning range() concatenation
        # below; breaks under Python 3.
        # each voxel response per condition
        fakeCond1 = np.array([0.5,1.]*(nVoxels/2))
        # fakeCond1 = np.random.normal( loc=1,scale=1,size=(nVoxels,) )
        # ROI's response to each condition
        fakeCond1 = np.tile( fakeCond1, (nConds/2,1) )
        # add noise
        fakeDS1 = fakeCond1 + np.random.random((nConds/2,nVoxels))/10.
        fakeCond2 = np.array([1.,.5,1.,5]*(nVoxels/4))
        # fakeCond2 = np.random.normal(loc=3,scale=1,size= (nVoxels,) )
        fakeCond2 = np.tile( fakeCond2, ( nConds/2,1 ) )
        fakeDS2 = fakeCond2 + np.random.random((nConds/2,nVoxels))/10.
        # stack both patterns plus their voxel-mirrored copies into one chunk
        fakeChunk = np.vstack((fakeDS1,fakeDS2,fakeDS2[:,::-1],fakeDS1[:,::-1]))
        # targets 1..nConds followed by nConds..1 (Python 2 list concat)
        targets = range(1,nConds+1)+range(nConds,0,-1)
        fakeChunk = mvpa2.suite.dataset_wizard(samples=fakeChunk, targets=targets)
        return fakeChunk
    fakeDS = mvpa2.suite.multiple_chunks(fake,nChunks)
    return fakeDS
def read_csvs(self, path):
    """
    Reads multiple CSV files and concatenates them into a single
    `pandas.DataFrame` with a fresh integer index.
    :Args:
        path (str)
            Where to find the data (a glob pattern).
    """
    frames = [pandas.read_csv(fname) for fname in glob.glob(path)]
    return pandas.concat(frames, ignore_index=True)
def roi_params(self,
               rp,
               subROIs = False,
               suppressText = True,
               space = 'talairach',
               spm = False
               ):
    """
    Calculates mean coordinates and the number of voxels of each given ROI.
    **Parameters**
        rp: Namespace (required)
            Run parameters that are parsed from the command line
        subROIs: True or False
            If True, then subROIs are not combined together into an ROI
        suppressText: True or False
            If True, then nothing will be printed out
        space: talairach or native
            Choose the output to be either in native voxel space or in Talairach coordinates
        spm: True or False
            If True, then the coordinates in the voxel space are provided with
            indices +1 to match MatLab's convention of starting arrays from 1.
    """
    # NOTE(review): depends on external helpers not visible here — q.listDir,
    # scripts.core.init.paths, nb (nibabel) and tb (tabular) — confirm imports.
    if subROIs: names = ['subjID','ROI','subROI','x','y','z','numVoxels']
    else: names = ['subjID','ROI','x','y','z','numVoxels']
    recs = []
    # allCoords = np.zeros((1,4))
    for subjIDno, subjID in enumerate(rp.subjID_list):
        for ROI_list in rp.rois:
            # collect every .nii file matching this ROI group
            allROIs = []
            for thisROI in ROI_list[2]:
                allROIs.extend(q.listDir(scripts.core.init.paths['recDir'] %subjID,
                    pattern = thisROI + '\.nii', fullPath = True))
            #import pdb; pdb.set_trace()
            if allROIs != []:
                # affine (sform) of the first ROI volume, used for Talairach conversion
                SForm = nb.load(allROIs[0]).get_header().get_sform()
                # check for overlap
                # if subjID == 'twolines_06': import pdb; pdb.set_trace()
                print [os.path.basename(subROI) for subROI in allROIs]
                #
                mask = sum([np.squeeze(nb.load(subROI).get_data()) for subROI in allROIs])
                if not suppressText:
                    # voxels claimed by more than two sub-ROIs indicate overlap
                    overlap = mask > 2
                    if np.sum(overlap) > 0:
                        print 'WARNING: Overlap in %(subjID)s %(ROI)s detected.'\
                            %{'subjID': subjID, 'ROI': ROI_list[1]}
                if not subROIs: allROIs = [mask]
                for subROI in allROIs:
                    if subROIs: subROIname = os.path.basename(os.path.abspath(subROI)).split('.')[0]
                    else: subROIname = ROI_list[1]
                    #import pdb; pdb.set_trace()
                    if subROIs: thisROI = nb.load(subROI).get_data()
                    else: thisROI = subROI
                    # mean voxel coordinate of the nonzero mask entries
                    transROI = np.transpose(thisROI.nonzero())
                    meanROI = np.mean(transROI,0)[1:]
                    meanROI = meanROI[::-1] # reverse the order per convention
                    # convert to the Talairach coordinates
                    if space == 'talairach':
                        meanROI = np.dot(SForm, np.concatenate((meanROI,[1]))) # convert
                        meanROI = meanROI[:-1] # remove the last coordinate (slice number)
                    else:
                        # spm=True adds 1 (MatLab indexing); spm=False adds 0
                        meanROI = [m+spm for m in meanROI] # +1 to correct for SPM coords
                    if subROIs:
                        recs.append((subjID,ROI_list[1],subROIname)+tuple(meanROI)+(transROI.shape[0],))
                    else:
                        recs.append((subjID,subROIname)+tuple(meanROI)+(transROI.shape[0],))
    ROIparams = tb.tabarray(records = recs, names = names)
    if not suppressText:
        # aggregate across subjects and print a per-ROI summary table
        if subROIs: on = ['ROI','subROI']
        else: on = ['ROI']
        ROImean = ROIparams.aggregate(On = on, AggFunc = np.mean,
            AggFuncDict = {'subjID': lambda x: None})
        xyz = ROIparams[['x','y','z']].extract().reshape((len(rp.subjID_list),-1,3))
        xyzErr = np.std(xyz, axis = 0, ddof = 1)
        # sort ROImean
        numPerSubj = xyz.shape[1]
        order = ROIparams[:numPerSubj][on]
        order = order.addcols(range(len(order)), names=['order'])
        order.sort(order=on)
        ROImean.sort(order=on)
        ROImean = ROImean.addcols(order[['order']].extract(), names = 'order')
        ROImean.sort(order = 'order')
        lenROI = min([len(ROI) for ROI in ROImean['ROI']])
        if subROIs: lenSubROI = min([len(ROI) for ROI in ROImean['subROI']])
        print
        print ROIparams.dtype.names[1:]
        for i, line in enumerate(ROImean):
            print line['ROI'].ljust(lenROI+2),
            if subROIs: print line['subROI'].ljust(lenSubROI+2),
            print '%3d' %np.round(line['x']),
            print u'\xb1 %d ' %np.round(xyzErr[i,0]),
            print '%3d' %np.round(line['y']),
            print u'\xb1 %d ' %np.round(xyzErr[i,1]),
            print '%3d' %np.round(line['z']),
            print u'\xb1 %d ' %np.round(xyzErr[i,2]),
            print '%4d' %np.round(line['numVoxels'])
    return ROIparams
class Preproc(object):
    """
    Generates batch scripts from SPM preprocessing.
    .. note:: Presently, only batch scripts for statistical analyses in SPM
        are available.
    :Args:
        paths (dict of str:str pairs)
            A dictionary of paths where data is stored. Expected to have at
            least the following keys:
            - 'fmri_root' for moving the original realignment parameter
                (prefix `rp`) file
            - 'data_behav' - where to find behavioral data with condition
                labels (passed `condcol` variable), onsets, and durations
            - 'data_fmri' - where to find fMRI functional data
            - 'rec' (for ROIs from surface reconstruction in Caret or so)
            - 'data_rois' (for storing the extracted signals in these ROIs)
    """
    def __init__(self, paths):
        # Path templates; values typically contain a '%s' slot that the
        # methods fill with the subject ID (e.g. self.paths['fmri_root'] % subjID).
        self.paths = paths
def split_rp(self, subjID):
    """
    Splits the file that has realignment information by run.
    This is used for stats as each run with its covariates has to be
    entered separately.
    Assumptions:
        - Realignment parameters are supposed to be called like
          `rp_afunc_<runNo>.txt`
        - Functional data is expected to be in the `paths['data_fmri']` folder
        - `paths['fmri_root']` should also be specified so that the original
          rp file would be backuped there.
    :Args:
        subjID (str)
            For which subject the split is done.
    """
    # NOTE(review): relies on self.runParams ('verbose', 'dry') and
    # self.get_data, which Preproc.__init__ does not set — presumably
    # supplied by a subclass or mixin; confirm.
    funcImg = glob.glob(self.paths['data_fmri'] % subjID + 'func_*_*.nii')
    rp_pattern = self.paths['data_fmri'] % subjID + 'rp_afunc_*.txt'
    rpFiles = glob.glob(rp_pattern)
    if len(rpFiles) == 0: # probably split_rp has been done before
        if self.runParams['verbose']:
            print 'No rp files like %s found' % rp_pattern
    else:
        # concatenate all rp lines, moving each original file to fmri_root
        rp = []
        for rpFile in rpFiles:
            f = open(rpFile)
            rp.extend(f.readlines())
            f.close()
            rp_bck = self.paths['fmri_root'] % subjID
            rp_bck += os.path.basename(rpFile)
            if not self.runParams['dry']:
                shutil.move(rpFile, rp_bck)
            else:
                print '%s --> %s' % (rpFile, rp_bck)
        # write one rp file per functional run, consuming as many lines as
        # that run has acquisitions
        last = 0
        for func in funcImg:
            runNo = func.split('.')[0].split('_')[-2]
            dynScans = self.get_data(func).shape[3] # get number of acquisitions
            runType = func.split('.')[0].split('_')[-1]
            outName = self.paths['data_fmri']%subjID + 'rp_%s_%s.txt' %(runNo,runType)
            if not self.runParams['dry']:
                f = open(outName, 'w')
                f.writelines(rp[last:last+dynScans])
                f.close()
            else:
                print '%s: %s' % (func, outName)
            last += dynScans
        if len(rp) != last:
            warnings.warn('Splitting was performed but the number of '
                'lines in the rp file did not match the total number of '
                'scans in the functional runs.')
def gen_stats_batch(self, subjID, runType=['main','loc','mer'],
                    condcol='cond', descrcol='name'):
    """
    Generates a batch file for statistical analyses in SPM.
    :Args:
        subjID
    :Kwargs:
        - runType (str or list of str, default: ['main', 'loc', 'mer'])
            The prefix of functional data files indicating which kind of
            run it was. (Mutable default is safe here: only read, never mutated.)
        - condcol (str)
            Column in the data files with condition labels (numbers)
        - descrcol (str)
            Column in the data files with condition names
    """
    if isinstance(runType, str):
        runType = [runType]
    self.split_rp(subjID)
    # set the path where this stats job will sit
    # all other paths will be coded as relative to this one
    curpath = os.path.join(self.paths['fmri_root'] % subjID, 'jobs')
    f = open(os.path.join(curpath, 'stats.m'), 'w')
    f.write("spm('defaults','fmri');\nspm_jobman('initcfg');\nclear matlabbatch\n\n")
    # loop variable renamed from `runType` (which shadowed the parameter)
    for rtNo, rtype in enumerate(runType):
        analysisDir = os.path.normpath(os.path.join(
            os.path.abspath(self.paths['fmri_root'] % subjID),
            'analysis', rtype))
        try:
            os.makedirs(analysisDir)
        except OSError:  # was a bare except; makedirs raises OSError if dir exists
            print('WARNING: Analysis folder already exists at %s' %
                  os.path.abspath(analysisDir))
        # make analysis path relative to stats.m
        analysisDir_str = ("cellstr(spm_select('CPath','%s'))" %
                           os.path.relpath(analysisDir, curpath))
        dataFiles = glob.glob(self.paths['data_behav'] % subjID +
                              'data_*_%s.csv' % rtype)
        regressorFiles = glob.glob(self.paths['data_fmri'] % subjID +
                                   'rp_*_%s.txt' % rtype)
        f.write("matlabbatch{%d}.spm.stats.fmri_spec.dir = %s;\n" %
                (3*rtNo+1, analysisDir_str))
        f.write("matlabbatch{%d}.spm.stats.fmri_spec.timing.units = 'secs';\n" %
                (3*rtNo+1))
        f.write("matlabbatch{%d}.spm.stats.fmri_spec.timing.RT = 2;\n" %
                (3*rtNo+1))
        for rnNo, dataFile in enumerate(dataFiles):
            runNo = int(os.path.basename(dataFile).split('_')[1])
            data = np.recfromcsv(dataFile, case_sensitive=True)
            swapath = os.path.relpath(self.paths['data_fmri'] % subjID, curpath)
            # BUGFIX: the original applied '%' only to the last string
            # fragment ('%' binds tighter than '+'), handing 3 args to a
            # 2-placeholder format -> TypeError. Parenthesize the whole
            # template before formatting.
            f.write(("matlabbatch{%d}.spm.stats.fmri_spec.sess(%d).scans = "
                     "cellstr(spm_select('ExtFPList','%s',"
                     "'^swafunc_%02d_%s\\.nii$',1:168));\n") %
                    (3*rtNo+1, rnNo+1, swapath, runNo, rtype))
            conds = np.unique(data[condcol])
            if rtype == 'mer':
                conds = conds[conds != 0]
            for cNo, cond in enumerate(conds):
                agg = data[data[condcol] == cond]
                # BUGFIX: same operator-precedence problem as above.
                f.write(("matlabbatch{%d}.spm.stats.fmri_spec.sess(%d)."
                         "cond(%d).name = '%d|%s';\n") %
                        (3*rtNo+1, rnNo+1, cNo+1, cond, agg[descrcol][0]))
                if 'blockNo' in agg.dtype.names:
                    # block design: one onset/summed duration per block
                    onsets = []
                    durs = []
                    for block in np.unique(agg['blockNo']):
                        onsets.append(agg[agg['blockNo'] == block]['onset'][0])
                        durs.append(np.around(sum(agg[agg['blockNo'] == block]['dur']),
                                              decimals=1))
                else:
                    onsets = np.round(agg['onset'])
                    durs = agg['dur']
                # for fixation we remove the first and the last blocks
                if cond == 0:
                    onsets = onsets[1:-1]
                    durs = durs[1:-1]
                f.write("matlabbatch{%d}.spm.stats.fmri_spec.sess(%d).cond(%d).onset = %s;\n" % (3*rtNo+1, rnNo+1, cNo+1, onsets))
                f.write("matlabbatch{%d}.spm.stats.fmri_spec.sess(%d).cond(%d).duration = %s;\n" % (3*rtNo+1, rnNo+1, cNo+1, durs))
            regpath = os.path.relpath(regressorFiles[rnNo], curpath)
            regpath_str = "cellstr(spm_select('FPList','%s','^%s$'))" % (os.path.dirname(regpath), os.path.basename(regpath))
            f.write("matlabbatch{%d}.spm.stats.fmri_spec.sess(%d).multi_reg = %s;\n\n" % (3*rtNo+1, rnNo+1, regpath_str))
        spmmat = "cellstr(fullfile(spm_select('CPath','%s'),'SPM.mat'));\n" % os.path.relpath(analysisDir, curpath)
        f.write("matlabbatch{%d}.spm.stats.fmri_est.spmmat = %s" % (3*rtNo+2, spmmat))
        f.write("matlabbatch{%d}.spm.stats.con.spmmat = %s" % (3*rtNo+3, spmmat))
        if rtype == 'loc':
            f.write("matlabbatch{%d}.spm.stats.con.consess{1}.tcon.name = 'all > fix';\n" % (3*rtNo+3))
            f.write("matlabbatch{%d}.spm.stats.con.consess{1}.tcon.convec = [-2 1 1];\n" % (3*rtNo+3))
            f.write("matlabbatch{%d}.spm.stats.con.consess{1}.tcon.sessrep = 'repl';\n" % (3*rtNo+3))
            f.write("matlabbatch{%d}.spm.stats.con.consess{2}.tcon.name = 'objects > scrambled';\n" % (3*rtNo+3))
            f.write("matlabbatch{%d}.spm.stats.con.consess{2}.tcon.convec = [0 1 -1];\n" % (3*rtNo+3))
            f.write("matlabbatch{%d}.spm.stats.con.consess{2}.tcon.sessrep = 'repl';\n\n\n" % (3*rtNo+3))
        elif rtype == 'mer':
            f.write("matlabbatch{%d}.spm.stats.con.consess{1}.tcon.name = 'hor > ver';\n" % (3*rtNo+3))
            f.write("matlabbatch{%d}.spm.stats.con.consess{1}.tcon.convec = [1 -1];\n" % (3*rtNo+3))
            f.write("matlabbatch{%d}.spm.stats.con.consess{1}.tcon.sessrep = 'repl';\n\n\n" % (3*rtNo+3))
        else:
            # one contrast per condition against the fixation baseline
            # NOTE(review): `data` here is whatever the LAST dataFile in the
            # loop above produced — confirm all runs share one condition set.
            conds = np.unique(data[condcol])
            descrs = []
            # skip fixation condition as it's our baseline
            for cond in conds[1:]:
                descrs.append((cond,
                               data[data[condcol] == cond][descrcol][0]))
            for dNo, descr in enumerate(descrs):
                f.write("matlabbatch{%d}.spm.stats.con.consess{%d}.tcon.name = '%d|%s';\n" % (3*rtNo+3, dNo+1, descr[0], descr[1]))
                # -1 on fixation, +1 on this condition, 0 elsewhere
                thisCond = [-1] + [0]*dNo + [1] + [0]*(len(descrs)-dNo-1)
                f.write("matlabbatch{%d}.spm.stats.con.consess{%d}.tcon.convec = %s;\n" % (3*rtNo+3, dNo+1, thisCond))
                f.write("matlabbatch{%d}.spm.stats.con.consess{%d}.tcon.sessrep = 'both';\n" % (3*rtNo+3, dNo+1))
            f.write('\n\n')
    f.write("save('stats.mat','matlabbatch');\n")
    f.write("%%spm_jobman('interactive',matlabbatch);\n")
    f.write("spm_jobman('run',matlabbatch);")
    f.close()
def make_full(distance):
    """Mirror the upper triangle of `distance` into a full symmetric matrix.

    Only the strictly-upper-triangle values of the input are used; they are
    copied into both triangles of the result. The diagonal stays NaN.

    :Args:
        distance (2-D square np.ndarray)
    :Returns:
        A new symmetric array with NaN on the diagonal.
    """
    res = np.nan * np.ones(distance.shape)
    iu = np.triu_indices(len(distance), k=1)  # upper triangle less diagonal
    # removed unused `il` (lower-triangle indices were computed but never used)
    res[iu] = distance[iu]   # fill the upper triangle
    res = res.T              # those values now sit in the lower triangle
    res[iu] = distance[iu]   # fill the upper triangle again -> symmetric
    return res
def plot_timecourse(df, title='', plt=None, cols='name'):
    """Plots an fMRI time course for signal change.
    :Args:
        df (:class:`pandas.DataFrame`)
            A DataFrame with fMRI signal change computed.
    :Kwargs:
        - title (str, default: '')
            Title for the plot (i.e., for the current axis, not the whole figure)
        - plt (:class:`plot.Plot`, default: None)
            The plot you're working on.
        - cols (str or list of str, default: 'name')
            Column names to plot as separate conditions (different curves)
    """
    # NOTE(review): the `plt` parameter shadows the module-level matplotlib
    # alias inside this function — here it is a plot.Plot instance.
    if plt is None:
        plt = plot.Plot(sharex=True, sharey=True)
    # aggregate signal over time points, with subjects as the error term
    agg = stats.aggregate(df, values='subjResp', rows='time',
        cols=cols, yerr='subjID')
    ax = plt.plot(agg, title=title, kind='line')
    ax.set_xlabel('Time since trial onset, s')
    ax.set_ylabel('Signal change, %')
    # dashed baseline at zero signal change
    ax.axhline(linestyle='--', color='0.6')
def plot_similarity(similarity, names=None, percent=False):
    """Show a similarity matrix as an annotated image; return the AxesImage.

    Also prints the mean diagonal, mean off-diagonal, and the correlation
    between the two triangles (a symmetry/reliability check).
    """
    # NOTE(review): make_symmetric is not defined in the visible portion of
    # this file (only make_full is) — confirm it exists at import time.
    similarity = make_symmetric(similarity)
    # mean of the diagonal (within-condition similarity)
    trace = similarity.trace()/len(similarity)
    # mean of everything off the diagonal
    offdiag = (np.sum(similarity) - similarity.trace()) / len(similarity) / (len(similarity)-1)
    print '%.2f' %trace,
    print '%.2f' %offdiag,
    iu = np.triu_indices(len(similarity),k=1) # upper triangle less diagonal
    # correlation between the upper and lower triangles
    rel = np.corrcoef(similarity[iu],similarity.T[iu])[0,1]
    print '%.2f' %rel
    # import pdb; pdb.set_trace()
    if percent: plot_data = similarity*100
    else: plot_data = similarity
    # NOTE(review): vmin/vmax are hard-coded for accuracy-like values
    im = plt.imshow(plot_data,interpolation='none',vmin=.45,vmax=.86)
    plt.colorbar(im, use_gridspec=True)
    # plt.tight_layout()
    if not names is None:
        # label both axes with the "pretty" names (element [1] of each entry)
        names = [n[1] for n in names]
        locs, labels = plt.xticks(range(plot_data.shape[1]), names)
        plt.setp(labels, 'rotation', 'vertical')
        locs, labels = plt.yticks(range(plot_data.shape[0]), names)
    # annotate each cell with its value ('%d' of percents or '.xx' style)
    for index,value in np.ndenumerate(plot_data):
        if np.isnan(value): h = ''
        else:
            if percent: h = '%d' %(value*100)
            else: h = '.%d' %(value*100)
        plt.text(index[1]-.5,index[0]+.5,h)
    return im
def plot_hcluster(similarity, names):
    """Plot a hierarchical-clustering dendrogram of the similarity matrix."""
    import hcluster
    similarity = make_symmetric(similarity)
    # shift so that chance level (.5) maps to zero; clip negatives
    sim2 = similarity - .5
    sim2[sim2<0] = 0
    # distance = Orange.core.SymMatrix(1-similarity)
    # root = Orange.clustering.hierarchical.HierarchicalClustering(distance)
    tree = hcluster.hcluster(sim2)
    # labels of the form "<number>-<pretty name>"
    imlist = [ str(i[0]) + '-' + i[1] for i in names]
    # NOTE(review): writes 'sunset.jpg' into the current working directory
    dendogram = hcluster.drawdendrogram(tree,imlist,jpeg='sunset.jpg')
    plt.imshow(dendogram, cmap=plt.cm.gray)
def plot_mds(similarity, names):
    """Scatter-plot a 2-D MDS embedding of the similarity matrix, labeling
    each point with its "pretty" name (element [1] of each `names` entry)."""
    #
    similarity = make_symmetric(similarity)
    # shift so chance level (.5) maps to zero; clip negatives
    sim2 = similarity - .5
    sim2[sim2<0] = 0
    #import pdb; pdb.set_trace()
    # NOTE(review): sim2 is a similarity, yet it is fed to SymMatrix as a
    # distance — confirm this inversion is intended.
    distance = Orange.core.SymMatrix(sim2)
    #import pdb; pdb.set_trace()
    mds = Orange.projection.mds.MDS(distance)
    # 100 optimization iterations
    mds.run(100)
    for (x, y), name in zip(mds.points,names):
        plt.plot((x,),(y,),'ro')
        plt.text(x,y,name[1])
def mean_diag_off(matrix):
    """Return [mean of diagonal entries, mean of off-diagonal entries]
    for a square matrix."""
    n = len(matrix)
    diag_total = matrix.trace()
    diag_mean = diag_total / n
    off_mean = (np.sum(matrix) - diag_total) / n / (n - 1)
    return [diag_mean, off_mean]
def avg_blocks(matrix, coding):
    """Average `matrix` cells within blocks defined by `coding`.

    Entries of `coding` with the same value form one block; cell (i, j) of
    the result is the mean of all matrix entries whose row code is ui and
    column code is uj, excluding the main diagonal. Exits the process if
    any code occurs only once.
    """
    coding = np.array(coding)
    coding_int = coding[np.not_equal(coding, None)] # remove nones
    # try:
    # np.bincount(coding_int)>1
    # except:
    # import pdb; pdb.set_trace()
    # NOTE(review): np.int is removed in modern NumPy — use int if upgrading.
    coding_int = coding_int.astype(np.int)
    if not np.all(np.bincount(coding_int)>1):
        print np.bincount(coding_int)
        sys.exit('You have a single occurence of some entry')
    else:
        uniquec = np.unique(coding_int)
        avg = np.zeros( (len(uniquec),len(uniquec)) )
        for i,ui in enumerate(uniquec):
            indi = coding == ui
            for j,uj in enumerate(uniquec):
                indj = coding == uj
                # boolean mask of cells belonging to block (ui, uj)
                ind = np.outer(indi,indj)
                # never include the main diagonal in the average
                np.fill_diagonal(ind,False)
                avg[i,j] = np.mean(matrix[ind])
        return avg
def plot_psc(*args, **kwargs):
    """
    DEPRECATED. Plots percent signal change of raw data.

    All arguments are forwarded to plot.pivot_plot; returns the axis.
    """
    ax = plot.pivot_plot(marker='o', kind='line', *args, **kwargs)
    ax.set_xlabel('Time since trial onset, s')
    ax.set_ylabel('Signal change, %')
    # dashed baseline at zero signal change
    ax.axhline(linestyle='--', color='0.6')
    # plt.xlim(( 0,evdsMean.shape[1]+1 ))
    # plt.ylim((-.5,2.))
    # build the legend but keep it hidden (loc=0 = 'best')
    ax.legend(loc=0).set_visible(False)
    return ax
"""
Other tools
"""
def make_roi_pattern(rois):
    """
    Takes ROI names and expands them into a list of:
    - ROI name as given
    - Pretty ROI name for output
    - ROI names with * prepended and appended for finding these ROIs easily
        using `glob`
    :Args:
        rois (list of str or tuples):
            A list of ROI names, e.g., `['V1', (['rh_V2','lh_V2'], 'V2')]`.
            If an element is a tuple, the first element is ROI names and the
            second one is their "pretty" (unifying) name for printing.
    :Returns:
        A list of ROI names in the format described above, e.g.
        `[('V1','V1','*V1*'), (['rh_V2','lh_V2'], 'V2', ['*rh_V2*','*lh_V2*'])]`
    """
    def wildcard(roi_names):
        """Wrap each ROI name in '*' so glob matches it anywhere."""
        return ['*' + name + '*' for name in roi_names]
    if not isinstance(rois, (list, tuple)):
        rois = [rois]
    expanded = []
    for roi in rois:
        if isinstance(roi, tuple):
            # renaming provided: (names, pretty) -> (names, pretty, patterns)
            expanded.append(roi + (wildcard(roi[0]),))
        elif isinstance(roi, list):
            # a list of ROIs: join the names with '-' for the pretty name
            expanded.append((roi, '-'.join(roi), wildcard(roi)))
        else:
            # a single ROI name
            expanded.append((roi, roi, wildcard([roi])))
    return expanded
UTF-8
Python
false
false
2,013
14,070,312,882,562
7f188ac44e846495d6fd2200bbd924ff4ffdef40
c534bbcab5278a0c2bc4cf2d130aff7a0695b566
/lib/issuance.py
7cb457f4c024f87454895034009391088407daa3
[]
no_license
becherovka/Counterparty
https://github.com/becherovka/Counterparty
1599517fd5aabb4c988cd01604f4196c31837cfb
b374da2fd61e86e14d6931d7b436bb521f2a34d5
refs/heads/master
2020-04-05T18:30:27.423959
2013-12-24T21:46:11
2013-12-24T21:46:11
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#! /usr/bin/python3
import struct
import sqlite3
import logging
from . import (config, util, exceptions, bitcoin, api)
FORMAT = '>QQ?'  # big-endian: asset_id (uint64), amount (uint64), divisible (bool)
ID = 20  # message-type identifier packed in front of issuance payloads
LENGTH = 8 + 8 + 1  # packed byte size of FORMAT
def create (source, asset_id, amount, divisible):
    """Build an (re-)issuance transaction for `amount` of `asset_id`.

    :Args:
        source (str): issuing address; must match the original issuer on re-issue.
        asset_id (int): asset identifier.
        amount (int): quantity to issue; zero is rejected.
        divisible (bool): whether the asset is divisible.
    :Raises:
        exceptions.IssuanceError: wrong issuer or changed divisibility.
        exceptions.UselessError: zero amount.
    :Returns:
        The result of bitcoin.transaction() carrying the packed payload.
    """
    db = sqlite3.connect(config.DATABASE)
    db.row_factory = sqlite3.Row
    # removed unused local `cursor = db.cursor()` — it was never used

    # Handle potential re‐issuances: only the original issuer may re-issue,
    # and divisibility cannot change.
    issuances = api.get_issuances(validity='Valid', asset_id=asset_id)
    if issuances:
        if issuances[0]['issuer'] != source:
            raise exceptions.IssuanceError('Asset exists and was not issuanced by this address.')
        if issuances[0]['divisible'] != divisible:
            raise exceptions.IssuanceError('That asset exists with a different divisibility.')

    if not amount:
        raise exceptions.UselessError('Zero amount.')

    # Payload: message-type prefix followed by the packed issuance fields.
    data = config.PREFIX + struct.pack(config.TXTYPE_FORMAT, ID)
    data += struct.pack(FORMAT, asset_id, amount, divisible)
    db.close()
    return bitcoin.transaction(source, None, None, config.MIN_FEE, data)
def parse (db, cursor, tx, message):
    """Parse an issuance message, credit the issuer if valid, and record it.

    :Args:
        db, cursor: open SQLite connection/cursor for the ledger.
        tx (mapping): transaction metadata (tx_index, tx_hash, block_index, source).
        message (bytes): payload packed with FORMAT.
    :Returns:
        The (possibly replaced) cursor.
    """
    # Ask for forgiveness…
    validity = 'Valid'

    # Unpack message.
    try:
        asset_id, amount, divisible = struct.unpack(FORMAT, message)
    except Exception:
        asset_id, amount, divisible = None, None, None
        validity = 'Invalid: could not unpack'

    if validity == 'Valid':
        if not amount:
            validity = 'Invalid: zero amount.'

        # If re‐issuance, check for compatability in divisibility, issuer.
        issuances = api.get_issuances(validity='Valid', asset_id=asset_id)
        if issuances:
            if issuances[0]['issuer'] != tx['source']:
                validity = 'Invalid: that asset already exists and was not issuanced by this address'
            # BUGFIX: was `issuance['divisible']` — `issuance` is undefined
            # (NameError); compare against the first existing issuance.
            if validity == 'Valid' and issuances[0]['divisible'] != divisible:
                validity = 'Invalid: asset exists with a different divisibility'

    # Credit.
    if validity == 'Valid':
        cursor = util.credit(db, cursor, tx['source'], asset_id, amount)
        if divisible: divisibility = 'divisible'
        else: divisibility = 'indivisible'
        logging.info('(Re‐)Issuance: {} created {} of {} asset {} ({})'.format(tx['source'], util.devise(amount, asset_id, 'output'), divisibility, asset_id, util.short(tx['tx_hash'])))

    # Add parsed transaction to message‐type–specific table.
    cursor.execute('''INSERT INTO issuances(
                    tx_index,
                    tx_hash,
                    block_index,
                    asset_id,
                    amount,
                    divisible,
                    issuer,
                    validity) VALUES(?,?,?,?,?,?,?,?)''',
                   (tx['tx_index'],
                    tx['tx_hash'],
                    tx['block_index'],
                    asset_id,
                    amount,
                    divisible,
                    tx['source'],
                    validity)
                   )
    return cursor
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
"""
This dummy module can be used as a basis for creating your own shipping module:
- Copy this module to a new name
- Make the changes described below
"""
# Note, make sure you use decimal math everywhere!
from decimal import Decimal
from django.utils.translation import ugettext as _
from shipping.modules.base import BaseShipper
from livesettings import config_value
class Shipper(BaseShipper):
    """Satchmo shipping module: Hong Kong Post with per-continent rates."""
    # Flat fee constant; note that cost() reads its rates from live
    # settings (config_value) and does not use this value.
    flatRateFee = Decimal("15.00")
    # Key under which Satchmo registers this shipping module.
    id = "HKPost"
    def __str__(self):
        """
        This is mainly helpful for debugging purposes
        """
        return "Varying Rate"
    def description(self):
        """
        A basic description that will be displayed to the user when selecting their shipping options
        """
        return _("HongKong Post Shipping")
    def calculate(self, cart, contact):
        """
        Based on the destination, the shipping fee varies
        """
        from satchmo_store.shop.models import Config
        shop_details = Config.objects.get_current()
        # shippingdata = {
        # 'contact': contact,
        # 'cart': cart,
        # 'shipping_address' : shop_details,
        # 'shipping_phone' : shop_details.phone,
        # 'shipping_country_code' : shop_details.country.iso2_code
        # }
        # from l10n.models import CONTINENTS
        # self.country = contact.shipping_address.country.iso2_code
        # self.continent = [v for k,v in CONTINENTS if k == shop_details.country.continent][0]
        # Stash the inputs for cost(); cost() asserts that this ran first.
        self.cart = cart
        self.contact = contact
        self._calculated = True
    def cost(self):
        """
        Complex calculations can be done here as long as the return value is a decimal figure
        """
        assert(self._calculated)
        # Return the configured rate for the destination continent as soon
        # as a shippable item is found and the destination is outside HK.
        # NOTE(review): the fee never depends on item count or weight, and
        # an unmatched continent code falls through to 0.00 — confirm.
        for cartitem in self.cart.cartitem_set.all():
            if cartitem.product.is_shippable:
                if self.contact.shipping_address.country.iso2_code != 'HK':
                    if self.contact.shipping_address.country.continent == 'AS':
                        return config_value('SHIPPING', 'RATE_Asia')
                    elif self.contact.shipping_address.country.continent == 'NA':
                        return config_value('SHIPPING', 'RATE_NorthAmerica')
                    elif self.contact.shipping_address.country.continent == 'SA':
                        return config_value('SHIPPING', 'RATE_SouthAmerica')
                    elif self.contact.shipping_address.country.continent == 'EU':
                        return config_value('SHIPPING', 'RATE_Europe')
                    elif self.contact.shipping_address.country.continent == 'AF':
                        return config_value('SHIPPING', 'RATE_Africa')
                    elif self.contact.shipping_address.country.continent == 'OC':
                        return config_value('SHIPPING', 'RATE_Oceania')
                    elif self.contact.shipping_address.country.continent == 'AN':
                        return config_value('SHIPPING', 'RATE_Antarctica')
        # Hong Kong destinations (and carts with no shippable item) ship free.
        return Decimal("0.00")
    def method(self):
        """
        Describes the actual delivery service (Mail, FedEx, DHL, UPS, etc)
        """
        return _("HongKong Post")
    def expectedDelivery(self):
        """
        Can be a plain string or complex calculation returning an actual date
        """
        return _("3 - 4 business days")
    def valid(self, order=None):
        """
        Can do complex validation about whether or not this option is valid.
        For example, may check to see if the recipient is in an allowed country
        or location.
        """
        return True
UTF-8
Python
false
false
2,014
5,411,658,798,554
394629a06af74cab3078a2b36858eb5dce34f6ea
6eb6fbba8c3b961fd1d553404780a7cd7185dd13
/tornado_json/application.py
155cb5f2b758ca15d9d06f6aa254675f40843de5
[
"MIT"
]
permissive
Shu-Ji/Tornado-JSON
https://github.com/Shu-Ji/Tornado-JSON
4585139dc0484f4da2d793445eabd68c21ef9694
f2b0256c8cf518f797c58b0a40cbdc1aadce1085
refs/heads/master
2021-01-14T12:35:13.748532
2014-07-14T01:25:12
2014-07-14T01:25:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import tornado.web
from tornado_json.api_doc_gen import api_doc_gen
class Application(tornado.web.Application):
    """Tornado application wrapper that self-documents its routes.

    On construction it generates API documentation from the supplied
    routes, turns gzip on by default (an explicit setting wins), and
    stores the database connection for handlers to use.

    :type routes: [(url, RequestHandler), ...]
    :param routes: List of routes for the app
    :type settings: dict
    :param settings: Settings for the app
    :param db_conn: Database connection
    """
    def __init__(self, routes, settings, db_conn=None):
        # Emit API documentation for every registered route.
        api_doc_gen(routes)
        # Enable gzip unless the caller configured it explicitly.
        settings.setdefault("gzip", True)
        tornado.web.Application.__init__(self, routes, **settings)
        self.db_conn = db_conn
UTF-8
Python
false
false
2,014
3,135,326,146,454
13e9ba7cde0c36980642cf92b2b31d476d9bebbd
e0939af2faffe3bcab0f74988e35c11c8c435ead
/src/demo/rooms/start.py
b63e9b66f4abf719778994bc3c4ec93d3e9f4821
[]
no_license
Fiona/fiiction
https://github.com/Fiona/fiiction
4498a36d5991c8963ef2c286e877b46109b371be
5e160c460b2de8208fad42c2ff55c33c19e55b7e
refs/heads/master
2022-10-07T05:25:04.122264
2009-01-14T11:15:03
2009-01-14T11:15:03
106,967
4
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""
MY STARTING ROOM
"""
from fiiction.room_helpers import Cavern
from fiiction.dir import North
class start(Cavern):
    """Starting room: a small, dark cave with a north exit and a rock."""
    room_name = "a cave"
    # BUGFIX: "Don'task" -> "Don't ask" (typo in player-visible text).
    room_init_description = """You fall into a small hole and end up in a cave.\n
Don't ask me, I just work here..."""
    room_description = "It is a very dark cave. What else do you want?"
    def create_room(self):
        """Populate the cave: delegate to Cavern, then wire the exit and the rock."""
        Cavern.create_room(self)
        self.exits["north"] = North(self.game.rooms['second_room'])
        self.things["rock"] = self.game.things["rock"]
UTF-8
Python
false
false
2,009
8,924,942,089,891
cbc9c89f982c8f1675847b01eb5ab44a75ef607f
4045e8576b3c0100c0a35ee78a5b5c549c9e2878
/reverseIfElse.py
5a353b971a7957937dafe9ad6ae4534dd571b7ed
[]
no_license
sikoszabolcs/switchEmptyIfs
https://github.com/sikoszabolcs/switchEmptyIfs
9f432b182110d68ab5eb61498fbb3e70fc9c97a1
4f855203f9964570207704c99a4b0cfdf016e2b3
refs/heads/master
2021-01-15T23:11:56.259931
2013-10-02T12:06:56
2013-10-02T12:06:56
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import re
def getFileContents(fileName):
    """Return the entire contents of the file at `fileName` as one string."""
    with open(fileName, "r") as handle:
        return handle.read()
def findIfWithEmptyBodyInString(searchStr):
    """Yield regex matches for brace blocks: an opening '{' at end of line
    through the first '}' standing alone on its own line (non-greedy)."""
    return re.finditer(r'\{$.*?^\}$', searchStr, re.DOTALL | re.MULTILINE)
def findNestedString(searchStr):
    """Return the condition text of every `if (...) { }` (empty body) found
    in `searchStr` — the substring between the first '(' and the last ')'
    of each match.

    Raises NameError when a match lacks brackets.
    """
    expressions = []
    for match in re.finditer(r'if((:?.*?\s*)*)\{\s*\}', searchStr):
        text = match.group()
        first = text.find('(')   # first occurrence of '('
        last = text.rfind(')')   # last occurrence of ')'
        if first == -1 or last == -1:
            raise NameError("Invalid searchStr! There are missing brackets!")
        expressions.append(text[first + 1:last])
    return expressions
def findMinimal(searchStr):
    """Locate every '{ } else {' bridge (empty if-body followed by else).

    For each occurrence, also extract:
    - the else block's body (text up to its matching closing brace), and
    - the if condition (scanned backwards to its matching opening bracket).

    :Returns:
        (matchObjects, ifExpressions, elseBlocks) — three parallel lists.
    """
    prog = re.compile(r'\{\s*\}\s*else\s*\{', re.DOTALL)
    matches = prog.finditer(searchStr)
    posInSearchStr = 0
    ifExpressions = []
    elseBlocks = []
    matchObjects = []
    for m in matches:
        matchObjects.append(m)
        # find else block
        # walk forward from the match end, tracking brace depth until the
        # else block's own closing brace is reached
        openCurlyBraceCount = 1
        posInSearchStr = m.end()
        elseBlock = ""
        while openCurlyBraceCount != 0:
            curChar = searchStr[posInSearchStr]
            if curChar == '{':
                openCurlyBraceCount += 1
            if curChar == '}':
                openCurlyBraceCount -= 1
            if openCurlyBraceCount == 0:
                break
            elseBlock += curChar
            posInSearchStr += 1
        elseBlocks.append(elseBlock)
        # find if expression
        # first rewind to the ')' that closes the if condition...
        openBraketCount = 1
        posInSearchStr = m.start()
        while curChar != ')': # !!!WEAKNESS: Error when searchStr contains invalid expression!
            curChar = searchStr[posInSearchStr]
            posInSearchStr -= 1
        # ...then walk further backwards, tracking bracket depth, collecting
        # the condition characters in reverse order
        ifExpression = ""
        while openBraketCount != 0:
            curChar = searchStr[posInSearchStr]
            if curChar == ')':
                openBraketCount += 1
            if curChar == '(':
                openBraketCount -= 1
            if openBraketCount == 0:
                break
            ifExpression += curChar
            posInSearchStr -= 1
        # reverse, because the characters were gathered right-to-left
        ifExpressions.append(ifExpression[::-1])
    return matchObjects, ifExpressions, elseBlocks
def insertRevStart(author, insertPos):
    # NOTE(review): stub — builds the revision-start marker but never returns
    # or inserts it; `insertPos` is unused. Confirm the intended behavior.
    revStart = "// @revstart[-] 20130926 " + author + " replace else query by a negated if query"
def insertRevEnd(author, insertPos):
    # NOTE(review): stub — builds the revision-end marker but never returns
    # or inserts it; `insertPos` is unused. Confirm the intended behavior.
    revEnd = "// @revend 20130926 " + author
def determineIndentationOfRevStartCommentFromPos(ifExprStartPos, searchStr):
    """Return the whitespace that indents the 'if' whose expression starts
    at `ifExprStartPos`; raise if no 'if' is found on that line."""
    # walk backwards to the start of the line, collecting characters
    posInSearchStr = ifExprStartPos
    indentation = ""
    curChar = ''
    while curChar != '\n' and posInSearchStr >= 0:
        curChar = searchStr[posInSearchStr]
        indentation += curChar
        posInSearchStr -= 1
    indentation = indentation[::-1] # reverse
    try:
        # keep only what precedes the 'if' keyword — the actual indentation
        indentation = indentation[:indentation.index('if')]
    except Exception, e:  # Python 2 syntax; `e` is unused
        raise Exception("Invalid IF expression!")
    return indentation
def insertElseIntoIf(searchStr):
    """For each ``{ } else {`` occurrence found by findMinimal, remove the
    span between the empty if-body's opening '{' and the else's '{',
    effectively merging the else body into the if.

    Matches are processed back-to-front so earlier offsets stay valid
    while the string shrinks.
    """
    matches, _ifExprs, _elseBodies = findMinimal(searchStr)
    result = searchStr
    for match in reversed(matches):
        head = result[:match.start() + 1]
        tail = result[match.end():]
        result = head + tail
    return result
#def getElseBodyPart():
#def getIfExpression():
#def negateExpression():
#def switchIfBodyWithElseBody():
#def readAllFilesFromFolder(folder):
def main():
    """Demo entry point: read the test file and print the parenthesized
    expressions found in it."""
    #fileName = "d:\Views\evoSiSz0_ESF_CoreDev_002.001_AT_pre\ESF_CORE\dev\logic\src\BaseSI.cpp"
    fileName = "test.txt"
    expressions = findNestedString(getFileContents(fileName))
    # Fix: `print expressions` is Python-2-only syntax; a single
    # parenthesized argument prints identically on Python 2 and 3.
    print(expressions)

if __name__ == "__main__":
    main()
UTF-8
Python
false
false
2,013
2,147,483,655,269
e27bf8fcbe770d94cc0156e92748b7244ca917a7
3bb83490573abcd8500aafdb601ac85e4e7a494f
/setup.py
143860cf121ff97648f298f740dc154b1aa66487
[
"MIT"
]
permissive
CNXTEoE/fastqp
https://github.com/CNXTEoE/fastqp
d2ab16f5b65c2731ce81083e21b5c892caedc000
ff5734fc574dbe9afe0c8ad7906901e973580636
refs/heads/master
2017-06-23T18:04:32.730403
2014-10-09T16:43:10
2014-10-09T16:43:10
83,190,693
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from setuptools import setup
# Packaging metadata for the fastqp command-line tool (setuptools).
setup(
    name = 'fastqp',
    provides = 'fastqp',
    version = "0.1.4",
    author = 'Matthew Shirley',
    author_email = '[email protected]',
    url = 'http://mattshirley.com',
    description = 'Simple NGS read quality assessment using Python',
    license = 'MIT',
    packages = ['fastqp', 'fastqp.backports'],
    install_requires=['six', 'matplotlib', 'numpy', 'pyfaidx'],
    # Installs a `fastqp` console command backed by fastqp.cli:main.
    entry_points = { 'console_scripts': [ 'fastqp = fastqp.cli:main' ] },
    classifiers = [
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Environment :: Console",
        "Intended Audience :: Science/Research",
        "Natural Language :: English",
        "Operating System :: Unix",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Topic :: Scientific/Engineering :: Bio-Informatics"
    ]
)
#!/usr/bin/env python2.5
##
## This file is part of ContextKit.
##
## Copyright (C) 2008, 2009 Nokia. All rights reserved.
##
## Contact: Marius Vollmer <[email protected]>
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public License
## version 2.1 as published by the Free Software Foundation.
##
## This library is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
## 02110-1301 USA
##
##
## This test:
## - starts up a client and a provider
## - checks for the provided values on the client stdout
## - starts a commander with a different value for that property
## - checks the output for the changed value
## - changes the value inside the commander to unknown, check it
## - changes the value inside the commander back to some meaningful value, check it
## - kills the commander
## - checks that value goes back to normal
##
import sys
import os
import unittest
from ContextKit.cltool import CLTool, wanted, wantedUnknown
class CommanderAppearing(unittest.TestCase):
    """End-to-end check that a context commander's values override a real
    provider's values, and that the provider's original value reappears
    once the commander goes away (scenario described in the file header)."""

    def setUp(self):
        # Enable commanding for the listener started in the test body.
        os.environ['CONTEXT_COMMANDING'] = '1'

    def tearDown(self):
        # Written by the provider's "dump" command during the test.
        os.unlink('context-provide.context')

    def testCommanderFunctionality(self):
        # Real provider offering test.int = 42.
        provider = CLTool("context-provide", "--v2", "com.nokia.test", "int", "test.int", "42")
        provider.send("dump")
        provider.expect("Wrote") # wait for it

        listen = CLTool("context-listen", "test.int")
        # NOTE(review): self.assert_ is the deprecated alias of assertTrue.
        self.assert_(listen.expect(wanted("test.int", "int", "42")),
                     "Bad value initially from the real provider, wanted 42")

        # Start a commander that overrides test.int with 4242.
        commander = CLTool("context-provide", "--v2")
        commander.send("add int test.int 4242")
        commander.send("start")
        commander.expect("Added") # wait for it
        self.assert_(listen.expect(wanted("test.int", "int", "4242")),
                     "Value after commander has been started is wrong, wanted 4242")

        # Commander sets the value to unknown...
        commander.send("unset test.int")
        listen.comment("commander commanded test.int to unknown")
        self.assert_(listen.expect(wantedUnknown("test.int")),
                     "Value after commander has changed it to unknown is wrong")

        # ...then back to a concrete value.
        commander.send("test.int = 1235")
        self.assert_(listen.expect(wanted("test.int", "int", "1235")),
                     "Value after commander has changed it is wrong, wanted 1235")

        # Commander exits here; the provider's original 42 must reappear.
        commander.wait()
        listen.comment("Commander killed")
        self.assert_(listen.expect(wanted("test.int", "int", "42")),
                     "Value after killing the commander is wrong, wanted 42")

        listen.wait()
        provider.wait()
def runTests():
    """Load and run the CommanderAppearing suite; return how many tests
    errored or failed (0 means full success)."""
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(CommanderAppearing)
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    return len(outcome.errors) + len(outcome.failures)

if __name__ == "__main__":
    sys.exit(runTests())
#! /usr/bin/python
# Finds the longest substring of the input that matches a run of
# *consecutive* alphabet letters (e.g. "abcd").  Note: despite the original
# comment, the algorithm requires consecutive letters, not merely
# non-decreasing order.

ALPHABET = "abcdefghijklmnopqrstuvwxyz"

def longest_alpha_run(s):
    """Return the longest slice of ``s`` equal to a contiguous slice of the
    alphabet.  Returns a single space when no such slice of length >= 2
    exists (preserving the original script's ``mem = " "`` default).
    """
    best = " "
    n = 2                       # current sample length
    while n <= len(s):
        m = 0                   # offset into the alphabet
        while m <= len(ALPHABET):
            for i in range(0, len(s), 1):
                # Keep only strictly longer matches, exactly as before.
                if s[i:i+n] == ALPHABET[m:m+n] and len(s[i:i+n]) > len(best):
                    best = s[i:i+n]
            m += 1
        m = 0
        n += 1
    return best

if __name__ == "__main__":
    # Fix: computation now lives in a testable function and the prompt no
    # longer fires on import.  raw_input kept: this is a Python 2 script.
    s = raw_input("Enter a string: ")
    print("Longest substring in alphabetical order is: " + longest_alpha_run(s))
UTF-8
Python
false
false
2,014
17,068,200,073,552
5648ea0ab351be16205804e423c139b1c12f5d9c
ac29a8985d4452773312c550b964c09a3bb1ea1a
/src/moduloerror.py~
218e29758e16b13d5e5b0c8a08dc7bf835d637aa
[]
no_license
aluu0100819373/prct13
https://github.com/aluu0100819373/prct13
2c3816fe5edb961fefbf3486baf92a43be40c227
8691aeacbc1d3619b9f24013ab96a6dbe2e0c1aa
refs/heads/master
2020-05-16T12:12:20.921936
2014-05-09T09:24:55
2014-05-09T09:24:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
#!encoding: UTF-8
import moduloaproximacion
import sys
def error(n, k, umbral):
    """Estimate (as a percentage) how many of ``k`` pi approximations with
    ``n`` terms deviate from moduloaproximacion.pi by more than ``umbral``.
    """
    fallos = 0
    for _ in range(1, k + 1):
        desviacion = moduloaproximacion.aproximapi(n) - moduloaproximacion.pi
        if abs(desviacion) > umbral:
            fallos += 1
    # float() keeps true division under Python 2.
    return fallos / float(k) * 100.0
if __name__=="__main__":
    # Bug fixes: `leng(sys.argv)` was a NameError typo for `len`,
    # `sys.argv[k]` indexed argv with the *value* k instead of position 2,
    # and the final print used Python-2-only statement syntax.
    if((len(sys.argv)==1) or (len(sys.argv)==2)):
        print ("No se han introducido los valores necesarios. Se utilizarán los valores predeterminados")
        n=10
        k=10
        u=0.1
    else:
        n= int(sys.argv[1])
        k= int(sys.argv[2])
        u= float(sys.argv[3])
    print(error(n,k,u))
UTF-8
Python
false
false
2,014
326,417,543,914
4515c238c9fe9ab1c0c396bc8a6e648af617e7c0
700ac70ae453ae5811a447e3f4ad017be187db93
/webby/test/livetest_signup.py
f0323a2b4f10acf3a06a5b6caa618660ec691bc6
[]
no_license
corydodt/Vellum
https://github.com/corydodt/Vellum
bd4e95db8213be728f24bba375c277e833531b4b
d281e3b380e42d28a15ac0a06059392baef455bd
refs/heads/master
2020-05-17T05:34:52.196188
2009-02-07T05:56:08
2009-02-07T05:56:08
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from twisted.internet import defer
from nevow import athena
from nevow.livetrial import testcase
from webby import signup
class StubSignup(signup.Signup):
    """Signup widget that doesn't really send email."""

    def processSignup(self, email, password):
        """
        Make it succeed or fail by passing an email address with or without
        an @ sign.
        """
        if '@' in email:
            d = defer.succeed(None)
        else:
            # Deliberate ZeroDivisionError so the client-side test
            # observes a server failure (d is never bound on this path).
            1/0
        return d
    # Expose the method to the browser-side Athena code.
    athena.expose(processSignup)
class TestSignup(testcase.TestCase):
    """Live-trial test case; the matching JavaScript class drives it."""
    jsClass = u'Signup.Tests.TestSignup'

    def newSignup(self, ):
        """
        Return a new Signup widget
        """
        # StubSignup avoids sending real email during the live test.
        su = StubSignup('http://')
        su.setFragmentParent(self)
        return su
    # Expose the factory to the browser-side Athena code.
    athena.expose(newSignup)
UTF-8
Python
false
false
2,009
1,821,066,176,169
e04df93ba99a722afa01b5cea7de9f648a7798aa
4bd74c4224433552535af829455d095b1cbd3bba
/nappingcat/serve.py
1315b78ff8febe5f3f6427fac50a9154d9c4d9f3
[
"CDDL-1.0"
]
non_permissive
tswicegood/nappingcat
https://github.com/tswicegood/nappingcat
ee0f1b2b816aac2dca4bf2a48c5bea022162e09b
1992861f21d35d8758420dc0b1e9414ffa68d94a
refs/heads/master
2020-12-24T22:48:34.711224
2010-08-28T18:44:12
2010-08-28T18:44:12
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from nappingcat import config
from nappingcat.app import App
from nappingcat.patterns import patterns, include, CommandPatterns
from nappingcat.util import import_module, import_class_from_module
from nappingcat.request import Request
from nappingcat.exceptions import NappingCatBadArguments
import sys
class ServeApp(App):
    """nappingcat SSH entry point: builds a Request from the SSH
    environment and dispatches it through the configured command routers."""

    def create_request(self, cmdpatterns):
        """Build the Request for this connection.

        Raises NappingCatBadArguments when environ['argv'] is an empty
        list (no user supplied to nappingcat-serve).
        """
        try:
            user = self.environ.get('argv', [None])[0]
        except IndexError:
            raise NappingCatBadArguments("nappingcat-serve needs a user to run properly.")
        return Request(
            # Fix: reuse the value computed (and guarded) above — the
            # original bound `user` and then re-indexed argv unguarded.
            user=user,
            command=self.environ.get('SSH_ORIGINAL_COMMAND', None),
            settings=self.global_settings,
            streams=(self.stdin, self.stdout, self.stderr),
            root_patterns=cmdpatterns,
        )

    def setup_environ(self):
        """Extend App.setup_environ with router and public-key handler setup."""
        super(ServeApp, self).setup_environ()
        router_module_names = self.nappingcat_settings.get('routers')
        router_module_names = "" if not router_module_names else router_module_names
        # One newline-separated module name per router, each mounted at '^'.
        self.routers = [(r'^', include(i)) for i in router_module_names.split('\n') if i]
        pubkey_handler_name = self.nappingcat_settings.get('public_key_handler', 'nappingcat.pubkey_handlers.AuthorizedKeysFile')
        self.public_key_handler = import_class_from_module(pubkey_handler_name)()

    def main(self):
        """Match the incoming SSH command against the routers, run the
        matched target, then let the auth backend flush key changes."""
        cmdpatterns = CommandPatterns('', self.routers)
        request = self.create_request(cmdpatterns)
        target, match = cmdpatterns.match(request.command)
        result = target(request, **match.groupdict())
        request.auth_backend.finish(self.public_key_handler)
        return result
UTF-8
Python
false
false
2,010
13,331,578,532,842
4809b6cd7a279207797ddf69c0d65a51210710fa
80e1e28ffa95d4d59f66153683f230afb695b69c
/explib/__init__.py
efad581a23ac7708e5a1af7552fb4a47498b4c6a
[
"LGPL-2.0-or-later",
"LGPL-2.1-or-later"
]
non_permissive
humm/l2l
https://github.com/humm/l2l
f4e0b1b7d20e665bc228ff10b83dbad5f274b1fe
c5bf2433fd79054963c68696bef1726eb972ee80
refs/heads/master
2021-01-22T01:06:06.649780
2013-08-08T10:23:16
2013-08-08T10:23:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import analysis, runs, stages
UTF-8
Python
false
false
2,013
8,031,588,871,029
2673dd4e30b3961fc8014f2a823e1ae51f5dbc92
85346d91cc9f783c388ee9f27f4720a66d7ae1ff
/model2d/testing_testing.py
16fe30b4c68c1f80fe1c33711d4acb84ee73c644
[]
no_license
arebgun/bolt
https://github.com/arebgun/bolt
6c2279e7df90ff6ec4eb239dea668cb2e55ed15f
1c00b276bd7c21f0d8e2e1ab974eb528ad8686df
refs/heads/master
2021-01-17T16:22:28.675054
2013-06-07T01:12:55
2013-06-07T01:12:55
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
from __future__ import division
# from random import random
import sys
import traceback
sys.path.append("..")
from myrandom import random
choice = random.choice
random = random.random
from sentence_from_location import (
generate_sentence,
accept_correction,
accept_object_correction,
train,
Point
)
from location_from_sentence import get_tree_probs
from parse import get_modparse, ParseError
from nltk.tree import ParentedTree
from semantics.run import construct_training_scene
from semantics.landmark import Landmark
from semantics.relation import DistanceRelation
from semantics.representation import PointRepresentation, LineRepresentation, RectangleRepresentation, GroupLineRepresentation
from nltk.metrics.distance import edit_distance
from planar import Vec2
from utils import logger, m2s, entropy_of_probs, printcolors
import numpy as np
from matplotlib import pyplot as plt
from copy import copy
from datetime import datetime
from location_from_sentence import get_all_sentence_posteriors
from multiprocessing import Process, Pipe
from itertools import izip, product
from models import CProduction, CWord
from math import ceil
from utils import categorical_sample
import utils
import time
import shelve
from semantics.language_generator import describe
from parse import ParseError
# import IPython
# IPython.embed()
def spawn(f):
    """Wrap ``f`` for execution in a child process.

    The returned function closes the parent's pipe end, evaluates ``f`` on
    its argument, sends the result through the child end, then closes it.
    """
    def worker(parent_conn, child_conn, arg):
        parent_conn.close()
        child_conn.send(f(arg))
        child_conn.close()
    return worker
def parmap(f, X):
    """Parallel map: run ``f`` over X with one forked process per element,
    collecting results through pipes.  Result order matches X."""
    pipes = [Pipe() for _ in X]
    workers = [Process(target=spawn(f), args=(parent, child, item))
               for item, (parent, child) in izip(X, pipes)]
    for w in workers:
        w.start()
    results = [parent.recv() for (parent, child) in pipes]
    for w in workers:
        w.join()
    return results
def autocorrect(scene_descs, test_scene_descs, turk_answers, tag='', chunksize=5, scale=1000, num_processors=7, num_samples=5, step=0.04):
# plt.ion()
printing=False
def loop(data):
time.sleep(data['delay'])
scene = data['scene']
speaker = data['speaker']
utils.scene.set_scene(scene,speaker)
num_iterations = len(data['loc_descs'])
all_meanings = data['all_meanings']
loi = data['loi']
loi_infos = data['loi_infos']
landmarks = data['landmarks']
sorted_meaning_lists = data['sorted_meaning_lists']
learn_objects = data['learn_objects']
def heatmaps_for_sentences(sentences, all_meanings, loi_infos, xs, ys, scene, speaker, step=0.02):
printing=False
x = np.array( [list(xs-step*0.5)]*len(ys) )
y = np.array( [list(ys-step*0.5)]*len(xs) ).T
scene_bb = scene.get_bounding_box()
scene_bb = scene_bb.inflate( Vec2(scene_bb.width*0.5,scene_bb.height*0.5) )
combined_heatmaps = []
for obj_lmk, ms, heatmapss in loi_infos:
combined_heatmap = None
for sentence in sentences:
posteriors = None
while not posteriors:
try:
posteriors = get_all_sentence_posteriors(sentence, all_meanings, printing=printing)
except ParseError as pe:
raise pe
except Exception as e:
print e
sleeptime = random()*0.5
logger('Sleeping for %f and retrying "%s"' % (sleeptime,sentence))
time.sleep(sleeptime)
continue
big_heatmap1 = None
for m,(h1,h2) in zip(ms, heatmapss):
lmk,rel = m
p = posteriors[rel]*posteriors[lmk]
if big_heatmap1 is None:
big_heatmap1 = p*h1
else:
big_heatmap1 += p*h1
if combined_heatmap is None:
combined_heatmap = big_heatmap1
else:
combined_heatmap *= big_heatmap1
combined_heatmaps.append(combined_heatmap)
return combined_heatmaps
object_answers = []
object_distributions = []
object_sentences =[]
object_ids = []
epsilon = 1e-15
for iteration in range(num_iterations):
logger(('Iteration %d comprehension' % iteration),'okblue')
trajector = data['lmks'][iteration]
if trajector is None:
trajector = choice(loi)
logger( 'Teacher chooses: %s' % trajector )
probs, sorted_meanings = zip(*sorted_meaning_lists[trajector][:30])
probs = np.array(probs)# - min(probs)
probs /= probs.sum()
sentences = data['loc_descs'][iteration]
if sentences is None:
(sampled_landmark, sampled_relation) = categorical_sample( sorted_meanings, probs )[0]
logger( 'Teacher tries to say: %s' % m2s(sampled_landmark,sampled_relation) )
head_on = speaker.get_head_on_viewpoint(sampled_landmark)
sentences = [describe( head_on, trajector, sampled_landmark, sampled_relation )]
object_sentences.append( ' '.join(sentences) )
object_ids.append( data['ids'][iteration] )
logger( 'Teacher says: %s' % ' '.join(sentences))
for i,(p,sm) in enumerate(zip(probs[:15],sorted_meanings[:15])):
lm,re = sm
logger( '%i: %f %s' % (i,p,m2s(*sm)) )
lmk_probs = []
try:
combined_heatmaps = heatmaps_for_sentences(sentences, all_meanings, loi_infos, xs, ys, scene, speaker, step=step)
except ParseError as e:
logger( 'Unable to get object from sentence. %s' % e, 'fail' )
top_lmk = None
distribution = [(0, False, False)]
else:
for combined_heatmap,obj_lmk in zip(combined_heatmaps, loi):
ps = [p for (x,y),p in zip(list(product(xs,ys)),combined_heatmap) if obj_lmk.representation.contains_point( Vec2(x,y) )]
# print ps, xs.shape, ys.shape, combined_heatmap.shape
lmk_probs.append( (sum(ps)/len(ps), obj_lmk) )
lmk_probs = sorted(lmk_probs, reverse=True)
top_p, top_lmk = lmk_probs[0]
lprobs, lmkss = zip(*lmk_probs)
distribution = [ (lprob, lmk.name, loi.index(lmk)) for lprob,lmk in lmk_probs ]
logger( sorted(zip(np.array(lprobs)/sum(lprobs), [(l.name, l.color, l.object_class) for l in lmkss]), reverse=True) )
logger( 'I bet %f you are talking about a %s %s %s' % (top_p/sum(lprobs), top_lmk.name, top_lmk.color, top_lmk.object_class) )
# objects.append(top_lmk)
answer = (trajector.name,loi.index(trajector))
object_answers.append( answer )
object_distributions.append( distribution )
# Present top_lmk to teacher
logger("top_lmk == trajector: %r, learn_objects: %r" % (top_lmk == trajector,learn_objects), 'okgreen')
if top_lmk == trajector or not learn_objects:
# Give morphine
logger("Ahhhhh, morphine...", 'okgreen')
pass
else:
logger("LEARNING!!!!!!!!!!!", 'okgreen')
updates, _ = zip(*sorted_meaning_lists[trajector][:30])
howmany=5
for sentence in sentences:
for _ in range(howmany):
meaning = categorical_sample( sorted_meanings, probs )[0]
update = updates[ sorted_meanings.index(meaning) ]
try:
accept_object_correction( meaning, sentence, update*scale, printing=printing)
except:
pass
for update, meaning in sorted_meaning_lists[trajector][-howmany:]:
try:
accept_object_correction( meaning, sentence, update*scale, printing=printing)
except:
pass
return zip(object_answers, object_distributions, object_sentences, object_ids)
filename = 'testing'
filename += ('_u%i_%s_%s.shelf' % (scale,tag,time.asctime(time.localtime()).replace(' ','_').replace(':','')))
f = shelve.open(filename)
f['turk_answers'] = turk_answers
f['chunks'] = []
f['object_answers'] = []
f['object_distributions'] = []
f['object_sentences'] = []
f['object_ids'] = []
f['test_object_answers'] = []
f['test_object_distributions'] = []
f['test_object_sentences'] = []
f['test_object_ids'] = []
f.close()
def interleave(*args):
for idx in range(0, max(len(arg) for arg in args)):
for arg in args:
try:
yield arg[idx]
except IndexError:
continue
def chunks(l, n):
return [l[i:i+n] for i in range(0, len(l), n)]
num_scenes = len(scene_descs)
processors_per_scene = int(num_processors/num_scenes)
# new_scene_descs = scene_descs
new_scene_descs = []
new_test_scene_descs = []
# cache = shelve.open('cache.shelf')
# if 'new_scene_descs' in cache:
# new_scene_descs = cache['new_scene_descs']
# new_test_scene_descs = cache['new_test_scene_descs']
# else:
for i,(scene_desc, test_scene_desc) in enumerate(zip(scene_descs,test_scene_descs)):
scene = scene_desc['scene']
speaker = scene_desc['speaker']
assert(scene == test_scene_desc['scene'])
assert(speaker == test_scene_desc['speaker'])
utils.scene.set_scene(scene,speaker)
scene_bb = scene.get_bounding_box()
scene_bb = scene_bb.inflate( Vec2(scene_bb.width*0.5,scene_bb.height*0.5) )
table = scene.landmarks['table'].representation.get_geometry()
loi = [lmk for lmk in scene.landmarks.values() if lmk.name != 'table']
all_heatmaps_tupless, xs, ys = speaker.generate_all_heatmaps(scene, step=step, loi=loi)
loi_infos = []
all_meanings = set()
for obj_lmk,all_heatmaps_tuples in zip(loi, all_heatmaps_tupless):
lmks, rels, heatmapss = zip(*all_heatmaps_tuples)
meanings = zip(lmks,rels)
# print meanings
all_meanings.update(meanings)
loi_infos.append( (obj_lmk, meanings, heatmapss) )
all_heatmaps_tuples = speaker.generate_all_heatmaps(scene, step=step)[0][0]
landmarks = list(set(zip(*all_heatmaps_tuples)[0]))
object_meaning_applicabilities = {}
for obj_lmk, ms, heatmapss in loi_infos:
for m,(h1,h2) in zip(ms, heatmapss):
ps = [p for (x,y),p in zip(list(product(xs,ys)),h1) if obj_lmk.representation.contains_point( Vec2(x,y) )]
if m not in object_meaning_applicabilities:
object_meaning_applicabilities[m] = {}
object_meaning_applicabilities[m][obj_lmk] = sum(ps)/len(ps)
k = len(loi)
for meaning_dict in object_meaning_applicabilities.values():
total = sum( meaning_dict.values() )
if total != 0:
for obj_lmk in meaning_dict.keys():
meaning_dict[obj_lmk] *= meaning_dict[obj_lmk]/total# - 1.0/k
# total = sum( [value for value in meaning_dict.values() if value > 0] )
# for obj_lmk in meaning_dict.keys():
# meaning_dict[obj_lmk] = (2 if meaning_dict[obj_lmk] > 0 else 1)*meaning_dict[obj_lmk] - total
sorted_meaning_lists = {}
for m in object_meaning_applicabilities.keys():
for obj_lmk in object_meaning_applicabilities[m].keys():
if obj_lmk not in sorted_meaning_lists:
sorted_meaning_lists[obj_lmk] = []
sorted_meaning_lists[obj_lmk].append( (object_meaning_applicabilities[m][obj_lmk], m) )
for obj_lmk in sorted_meaning_lists.keys():
sorted_meaning_lists[obj_lmk].sort(reverse=True)
together = zip(scene_desc['lmks'],scene_desc['loc_descs'],scene_desc['ids'])
n = int(ceil(len(together)/float(processors_per_scene)))
for j,chunk in enumerate(chunks(together,n)):
lmks, loc_descs, ids = zip(*chunk)
new_scene_descs.append( {'scene':scene,
'speaker':speaker,
'lmks':lmks,
'loc_descs':loc_descs,
'ids':ids,
'all_meanings':all_meanings,
'loi':loi,
'loi_infos':loi_infos,
'landmarks':landmarks,
'sorted_meaning_lists':sorted_meaning_lists,
'learn_objects':True,
'delay':(j*num_scenes+i)/10.0})
# test_scene_desc['all_meanings'] = all_meanings
# test_scene_desc['loi'] = loi
# test_scene_desc['loi_infos'] = loi_infos
# test_scene_desc['landmarks'] = landmarks
# test_scene_desc['sorted_meaning_lists'] = sorted_meaning_lists
# test_scene_desc['learn_objects'] = False
together = zip(test_scene_desc['lmks'],test_scene_desc['loc_descs'],test_scene_desc['ids'])
n = int(ceil(len(together)/float(processors_per_scene)))
for j,chunk in enumerate(chunks(together,n)):
lmks, loc_descs, ids = zip(*chunk)
new_test_scene_descs.append( {'scene':scene,
'speaker':speaker,
'lmks':lmks,
'loc_descs':loc_descs,
'ids':ids,
'all_meanings':all_meanings,
'loi':loi,
'loi_infos':loi_infos,
'landmarks':landmarks,
'sorted_meaning_lists':sorted_meaning_lists,
'learn_objects':False,
'delay':(j*num_scenes+i)/10.0})
# cache['new_scene_descs'] = new_scene_descs
# cache['new_test_scene_descs'] = new_test_scene_descs
# chunksize = 5
proc_batches = []
for scene in new_scene_descs:
proc_batch = []
for chunk in chunks(zip(scene['lmks'],scene['loc_descs'],scene['ids']),chunksize):
lmks, loc_descs, ids = zip(*chunk)
proc_batch.append({
'scene':scene['scene'],
'speaker':scene['speaker'],
'all_meanings':scene['all_meanings'],
'loi':scene['loi'],
'loi_infos':scene['loi_infos'],
'landmarks':scene['landmarks'],
'sorted_meaning_lists':scene['sorted_meaning_lists'],
'learn_objects':scene['learn_objects'],
'lmks':lmks,
'loc_descs':loc_descs,
'ids':ids,
'delay':scene['delay']})
proc_batches.append(proc_batch)
batches = map(None,*proc_batches)
batches = map(lambda x: filter(None,x), batches)
print len(batches)
for batch in batches:
print ' ',len(batch)
for i,batch in enumerate(batches):
logger('Training on batch %i' % i)
lists = parmap(loop,batch)
# lists = map(loop,batch)
result = list(interleave(*lists))
object_answers, object_distributions, object_sentences, object_ids = zip(*result)
logger('Testing after batch %i' % i)
test_lists = parmap(loop,new_test_scene_descs)
# test_lists = map(loop,new_test_scene_descs)
test_result = list(interleave(*test_lists))
test_object_answers, test_object_distributions, test_object_sentences, test_object_ids = zip(*test_result)
f = shelve.open(filename)
f['object_answers'] += object_answers
f['object_distributions'] += object_distributions
f['object_sentences'] += object_sentences
f['object_ids'] += object_ids
f['chunks'] += [len(result)]
f['test_object_answers'] += [test_object_answers]
f['test_object_distributions'] += [test_object_distributions]
f['test_object_sentences'] += [test_object_sentences]
f['test_object_ids'] += [test_object_ids]
f.close()
logger("Exiting")
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--num_iterations', type=int, default=1)
    parser.add_argument('-u', '--update_scale', type=int, default=1000)
    parser.add_argument('-p', '--num_processors', type=int, default=7)
    # NOTE(review): store_true makes num_samples a bool, but autocorrect's
    # num_samples parameter defaults to an int (5) — likely a leftover flag.
    parser.add_argument('-s', '--num_samples', action='store_true')
    args = parser.parse_args()

    # scene, speaker = construct_training_scene()
    # NOTE(review): autocorrect is declared as
    # autocorrect(scene_descs, test_scene_descs, turk_answers, ...), yet is
    # called here with num_iterations as the only positional argument —
    # this call does not match the signature and would raise at runtime;
    # confirm intended usage before relying on this entry point.
    autocorrect(args.num_iterations, # window=args.window_size,
        scale=args.update_scale, num_processors=args.num_processors, num_samples=args.num_samples,)
UTF-8
Python
false
false
2,013
12,197,707,139,646
84d1093fe172c134f91102809f82ee338e23c775
ec38d09259b91bdfad290304976eca21375dee3a
/052.py
1bdfc7dfe8d339f92e3e81557b1237413794d1c4
[]
no_license
notonlysuccess/ProjectEuler
https://github.com/notonlysuccess/ProjectEuler
192203d803e4dd3ff3476d6de7917902e9cdbe81
4195be5e22b893d0f33dfc39b85e34a596c2b727
refs/heads/master
2020-03-29T12:17:35.813644
2013-07-05T09:43:24
2013-07-05T09:43:24
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
def find_smallest_permuted_multiple(limit=10000000):
    """Project Euler 52: return the smallest positive x below ``limit``
    such that 2x, 3x, 4x, 5x and 6x are all digit-permutations of x.
    Returns None when no such x exists below the limit.
    """
    # while-loop instead of xrange keeps the code valid (and lazy) on
    # both Python 2 and Python 3.
    x = 1
    while x < limit:
        digits = sorted(str(x))
        if all(sorted(str(x * i)) == digits for i in range(2, 7)):
            return x
        x += 1
    return None

if __name__ == "__main__":
    # Same observable behavior as the original script: print the first hit.
    print(find_smallest_permuted_multiple())
#!/usr/bin/env python2
import os
import re
import sys
import numpy as np
# Stitch square float64 tiles named "<name>_<res>x<res>_<x>_<y>.bin" into
# one <res> x <res> mosaic, mirror-complement the bottom half, and write
# the result to "<name>_<res>x<res>.bin".  (Python 2 script: print
# statements below are py2-only syntax.)
regex = re.compile("(.+)_(\d+)x\d+_(\d+)_(\d+).bin")
array = None
name = ""
resolution = 0
for filename in sys.argv[1:]:
    m = regex.match(filename)
    if m is None:
        # File name doesn't match the tile pattern; skip it.
        print filename, "fail"
        continue
    name = m.groups()[0]
    resolution = int(m.groups()[1])
    x = int(m.groups()[2])  # tile offset (row) within the mosaic
    y = int(m.groups()[3])  # tile offset (column) within the mosaic
    if array is None:
        # First matching tile decides the size of the full mosaic.
        array = np.zeros((resolution, resolution), dtype=np.float64)
    part = np.fromfile(filename, dtype=np.float64)
    # Tiles are square: side length is sqrt of the element count.
    # NOTE(review): np.sqrt yields a float; using it in resize/slicing
    # relies on old-numpy float-index tolerance — confirm on the target
    # numpy version.
    part_size = np.sqrt(part.shape[0])
    part.resize((part_size, part_size))
    array[x:x+part_size, y:y+part_size] = part
    print filename
# Bottom half := 1 - (top half flipped vertically), i.e. the mirrored
# complement of the rows above the midline.
array[resolution/2:resolution, :] = np.float64(1.0) - array[resolution/2-1::-1, :]
array.tofile("%s_%ix%i.bin" % (name, resolution, resolution))
UTF-8
Python
false
false
2,014
1,700,807,066,182
0c2e7fdb30d02a29568efa75652cc030ef1d4814
4033877aababccf04f3ffda0377eaaa554432d77
/genastack_roles/mysql_connector/__init__.py
c7b319251d08b452042f9575154f130a16f26b0b
[
"GPL-3.0-only"
]
non_permissive
cloudnull/genastack_roles
https://github.com/cloudnull/genastack_roles
f89d3a2bb1c4e7faba27b048ce726c0de613c69b
6a3c835257d23fe260966e3078a3433f125e7cbb
refs/heads/master
2016-09-05T20:38:00.243469
2014-05-28T21:17:45
2014-05-28T21:17:45
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# =============================================================================
# Copyright [2013] [Kevin Carter]
# License Information :
# This software has no warranty, it is provided 'as is'. It is your
# responsibility to validate the behavior of the routines and its accuracy
# using the code provided. Consult the GNU General Public license for further
# details (see GNU General Public License).
# http://www.gnu.org/licenses/gpl.html
# =============================================================================
import os
from genastack.common import utils
# Role configuration for building the MySQL Connector/C client library.
ARGS = utils.get_role_config('mysql-connector')
BRANCH = ARGS.get('branch', 'master')  # NOTE(review): unused in this module
PROJECT_URL = ARGS.get(
    'project_url',
    'http://dev.mysql.com/get/Downloads/Connector-C'
    '/mysql-connector-c-6.1.3-src.tar.gz'
)

# Working directories: TEMP for the download, WORK/LIBS for build output.
TEMP_PATH = utils.return_temp_dir()
WORK_PATH = utils.return_rax_dir()
LIBS_PATH = utils.return_rax_dir(path='openstack/lib')

NAME = 'mysql-connector-c-6.1.3-src.tgz'

# Commands run in order inside the unpacked source tree.
# NOTE(review): the first two cmake invocations are reconfigured by the
# third (which sets the final install prefix) — confirm they are needed.
INSTALL_COMMANDS = [
    'cmake -G "Unix Makefiles"',
    'cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug',
    'cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=%s' % WORK_PATH,
    'make install'
]

# Build manifest consumed by the genastack build system.
BUILD_DATA = {
    'mysql_connector': {
        'help': 'Install upstream mysql_connector_c.',
        'build': [
            {
                'get': {
                    'url': PROJECT_URL,
                    'path': TEMP_PATH,
                    'name': NAME,
                    'md5sum': '490e2dd5d4f86a20a07ba048d49f36b2',
                    'uncompress': True
                },
                'export': [
                    'LD_RUN_PATH=%s' % LIBS_PATH
                ],
                # Skip the build when the client library already exists.
                'not_if_exists': os.path.join(LIBS_PATH, 'libmysqlclient.so'),
                'build_commands': INSTALL_COMMANDS,
            }
        ],
        'package_install': {
            'apt': {
                'packages': [
                    'cmake'
                ]
            }
        }
    }
}
UTF-8
Python
false
false
2,014
2,989,297,239,368
a2d7047ed1d0f87cab2f5a748f1df027b94d0cba
c3fc5f9969b63522e94ba32c0b8cb822ac7cdaa2
/project06/HackAssembler.py
b7458cc77059364410dafe26606d14fab3d36f6a
[]
no_license
gik0geck0/csci410-buland
https://github.com/gik0geck0/csci410-buland
3b32f4e12e4fce9e225d15c51be275b91190598b
1a6b9f80b07dc9b29083ed3bc3405b02bd9097f5
refs/heads/master
2016-09-07T04:29:37.573422
2012-12-03T07:48:41
2012-12-03T07:48:41
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
# Matt Buland's HackAssembler written in Python
import sys
import math
import re
class Parser:
def __init__(self, inFileName):
    """Open the .asm input file, derive the matching .hack output path,
    and seed the symbol table with the Hack predefined symbols."""
    self.inFile = None
    self.outFile = None
    self.instructions = [] # list of 16 'bit' arrays. (2-D structure)
    self.errorFlag = False # flag that's thown if there is an error. (prevents writing to the file)
    # Hack predefined symbols: pointer registers, virtual registers
    # R0-R15, and the SCREEN/KBD memory-map base addresses.
    self.symbolTable = {'SP' : 0,
        'LCL' : 1,
        'ARG' : 2,
        'THIS' : 3,
        'THAT' : 4,
        'R0' : 0,
        'R1' : 1,
        'R2' : 2,
        'R3' : 3,
        'R4' : 4,
        'R5' : 5,
        'R6' : 6,
        'R7' : 7,
        'R8' : 8,
        'R9' : 9,
        'R10' : 10,
        'R11' : 11,
        'R12' : 12,
        'R13' : 13,
        'R14' : 14,
        'R15' : 15,
        'SCREEN' : 16384,
        'KBD' : 24576 }
    self.nVars = 16 # next available vars. spot
    self.inFile = open(inFileName, 'r')
    if not self.inFile:
        # NOTE(review): open() raises on failure rather than returning a
        # falsy value, so this guard is effectively dead code.
        print "Parser was given a null file. Exiting"
        exit()
    outFileName = inFileName[:-4] + ".hack"
    print "Output file name: " + outFileName
    self.outFile = open(outFileName, 'w')
# builds the symbol table
def parseSymbols(self):
    """First assembler pass: record each "(LABEL)" in self.symbolTable as
    LABEL -> index of the next real instruction.  Label lines, blank
    lines and // comment lines do not consume an instruction address.
    Sets self.errorFlag on an invalid label name.
    """
    # Fixes: the original assigned a *local* `errorFlag` instead of
    # self.errorFlag (so the error was silently lost), and used Python-2
    # print statements (parenthesized single-argument prints behave
    # identically on Python 2 and 3).
    print("Parsing symbols")
    lineNum = 0
    for line in self.inFile.readlines():
        line = line.strip()
        symMatch = re.match("\((.*)\)", line)
        if symMatch:
            labelMatch = re.match("[a-zA-Z_\.\$:][a-zA-Z_\.\$:0-9]*", symMatch.group(1))
            if labelMatch:
                # we found a symbol
                self.symbolTable[symMatch.group(1)] = lineNum
                lineNum -= 1 # labelling does not count as a line
            else:
                # we found an invalid symbol name
                print("Invalid variable name on line " + str(lineNum) + " : " + symMatch.group(0))
                self.errorFlag = True  # bug fix: was a local `errorFlag`
        if line == "" or re.match("\/\/.*", line):
            lineNum -= 1
        lineNum += 1
# resets the input file pointer to the beginning of the file
def reset(self):
    """Rewind the input file so the second assembly pass can re-read it."""
    self.inFile.seek(0)
# adds an A instruction to the list of instructions, loading the given number into A
def addAInstruction(self, number):
    """Append a 16-'bit' A-instruction loading ``number`` into A.

    The MSB lives at index 0 (and stays 0 for A-instructions); the value
    bits are filled in by self.getBinary starting from index 14.
    """
    encoded = [0] * 16
    # getBinary (defined elsewhere in this class) writes the binary form
    # of `number` into `encoded`; 14 is the high value-bit position.
    self.getBinary(number, encoded, 14)
    self.instructions.append(encoded)
# does a second-pass assemble
def parseFoReal(self):
    """Second assembly pass: translate every source line into a 16-bit Hack
    instruction, resolving labels/variables via self.symbolTable (assumed to
    have been populated by the first pass).  Emits via addAInstruction /
    addCInstruction; malformed lines print a diagnostic with the line number.
    """
    lineNum = 0
    for line in self.inFile.readlines():
        lineNum+=1
        line = line.strip()
        #print "--------------------------------------------------------------------------"
        #print "Line : " + line.strip()
        # One regex covers the whole C-instruction grammar:
        # [dest=] [!|-] operand [op operand] [;jump]
        cInstrMatch = re.match("\s*(([AaMmDd]{1,3})\s*=)?\s*([!-])?([AaMmDd01])\s*(([+&|-])\s*([AaMmDd1]))?(;\s*(JGT|JEQ|JGE|JLT|JNE|JLE|JMP))?.*", line)
        if re.match("\/\/.*",line):
            # line is a comment
            continue
        elif re.match("\s*@", line):
            # contains an @ symbol at the beginning of the line
            # search for a digit following the @
            #print "A instruction"
            digMatch = re.match("\d+", line[1:])
            if digMatch:
                # a digit follows the @ sign. This means load the number into A.
                self.addAInstruction(digMatch.group(0))
            else:
                # didn't find a digit, so let's now look for a label
                labelMatch = re.match("[a-zA-Z_\.\$:][a-zA-Z_\.\$:0-9]*",line[1:])
                # label lookup, replace with value
                if labelMatch.group(0) in self.symbolTable:
                    # the label has been defined before
                    self.addAInstruction(self.symbolTable[labelMatch.group(0)])
                else:
                    # this is a new variable: allocate the next free RAM slot
                    nextVarVal = self.getNextVars(self.symbolTable)
                    self.symbolTable[labelMatch.group(0)] = nextVarVal
                    self.advanceNextVars()
                    self.addAInstruction(self.symbolTable[labelMatch.group(0)])
        elif cInstrMatch:
            #print "C instruction"
            # line is a C-instruction. Could be jump, or not
            a = [0]
            comp = [0]*6
            dest = [0]*3
            jump = [0]*3
            # group 0 = full match
            # group 1 = optional dest
            # group 2 = dest
            # group 3 = optional ! or -
            # group 4 = first part of expression
            # group 5 = optional rest of expression
            # group 6 = operation +-&|
            # group 7 = second part of expression
            # group 8 = optional jump section
            # group 9 = jump string
            gr = [0]*10
            for i in range(10):
                gr[i] = cInstrMatch.group(i)
                #print "Group " + str(i) + " : " + str(gr[i])
            if gr[2]:
                # destination bits in A, D, M order
                if re.match(".*[Aa].*", gr[2]): dest[0] = 1
                if re.match(".*[Dd].*", gr[2]): dest[1] = 1
                if re.match(".*[Mm].*", gr[2]): dest[2] = 1
            # NOTE(review): comparing against str(None) as well as None looks
            # redundant -- the group is either a matched string or None.
            if gr[9] != str(None) and gr[9] != None:
                #print "Checking JUMPS"
                jDict = { 'JGT' : [0, 0, 1],
                          'JEQ' : [0, 1, 0],
                          'JGE' : [0, 1, 1],
                          'JLT' : [1, 0, 0],
                          'JNE' : [1, 0, 1],
                          'JLE' : [1, 1, 0],
                          'JMP' : [1, 1, 1] }
                jump = jDict[gr[9]]
                #print jump
                if not jump:
                    #print "An error occurred with the jump dictionary"
                    exit()
            if gr[3]:
                # Unary operator present: '!' (bitwise not) or '-' (negate).
                if re.match("!",gr[3]):
                    comp[5] = 1
                    if re.match(".*[Dd].*",gr[4]):
                        for i in (2, 3):
                            comp[i] = 1
                    elif re.match(".*[Aa].*",gr[4]):
                        for i in (0, 1):
                            comp[i] = 1
                    elif re.match(".*[Mm].*",gr[4]):
                        for i in (0, 1):
                            comp[i] = 1
                        a = [1]
                    else:
                        print "Unacceptable character following '!' on line " + str(lineNum)
                elif re.match("-",gr[3]):
                    comp[4] = 1
                    if re.match("1",gr[4]):
                        for i in (0, 1, 2):
                            comp[i] = 1
                    elif re.match(".*[Dd].*",gr[4]):
                        for i in (2, 3, 5):
                            comp[i] = 1
                    elif re.match(".*[Aa].*",gr[4]):
                        for i in (0, 1, 5):
                            comp[i] = 1
                    elif re.match(".*[Mm].*",gr[4]):
                        for i in (0, 1, 5):
                            comp[i] = 1
                        a = [1]
                    else:
                        print "Unacceptable character following '-' on line " + str(lineNum)
            else:
                # Does not contain ! or -
                if gr[5]:
                    # Has more to the expression than just the first component
                    if re.match("\+",gr[6]):
                        comp[4] = 1
                        if re.match(".*[Dd].*", gr[4]):
                            if re.match(".*1.*", gr[7]):
                                for i in (1, 2, 3, 4, 5):
                                    comp[i] = 1
                            elif re.match(".*[Aa].*", gr[7]):
                                comp[4] = 1 # prevent the else block
                            elif re.match(".*[Mm].*", gr[7]):
                                comp[4] = 1 # prevent the else block
                                a = [1]
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        elif re.match(".*[Aa].*", gr[4]):
                            if re.match("[1]", gr[7]):
                                for i in (0, 1, 3, 4, 5):
                                    comp[i] = 1
                            elif re.match(".*[Dd].*", gr[7]):
                                # NOTE(review): bare expression, no effect --
                                # D+A comp bits were already set above.
                                comp[4]
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        elif re.match(".*[Mm].*", gr[4]):
                            a = [1]
                            if re.match(".*1.*", gr[7]):
                                for i in (0, 1, 3, 4, 5):
                                    comp[i] = 1
                            elif re.match(".*[Dd].*", gr[7]):
                                # NOTE(review): bare expression, no effect (see above).
                                comp[4]
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        elif re.match(".*1.*", gr[4]):
                            if re.match(".*[Aa].*", gr[7]):
                                for i in (0, 1, 3, 4, 5):
                                    comp[i] = 1
                            elif re.match(".*[Dd].*", gr[7]):
                                for i in (1, 2, 3, 4, 5):
                                    comp[i] = 1
                            elif re.match(".*[Mm].*", gr[7]):
                                a = [1]
                                for i in (1, 2, 3, 4, 5):
                                    comp[i] = 1
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        else:
                            print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                            continue
                    elif re.match(".*-.*", gr[6]):
                        comp[4] = 1
                        if re.match(".*[Dd].*", gr[4]):
                            if re.match(".*[Aa].*", gr[7]):
                                for i in (1, 5):
                                    comp[i] = 1
                            elif re.match(".*[Mm].*", gr[7]):
                                a = [1]
                                for i in (1, 5):
                                    comp[i] = 1
                            elif re.match(".*1.*", gr[7]):
                                for i in (2, 3):
                                    comp[i] = 1
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        elif re.match(".*[Aa].*", gr[4]):
                            if re.match(".*[Dd].*", gr[7]):
                                for i in (3, 5):
                                    comp[i] = 1
                            elif re.match(".*1.*", gr[7]):
                                for i in (0, 1):
                                    comp[i] = 1
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        elif re.match(".*[Mm].*", gr[4]):
                            a = [1]
                            if re.match(".*[Dd].*", gr[7]):
                                for i in (3, 5):
                                    comp[i] = 1
                            elif re.match(".*1.*", gr[7]):
                                for i in (0, 1):
                                    comp[i] = 1
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        else:
                            print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[4]
                    elif re.match(".*&.*", gr[6]):
                        # D&A / D&M / A&D / M&D: comp bits are all zero, so only
                        # validation and the a-bit are needed here.
                        if (re.match(".*[Dd].*", gr[4]) and re.match(".*[Dd].*", gr[7])) or (re.match(".*[Aa].*", gr[4]) and re.match(".*[Aa].*", gr[7])) or (re.match(".*[Mm].*", gr[4]) and re.match(".*[Mm].*", gr[7])) or not re.match(".*[AaDdMm].*", gr[4]) or not re.match(".*[AaDdMm].*", gr[7]):
                            print "Unacceptable operation: " + gr[4] + " & " + gr[7]
                        elif re.match(".*[Mm].*", gr[4]) or re.match(".*[Mm].*", gr[7]):
                            a = [1]
                    elif re.match(".*\|.*", gr[6]):
                        if re.match(".*[Dd].*", gr[4]):
                            if re.match(".*[Aa].*", gr[7]):
                                for i in (1, 3, 5):
                                    comp[i] = 1
                            elif re.match(".*[Mm].*", gr[7]):
                                a = [1]
                                for i in (1, 3, 5):
                                    comp[i] = 1
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        elif re.match(".*[Aa].*", gr[4]):
                            if re.match(".*[Dd].*", gr[7]):
                                for i in (1, 3, 5):
                                    comp[i] = 1
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        elif re.match(".*[Mm].*", gr[4]):
                            a = [1]
                            if re.match(".*[Dd].*", gr[7]):
                                for i in (1, 3, 5):
                                    comp[i] = 1
                            else:
                                print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                        else:
                            print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[4]
                    else:
                        print "Unknown or unacceptable identifier on line " + str(lineNum) + ": " + gr[7]
                else:
                    # deal with only seeing [01AMD]
                    if re.match(".*0.*",gr[4]):
                        for i in (0, 2, 4):
                            comp[i] = 1
                    elif re.match(".*1.*",gr[4]):
                        for i in (0, 1, 2, 3, 4, 5):
                            comp[i] = 1
                    elif re.match(".*[Dd].*",gr[4]):
                        for i in (2, 3):
                            comp[i] = 1
                    elif re.match(".*[Aa].*",gr[4]):
                        for i in (0, 1):
                            comp[i] = 1
                    elif re.match(".*[Mm].*",gr[4]):
                        for i in (0, 1):
                            comp[i] = 1
                        a = [1]
                    else:
                        print "Unknown identifier after '=' on line " + str(lineNum)
            self.addCInstruction(a, comp, dest, jump)
# takes in the dest, comp, and jump arrays, and turns them into the instruction
def addCInstruction(self, a, comp, dest, jump):
    """Assemble a 16-bit C-instruction (prefix 111, then a/comp/dest/jump
    bit fields) and append it to self.instructions.

    Exits the process with a message if any field has the wrong width.
    """
    # Validate every field width in one place; messages preserved verbatim.
    checks = ((a, 1, "The a bit is not 1 bit long!"),
              (comp, 6, "Comp not 6 bits long!"),
              (dest, 3, "Dest not 3 bits long!"),
              (jump, 3, "Jump not 3 bits long!"))
    for field, width, message in checks:
        if len(field) != width:
            # print() with a single argument is valid and identical in both
            # Python 2 and 3 (originals were Python-2-only print statements).
            print(message)
            exit()
    instruction = [1]*3 + a + comp + dest + jump
    self.instructions.append(instruction)
def getNextVars(self, symTable):
    """Return the RAM address to assign to the next new variable.

    The symTable argument is unused here; kept for interface compatibility.
    """
    return self.nVars
def advanceNextVars(self):
    # Move the next-free-variable RAM address forward by one slot.
    self.nVars += 1
# writes all the instructions to the output file
def writeToFile(self):
    """Write every assembled instruction to self.outFile, one line of
    '0'/'1' characters per instruction."""
    # print() with one argument behaves identically on Python 2 and 3
    # (original was a Python-2-only print statement).
    print("Writing out")
    for instr in self.instructions:
        # Join once per instruction instead of one write() call per bit.
        self.outFile.write(''.join(str(bit) for bit in instr))
        self.outFile.write("\n")
# closes the open files
def finish(self):
    """Close the input and output file handles if they were opened."""
    # Single-argument print() is valid Python 2 and 3 (was a py2 print statement).
    print("Closing files")
    if self.inFile:
        self.inFile.close()
    if self.outFile:
        self.outFile.close()
# takes a given integer and inflates its 15-bit unsigned binary representation
# into the array; index should initially be 14
def getBinary(self, number, array, index):
    """Fill array[15-index] .. array[15] with the binary expansion of
    `number` (most significant bit first) and return the same array.

    `number` may be an int or a decimal string.  Iterative rewrite of the
    original recursion; bit shifts replace repeated int(math.pow(2, index)).
    """
    remaining = int(number)
    while index >= 0:
        if remaining >= (1 << index):
            array[15 - index] = 1
            remaining -= 1 << index
        else:
            # this power of two does not fit into the remainder
            array[15 - index] = 0
        index -= 1
    return array
def verifyInputFileName():
    """Validate the command line: require exactly one .asm path argument.

    Returns the file name; exits with a usage message on bad input.
    """
    if len(sys.argv) < 2:
        # Single-argument print() is valid in Python 2 and 3 (original used
        # Python-2-only print statements).
        print("Usage: python HackAssembler.py <fileToAssemble>")
        exit()
    inName = sys.argv[1]
    if inName[-4:] != '.asm':
        print("Provided file extension '" + inName[-4:] + "' is not an asm file.")
        exit()
    return inName
# begins the assembly process
def assemble():
    """Drive the two-pass assembly of the .asm file named on the command line."""
    inputFileName = verifyInputFileName()
    parser = Parser(inputFileName)
    try:
        parser.parseSymbols()
        parser.reset()
        parser.parseFoReal()
        parser.writeToFile()
    finally:
        # files should be closed whether there's an error, or not -- the
        # original comment stated this intent but had no try/finally, so an
        # exception mid-assembly leaked the open file handles.
        parser.finish()

assemble()
from flask import Flask, render_template, request, g, redirect, url_for
from time import localtime, strftime
import sqlite3
import csv
app = Flask(__name__)
# SQLite database file plus the CSV files used to seed it in initialize().
DATABASE = 'blog.db'
csvname = "data.csv"
comcsvname = "comments.csv"
#return dictionary from db
def dict_factory(cursor, row):
    """sqlite3 row factory: return the row as a dict keyed by column name."""
    columns = [description[0] for description in cursor.description]
    return dict(zip(columns, row))
def connect_to_database():
    # Open a fresh SQLite connection to the module-level DATABASE file.
    return sqlite3.connect(DATABASE)
def get_db():
    """Return the request-scoped SQLite connection, creating and caching it
    on flask.g the first time it is needed."""
    connection = getattr(g, '_database', None)
    if connection is None:
        connection = connect_to_database()
        connection.row_factory = dict_factory
        g._database = connection
    return connection
@app.teardown_appcontext
def close_connection(exception):
    # At request teardown, close the per-request connection if one was opened.
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
@app.route("/")
@app.route("/index")
def index():
    # Front page: render every post.
    return render_template("index.html",posts=getPosts())
@app.route("/post/<id>/<title>",methods=['POST','GET'])
def post(title=None,id=None):
    """Show a single post with its comments (GET) or add a comment (POST)."""
    curr_post = getPost(id)
    curr_comments = getComments(id)
    if request.method == 'GET':
        #print curr_comments
        return render_template("post.html",post=curr_post,comments=curr_comments)
    else:
        addComment()
        #return render_template("post.html",post=curr_post,comments=curr_comments)
        # Redirect after POST so refreshing the page does not resubmit.
        return redirect(url_for("post",title=title,id=id))
@app.route("/newpost", methods=["GET","POST"])
def newpost():
    """Render the new-post form (GET) or create a new post (POST).

    On success, redirects to the created post; on missing/empty fields,
    re-renders the form with an error message.
    """
    if request.method == 'POST':
        t = request.form.get("Titles", None)
        newp = request.form.get("PostInfo", None)
        user = request.form.get("author", None)
        submit = request.form.get("submit", None)
        c = get_db().cursor()
        try:
            if len(t) > 0 and len(newp) > 0 and submit == "post":
                if len(user) <= 0:
                    user = "Anonymous"
                todayd = strftime("%x")
                # NOTE(review): deriving the id from COUNT(*)+1 can collide
                # once rows are deleted or inserted concurrently -- consider
                # an AUTOINCREMENT primary key instead.
                c.execute("SELECT COUNT(*) FROM posts")
                iidd = c.fetchone()["COUNT(*)"] + 1
                c.execute("INSERT INTO posts VALUES(?,?,?,?,?)",
                          (iidd, t, newp, todayd, user))
                get_db().commit()
                return redirect(url_for('post', title=t, id=iidd))
            else:
                errormsg = "Make sure your post and title are not empty"
                return render_template("newPost.html", errormsg=errormsg)
        except Exception:
            # len(None) raises TypeError when a form field is missing; show
            # the validation message rather than a 500.  Narrowed from a bare
            # except:, which also swallowed SystemExit/KeyboardInterrupt.
            errormsg = "Make sure your post and title are not empty"
            return render_template("newPost.html", errormsg=errormsg)
    else:
        return render_template("newPost.html")
def initialize():
    """Create the blog.db schema if needed and seed posts/comments from the
    module-level CSV files."""
    # Single-argument print() is identical in Python 2 and 3 (originals were
    # Python-2-only print statements).
    print("Initializing")
    conn = sqlite3.connect('blog.db')
    conn.row_factory = dict_factory
    c = conn.cursor()
    try:
        c.execute("CREATE TABLE posts(id INTEGER UNIQUE, title TEXT UNIQUE, content TEXT, date TEXT, author TEXT)")
        c.execute("CREATE TABLE comments(id INTEGER, content TEXT, date TEXT, author TEXT)")
        print("Creating new tables called 'posts' and 'comments' in blog.db")
    except Exception:
        # Tables already exist; fall through to (re-)seeding.
        print("Adding to tables 'posts' and 'comments' in blog.db")
    # Parameterized inserts -- the original built SQL with % string
    # interpolation, which broke on quotes in the CSV and allowed injection.
    for record in csv.DictReader(open(csvname)):
        try:
            c.execute("INSERT INTO posts VALUES(:id, :title, :content, :date, :author)", record)
            print("Inserted into db")
        except Exception:
            pass  # duplicate id/title: row was seeded on a previous run
    conn.commit()
    c.execute("SELECT COUNT(*) FROM comments")
    count = c.fetchone()
    if count["COUNT(*)"] == 0:
        for record in csv.DictReader(open(comcsvname)):
            try:
                c.execute("INSERT INTO comments VALUES(:id, :content, :date, :author)", record)
            except Exception:
                print("Not inserted.")
    conn.commit()
def addComment():
    """Insert a comment for the post id found in the current request path.

    Empty comments are ignored; an empty name defaults to "Anonymous".
    """
    time = strftime("%b %d %Y %I:%M %p", localtime())
    # URL shape is /post/<id>/<title>, so path segment 2 is the post id.
    id = request.path.split("/")[2]
    c = get_db().cursor()
    # (removed: a stray c.fetchone() left over after its execute() was
    # commented out -- it had no query to fetch from)
    name = request.form['name']
    comment = request.form['comment-text']
    if len(comment) > 0:
        # BUG FIX: was `len(name) < 0`, which is never true, so the
        # "Anonymous" default was dead code and empty names were stored.
        if len(name) == 0:
            name = "Anonymous"
        v = (id, comment, time, name)
        c.execute("INSERT INTO comments VALUES (?,?,?,?)", v)
        get_db().commit()
        print("Added comment")
def getPosts():
    """Return every row of the posts table as a list of dicts."""
    cursor = get_db().cursor()
    cursor.execute("SELECT * FROM posts")
    return cursor.fetchall()
def getPost(id):
    """Return the single post with the given id (or None if absent)."""
    cursor = get_db().cursor()
    cursor.execute("SELECT * FROM posts where id=?", (id,))
    return cursor.fetchone()
def getComments(id):
    """Return all comments belonging to the given post id."""
    c = get_db().cursor()
    c.execute("SELECT * FROM comments WHERE id=?", (id,))
    comments = c.fetchall()
    # Removed a leftover debug `print comments` (Python-2 print statement)
    # that dumped every comment row to stdout on each page view.
    return comments
if __name__=="__main__":
    # Seed the database from the CSV files before serving requests.
    initialize()
    #conn = sqlite3.connect('blog.db')
    #getPosts()
    # NOTE(review): debug=True enables the Werkzeug interactive debugger --
    # not safe outside local development.
    app.debug=True
    app.run(port=5000)
UTF-8
Python
false
false
2,014
9,715,216,065,482
e61ad11a901458638f145aaaa9f99981240ef80d
58444374ac728cb67175f3eb66337053771e7472
/mesh.py
9f150a54b27044d7af12231dee87ce6905126a4b
[]
no_license
thearn/su2_mesh_component
https://github.com/thearn/su2_mesh_component
562e9092809d97ac5bf908066057fa4219a3013c
e44ea861b0fb4dc9411d75eaa4111188ec5599af
refs/heads/master
2020-03-26T22:32:52.692406
2013-10-01T23:50:50
2013-10-01T23:50:50
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import numpy as np
# str.format template for one ASCII-STL facet: face[0..2] is the normal,
# face[3..11] the three vertices, each rendered in scientific notation.
ASCII_FACET = """ facet normal {face[0]:e} {face[1]:e} {face[2]:e}
outer loop
vertex {face[3]:e} {face[4]:e} {face[5]:e}
vertex {face[6]:e} {face[7]:e} {face[8]:e}
vertex {face[9]:e} {face[10]:e} {face[11]:e}
endloop
endfacet"""
def parse(fn, marker_tags = ['LOWER_SIDE','UPPER_SIDE', 'TIP' ]):
    """Parse an SU2 mesh file and return an Nx(3+3k) array of facet rows for
    the requested boundary markers.

    Each returned row is [0, 0, 0] (a zero normal placeholder) followed by
    the coordinates of every node of one boundary element.  The file is
    scanned in four passes: header counts, volume connectivity, point
    coordinates, then the boundary-marker sections.

    NOTE(review): Python-2 code -- files opened 'rb' are compared against
    str literals, and `print locs` is a py2 print statement.
    """
    # Pass 1: read the header counts (dimension, element count, point count).
    f = open(fn,'rb')
    connections = []
    locations = {}
    connect_done = False
    for line in f:
        if "NDIME" in line:
            ndim = int(line.split("=")[-1].strip())
        elif "NELEM" in line:
            nelem = int(line.split("=")[-1].strip())
        elif "NPOIN" in line:
            npoin = int(line.split("=")[-1].split()[-1])
    f.close()
    # Pass 2: the first nelem non-"=" lines are the volume connectivity.
    f = open(fn,'rb')
    i=1
    for line in f:
        if "=" in line:
            continue
        if i <= nelem:
            data = [int(x) for x in line.split()]
            connections.append(data)
            i+=1
        else:
            break
    f.close()
    # Pass 3: skip the nelem connectivity lines, then read npoin coordinate
    # lines; the last column is the node index.
    f = open(fn,'rb')
    i=1
    for line in f:
        if "=" in line:
            continue
        elif i <= nelem:
            i+=1
        elif i <= nelem + npoin:
            S = line.split()
            idx = int(S[-1])
            data = [float(x) for x in S[:3]]
            locations[idx] = data
            i+=1
        else:
            break
    f.close()
    # Pass 4: collect the boundary elements of each MARKER_TAG section.
    f = open(fn,'rb')
    inners = {}
    start, working = False, False
    for line in f:
        if "NMARK" in line:
            nmark = int(line.split("=")[-1].strip())
            start = True
        elif "FFD_NBOX" in line:
            break
        elif start:
            if "MARKER_TAG" in line:
                name = line.split()[-1]
                inners[name] = []
            elif "MARKER_ELEMS" in line:
                marker_elems = int(line.split("=")[-1])
            else:
                try:
                    # First token is the element type id; the rest are node indices.
                    data = [int(x) for x in line.split()[1:]]
                    inners[name].append(data)
                except:
                    pass
    f.close()
    # Flatten the requested markers into facet rows.
    xyzs = []
    #triangs = []
    c_inners = []
    for m in marker_tags:
        c_inners+=inners[m]
    for vertex in c_inners:
        #triangs.append(vertex)
        locs = [0,0,0]
        for idx in vertex:
            locs.extend(locations[idx])
        print locs
        xyzs.append(locs)
    return np.array(xyzs)
def _build_ascii_stl(facets):
"""returns a list of ascii lines for the stl file """
lines = ['solid ffd_geom',]
for facet in facets:
lines.append(ASCII_FACET.format(face=facet))
lines.append('endsolid ffd_geom')
return lines
def writeSTL(facets, file_name, ascii=True):
    """Write the facets to file_name as an STL solid (ASCII or binary)."""
    f = open(file_name, 'w')
    if ascii:
        f.write("\n".join(_build_ascii_stl(facets)))
    else:
        # NOTE(review): _build_binary_stl is not defined anywhere in this
        # file's visible scope -- this path would raise NameError.
        f.write("".join(_build_binary_stl(facets)))
    f.close()
# Script entry: convert the DLR-F6 inviscid SU2 surface mesh's PLANE marker to STL.
facets = parse("mesh_DLRF6_inv.su2", marker_tags=['PLANE'])
writeSTL(facets, "mesh2.stl")
#coding:utf8
'''
Created on 2011-9-9
@author: lan
'''
from app.scense.utils import dbaccess
from MySQLdb.cursors import DictCursor
# Module-level buff cache -- presumably filled by callers of getAllBuffInfo();
# it is never populated in this file.  TODO confirm usage.
ALL_BUFF_INFO = {}
def getAllBuffInfo():
    """Load every row of tb_buff_info keyed by buffId, with the matching
    tb_buff_effect row attached under 'buffEffects'.
    (Original docstring, translated: "fetch info for all buffs/skills".)
    """
    cursor = dbaccess.dbpool.cursor(cursorclass=DictCursor)
    cursor.execute("SELECT * FROM tb_buff_info")
    rows = cursor.fetchall()
    cursor.close()
    buffs = {}
    for row in rows:
        row['buffEffects'] = getBuffEffect(row['buffEffectID'])
        buffs[row['buffId']] = row
    return buffs
def getBuffEffect(buffEffectID):
    """Fetch the tb_buff_effect row for the given id (or None if absent).
    (Original docstring, translated: "fetch the buff effect".)
    """
    # Parameterized query -- the original interpolated the id into the SQL
    # string with %d, which is unsafe and bypasses driver escaping.
    sql = "SELECT * FROM tb_buff_effect where buffEffectID = %s"
    cursor = dbaccess.dbpool.cursor(cursorclass=DictCursor)
    cursor.execute(sql, (buffEffectID,))
    result = cursor.fetchone()
    cursor.close()
    return result
UTF-8
Python
false
false
2,014
9,388,798,545,274
6f4d5920c3dbfa173b5503cc5c6ca425927cf38f
177641027416fb2ffbbe80f34d5dd6ba22b846c4
/clustering_project/test_code/test_code.py
64dc1c0b09677033740b84935d92b7fed86ef4fa
[]
no_license
haehn/clustering_project
https://github.com/haehn/clustering_project
ffe02f00b3054ab10b756ee042aa6ea6bdc0f525
567be390b2a8f9584051e901b01957498c9d6a2c
refs/heads/master
2021-01-21T11:46:27.030186
2013-02-08T09:33:51
2013-02-08T09:33:51
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
# -*- coding: utf-8 -*-
from sequence_collection import SequenceCollection
import numpy as np
import copy
import cPickle
np.set_printoptions(precision=2, linewidth=200)
"""
Protocol for running analysis:
1) Create SequenceCollection object, e.g.:
col = SequenceCollection( directory, datatype = 'protein' )
This reads sequence alignments from an input directory
(fasta format, phylip sequential, or phylip interleaved)
Datatypes allowed are 'protein' and 'dna'
2) Get phylogenetic trees for each gene with
col.put_trees_parallel( program='treecollection', tmpdir = '/tmp' )
Available programs - 'phyml', 'raxml', 'treecollection'
3) Do hierarchical clustering on the trees, e.g.:
col.put_partitions( metrics=['euc', 'geodesic', 'sym'], linkages=['single','complete'],
nclasses=[4,5,6])
available metrics are 'euc' for euclidean distance, 'geodesic', 'rf' for
weighted Robinson-Foulds (with branch lengths), and 'sym' for symmetric difference,
i.e. standard, topology-only Robinson-Foulds
4) Propagate the results through the data structure with:
col.put_clusters()
col.put_cluster_trees_parallel()
5) Compare scores derived from clusters to random permutation of the original data
either by making a copy of the SequenceCollection object, with clusters made up
of the same number of genes with the same number of characters, or by randomising
the alignments and performing hierarchical clustering on the randomised data
if the former, do rand1 = col.make_randomised_copy()
if the latter, do rand2 = SequenceCollection(records=col.get_randomised_alignments(),
datatype = 'protein')
"""
# indir = '/Users/kgori/git/kevin/yeast_data/MSA'
# NOTE(review): hardcoded absolute local path -- adjust per machine.
indir = '/Users/kgori/git/kevin/data/simulated_data/eight/MSA'
# True data plus a randomised control built from the same alignments.
col = SequenceCollection(indir, datatype='protein')
ran = SequenceCollection(records=col.get_randomised_alignments(), datatype='protein')
col.put_trees_parallel()
ran.put_trees_parallel()
# Hierarchical clustering over three tree-distance metrics, Ward linkage.
col.put_partitions(metrics=['euc','rf','sym'], linkages=['ward'], nclasses=[2,3,4,5,6,7,8,9,10])
ran.put_partitions(metrics=['euc','rf','sym'], linkages=['ward'], nclasses=[2,3,4,5,6,7,8,9,10])
col.put_clusters()
col.put_cluster_trees_parallel()
ran.put_clusters()
ran.put_cluster_trees_parallel()
# Second control: randomise a copy of the true data's clustering result.
rn2 = col.make_randomised_copy()
r1 = ran.get_clusters()
r2 = rn2.get_clusters()
cl = col.get_clusters()
# Compare scores of true vs randomised clusterings (Python-2 print statements).
for k in sorted(cl):
    print k
    print 'Clustering from true data: ', cl[k].score
    print 'Clustering from randomised data: ', r1[k].score
    print 'Clustering from true data + randomising result:', r2[k].score
    print
# Persist all three collections (py2-only file()/cPickle).
cPickle.dump(col, file('col.pickle','w'))
cPickle.dump(ran, file('ran.pickle','w'))
cPickle.dump(rn2, file('rn2.pickle','w'))
"""
YEAST RESULT
('euc', 'ward', 2)
Clustering from true data: 564.248
Clustering from randomised data: 503.7414
Clustering from true data + randomising result: 505.216
('euc', 'ward', 3)
Clustering from true data: 559.109
Clustering from randomised data: 502.2794
Clustering from true data + randomising result: 505.216
('euc', 'ward', 4)
Clustering from true data: 559.1094
Clustering from randomised data: 502.2794
Clustering from true data + randomising result: 504.764
('euc', 'ward', 5)
Clustering from true data: 558.8582
Clustering from randomised data: 501.863
Clustering from true data + randomising result: 504.7643
('euc', 'ward', 6)
Clustering from true data: 552.39
Clustering from randomised data: 497.3842
Clustering from true data + randomising result: 504.7646
('euc', 'ward', 7)
Clustering from true data: 552.3895
Clustering from randomised data: 497.2594
Clustering from true data + randomising result: 504.1181
('euc', 'ward', 8)
Clustering from true data: 551.3077
Clustering from randomised data: 479.8303
Clustering from true data + randomising result: 501.5788
('geodesic', 'ward', 2)
Clustering from true data: 566.453
Clustering from randomised data: 505.511
Clustering from true data + randomising result: 507.962
('geodesic', 'ward', 3)
Clustering from true data: 562.8588
Clustering from randomised data: 503.738
Clustering from true data + randomising result: 507.9623
('geodesic', 'ward', 4)
Clustering from true data: 560.6918
Clustering from randomised data: 485.9653
Clustering from true data + randomising result: 507.1153
('geodesic', 'ward', 5)
Clustering from true data: 560.6914
Clustering from randomised data: 485.9649
Clustering from true data + randomising result: 506.0329
('geodesic', 'ward', 6)
Clustering from true data: 554.2232
Clustering from randomised data: 481.2518
Clustering from true data + randomising result: 506.0332
('geodesic', 'ward', 7)
Clustering from true data: 552.7076
Clustering from randomised data: 479.7468
Clustering from true data + randomising result: 505.8623
('geodesic', 'ward', 8)
Clustering from true data: 552.2001
Clustering from randomised data: 479.25047
Clustering from true data + randomising result: 502.6491
('sym', 'ward', 2)
Clustering from true data: 528.312
Clustering from randomised data: 466.111
Clustering from true data + randomising result: 507.962
('sym', 'ward', 3)
Clustering from true data: 494.925
Clustering from randomised data: 452.9
Clustering from true data + randomising result: 507.963
('sym', 'ward', 4)
Clustering from true data: 476.5304
Clustering from randomised data: 445.2736
Clustering from true data + randomising result: 507.9634
('sym', 'ward', 5)
Clustering from true data: 474.4247
Clustering from randomised data: 443.04789
Clustering from true data + randomising result: 506.1102
('sym', 'ward', 6)
Clustering from true data: 471.6305
Clustering from randomised data: 443.04829
Clustering from true data + randomising result: 504.1927
('sym', 'ward', 7)
Clustering from true data: 467.0354
Clustering from randomised data: 440.96919
Clustering from true data + randomising result: 504.1928
('sym', 'ward', 8)
Clustering from true data: 465.6466
Clustering from randomised data: 440.36436
Clustering from true data + randomising result: 503.28341
SMALL
('euc', 'ward', 2)
Clustering from true data: 6475.8809
Clustering from randomised data: 1861.68
Clustering from true data + randomising result: 1920.436
('euc', 'ward', 3)
Clustering from true data: 2731.1542
Clustering from randomised data: 1842.685
Clustering from true data + randomising result: 1914.138
('euc', 'ward', 4)
Clustering from true data: 170.0565
Clustering from randomised data: 1810.53
Clustering from true data + randomising result: 1910.846
('euc', 'ward', 5)
Clustering from true data: 167.93243
Clustering from randomised data: 1809.101
Clustering from true data + randomising result: 1907.7012
('euc', 'ward', 6)
Clustering from true data: 167.93233
Clustering from randomised data: 1800.9
Clustering from true data + randomising result: 1902.0062
('euc', 'ward', 7)
Clustering from true data: 167.93233
Clustering from randomised data: 1800.9
Clustering from true data + randomising result: 1901.7676
('euc', 'ward', 8)
Clustering from true data: 167.22473
Clustering from randomised data: 1797.0312
Clustering from true data + randomising result: 1898.6548
('geodesic', 'ward', 2)
Clustering from true data: 6475.8809
Clustering from randomised data: 1882.06
Clustering from true data + randomising result: 1920.436
('geodesic', 'ward', 3)
Clustering from true data: 3682.3542
Clustering from randomised data: 1842.685
Clustering from true data + randomising result: 1914.138
('geodesic', 'ward', 4)
Clustering from true data: 170.0565
Clustering from randomised data: 1810.53
Clustering from true data + randomising result: 1910.846
('geodesic', 'ward', 5)
Clustering from true data: 167.93243
Clustering from randomised data: 1802.329
Clustering from true data + randomising result: 1907.7012
('geodesic', 'ward', 6)
Clustering from true data: 167.93233
Clustering from randomised data: 1798.4602
Clustering from true data + randomising result: 1902.0062
('geodesic', 'ward', 7)
Clustering from true data: 167.93233
Clustering from randomised data: 1797.2582
Clustering from true data + randomising result: 1901.7676
('geodesic', 'ward', 8)
Clustering from true data: 165.55163
Clustering from randomised data: 1787.1609
Clustering from true data + randomising result: 1905.9766
('sym', 'ward', 2)
Clustering from true data: 6475.8909
Clustering from randomised data: 1835.746
Clustering from true data + randomising result: 1920.436
('sym', 'ward', 3)
Clustering from true data: 2730.8042
Clustering from randomised data: 1814.139
Clustering from true data + randomising result: 1914.138
('sym', 'ward', 4)
Clustering from true data: 170.0565
Clustering from randomised data: 1812.305
Clustering from true data + randomising result: 1910.846
('sym', 'ward', 5)
Clustering from true data: 168.59519
Clustering from randomised data: 1808.871
Clustering from true data + randomising result: 1908.525
('sym', 'ward', 6)
Clustering from true data: 166.21449
Clustering from randomised data: 1800.165
Clustering from true data + randomising result: 1912.732
('sym', 'ward', 7)
Clustering from true data: 164.22559
Clustering from randomised data: 1791.6549
Clustering from true data + randomising result: 1886.527
('sym', 'ward', 8)
Clustering from true data: 163.38334
Clustering from randomised data: 1790.0077
Clustering from true data + randomising result: 1885.2419
"""
UTF-8
Python
false
false
2,013
12,154,757,473,853
39556a535485f126a4a5ba1fc747611339ec5578
e0e5279092759a9ca0343ef390525d36ea29e16a
/course11/impl/src/run_single_program.py
d2c96a6cdaac1ee8c0d793a02084df79b78edee8
[]
no_license
mashony/diploma
https://github.com/mashony/diploma
d0fc2546a8a9ab51d3c8b802f6e4928b166a0170
aeb6cdb668aab055161acd1ae2875751d7b2b7e5
refs/heads/master
2020-12-24T16:58:53.173720
2013-04-27T14:38:10
2013-04-27T14:38:10
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
# coding: utf-8
"""
Part of 'Adaptor' framework.
Author: Michael Pankov, 2012.
Run experiment with specified program with given compilers.
"""
import sys
import os
import subprocess as sp
import textwrap as tw
import timeit
import datetime as dt
import recordtype as rt
import collections as cl
import numpy as np
import matplotlib.pyplot as plt
import couchdbkit as ck
from couchdbkit.designer import push
import copy
import ipdb
class PrintableStructure():
    """Mixin giving namedtuple/recordtype subclasses a readable multi-line
    str(): a header with the class name, then one tab-indented line per
    field (field name padded to 20 characters)."""
    def __str__(self):
        rows = ['\t{field:20}:\t{value}\n'.format(field=k, value=v)
                for k, v in self._asdict().items()]
        return '\n{name}:\n'.format(name=self.__class__.__name__) + ''.join(rows)
# Mutable global state threaded through the framework: a stack of working
# directories plus the active Settings record.
Context = rt.recordtype('Context',
    'paths_stack settings')
# Framework-wide configuration (mutable).
SettingsBase = rt.recordtype('Settings',
    'program_name benchmark_root_dir framework_root_dir '
    'build_settings run_settings benchmark_bin_dir')
class Settings(PrintableStructure, SettingsBase):
    pass
# How one benchmark program is compiled.
BuildSettingsBase = rt.recordtype('BuildSettings',
    'compiler base_opt optimization_flags other_flags '
    'benchmark_source_dir program_source linker_options')
class BuildSettings(PrintableStructure, BuildSettingsBase):
    pass
# How one benchmark program is run (currently no fields).
RunSettingsBase = rt.recordtype('RunSettings',
    '')
class RunSettings(PrintableStructure, RunSettingsBase):
    pass
# Immutable description of one experiment's inputs.
Input = cl.namedtuple('Input',
    'benchmark_source_dir compiler base_opt')
# Aggregated timing statistics from repeated measurements.
CalibrationResultBase = cl.namedtuple('CalibrationResult',
    'total_time time dispersion variance runs_number times_list')
class CalibrationResult(PrintableStructure, CalibrationResultBase):
    pass
# Comparison of measured vs. real time for one validation run.
ValidationResultBase = cl.namedtuple('ValidationResult',
    'real_time measured_time error relative_error')
class ValidationResult(PrintableStructure, ValidationResultBase):
    pass
# The documents below sometimes correspond to plain records used
# to aggregate data during the work of framework
# (which are defined above).
# However, they only save the information relevant to experiment
# reproduction and meaningful to analytics.
class CalibrationResultDocument(ck.Document):
    """CouchDB document, describing the result of multiple measurements."""
    total_time = ck.FloatProperty()
    time = ck.FloatProperty()
    dispersion = ck.FloatProperty()
    variance = ck.FloatProperty()
    runs_number = ck.IntegerProperty()
    times_list = ck.ListProperty()

class ValidationResultDocument(ck.Document):
    """CouchDB document, describing the result of calibrations."""
    real_time = ck.FloatProperty()
    measured_time = ck.FloatProperty()
    error = ck.FloatProperty()
    relative_error = ck.FloatProperty()

class BuildSettingsDocument(ck.Document):
    """
    CouchDB document, describing the settings with which
    the program was built.
    """
    compiler = ck.StringProperty()
    base_opt = ck.StringProperty()
    optimization_flags = ck.StringProperty()
    other_flags = ck.StringProperty()

class RunSettingsDocument(ck.Document):
    """
    CouchDB document, describing the settings with which
    the program was run.
    """
    pass

class SettingsDocument(ck.Document):
    """CouchDB document, describing the global settings of framework."""
    program = ck.StringProperty()
    build_settings = ck.SchemaProperty(BuildSettingsDocument)
    run_settings = ck.SchemaProperty(RunSettingsDocument)

class ExperimentDocument(ck.Document):
    """CouchDB document, describing the experiment."""
    datetime = ck.DateTimeProperty()
    calibration_result = ck.SchemaProperty(CalibrationResultDocument)
    validation_result = ck.SchemaProperty(ValidationResultDocument)
    settings = ck.SchemaProperty(SettingsDocument)

# Raised when a path passed to the nesting helpers is not absolute.
class NonAbsolutePathError(RuntimeError):
    pass

# Raised when attempting to nest into a path that does not exist.
class NoSuchNestedPathError(RuntimeError):
    pass
definition = \
"""
from subprocess import Popen, PIPE
def run():
p = Popen("{command}".split(),
stdout=PIPE,
stderr=PIPE)
return p.communicate()
"""
def main():
    """Invoke all necessary builds and experiments."""
    # Global settings; most fields are filled in incrementally below.
    settings = Settings(program_name=None,
                        framework_root_dir=os.path.realpath(
                            os.path.join(os.path.dirname(__file__), '..')),
                        benchmark_root_dir=None,
                        benchmark_bin_dir=None,
                        build_settings=None,
                        run_settings=None)
    context = Context(paths_stack=[],
                      settings=settings)
    settings.benchmark_bin_dir = os.path.realpath(os.path.join(
        settings.framework_root_dir, 'data/bin/'))
    server, db = setup_database(settings, context)
    # First: timer-error characterization on the time-test programs.
    settings.benchmark_root_dir = os.path.realpath(os.path.join(
        settings.framework_root_dir, 'data/sources/time-test/'))
    plot_error(context)
    # plot_vs()
    # for c, v in zip(cs, vs):
    #     e = create_experiment_document(context, c, v)
    #     e.save()
    # Then: run every polybench kernel found under the benchmark tree.
    settings.benchmark_root_dir = os.path.realpath(os.path.join(
        settings.framework_root_dir, 'data/sources/polybench-c-3.2/'))
    nest_path_from_benchmark_root(context, '.')
    es = []
    n = 0
    for path, dirs, files in os.walk('.'):
        # Each leaf directory with files (except 'utilities') is one benchmark.
        if files and not path.endswith('utilities') and not path == '.':
            n += 1
            settings.program_name = os.path.basename(path)
            context.settings = settings
            define_build_settings(settings,
                path,
                '-I utilities -I {0} utilities/polybench.c'.format(path))
            b = settings.build_settings
            b.compiler = 'gcc'
            b.base_opt = '-O2'
            define_run_settings(settings)
            nest_path_absolute(context, settings.framework_root_dir)
            e = perform_experiment(context)
            es.append(e)
            unnest_path(context)
    # Scatter the measured times with their dispersions as error bars.
    # NOTE(review): py2 map() returns lists, which len()/plotting rely on.
    y = map(lambda e: e.calibration_result.time, es)
    yerr = map(lambda e: e.calibration_result.dispersion, es)
    x = range(len(y))
    plt.figure()
    plt.scatter(x, y)
    plt.errorbar(x, y, yerr=yerr, fmt=None)
    plt.show()
    unnest_path(context)
    # The directory stack must be balanced by the end of the run.
    assert len(context.paths_stack) == 0
def plot_error(context):
    """Build/run the 'do_nothing' calibration program, then plot measured vs.
    real execution time of the calibration programs (log scale).

    Prints a per-experiment report and waits for Enter between them
    (py2 raw_input).  Axis labels/titles are Russian.
    """
    nest_path_from_benchmark_root(context, '.')
    settings = context.settings
    settings.program_name = 'do_nothing'
    define_build_settings(settings,
        '',
        '')
    b = settings.build_settings
    b.compiler = 'gcc'
    b.base_opt = '-O0'
    define_run_settings(settings)
    cs, vs = validate_default(context)
    y1 = map(lambda v: v.real_time, vs)
    y2 = map(lambda v: v.measured_time, vs)
    err = map(lambda v: v.relative_error, vs)
    for p1, p2, e in zip(y1, y2, err):
        print tw.dedent(
            """\
            Experiment performed:
            Real time: {0:.6f}
            Measured time: {1:.6f}
            Relative error: {2:.2f}
            """.format(p1, p2, e))
        raw_input()
    x = range(len(y1))
    plt.figure()
    plt.axes().set_yscale('log')
    # Red crosses: measured time; blue dots: real time (labels in Russian).
    plt2 = plt.scatter(x, y2, marker='+', s=160, c='r', label=u'измеренное время')
    plt1 = plt.scatter(x, y1, label=u'реальное время')
    plt.axes().set_xticks(range(len(y1)))
    default_xticklabels = ['usleep_{0}'.format(10**i) for i in range(7)]
    plt.axes().set_xticklabels(default_xticklabels)
    plt.setp(plt.axes().get_xticklabels(), rotation=90)
    plt.axes().set_xlabel(u'программа')
    plt.axes().set_ylabel(u'время выполнения, с')
    plt.axes().grid(axis='both')
    # Proxy rectangles so the legend shows solid colour patches.
    p1 = plt.Rectangle((0, 0), 1, 1, fc='b')
    p2 = plt.Rectangle((0, 0), 1, 1, fc='r')
    plt.axes().legend((p1, p2), (plt1.get_label(), plt2.get_label()), loc='best')
    plt.title(u'Математическое ожидание времени исполнения калибровочных программ и реальное время их исполнения')
    plt.show()
    unnest_path(context)
def plot_vs():
    """Plot execution times of programs built by clang vs. gcc at -O2.

    Fixes over the original:
    - the "clang" series was filtered with compiler == u'gcc' and labeled
      'gcc', so both series were identical;
    - `dt.datetime(2012,12,30,22,01,00)` used leading-zero literals
      (invalid in Python 3, octal-style in Python 2);
    - a leftover `ipdb.set_trace()` debugger call was removed.
    """
    v = ExperimentDocument.view('adaptor/experiment-all')
    l = []
    for doc in v:
        # Only consider experiments recorded after this cutoff instant.
        if doc.datetime > dt.datetime(2012, 12, 30, 22, 1, 0):
            l.append((doc.settings.build_settings.compiler,
                      doc.settings.program,
                      doc.calibration_result.time))
    clang_es = filter(lambda e: e[0] == u'clang', l)
    gcc_es = filter(lambda e: e[0] == u'gcc', l)
    clang_x_ticklabels = map(lambda e: e[1], clang_es)
    gcc_x_ticklabels = map(lambda e: e[1], gcc_es)
    # Sort the clang series by time; order the gcc series by the same
    # program order so points at equal x describe the same program.
    clang_scurve = sorted(clang_es, key=lambda e: e[2])
    clang_y = [e[2] for e in clang_scurve]
    indices = map(lambda e: e[1], clang_scurve)
    gcc_scurve = sorted(gcc_es, key=lambda e: indices.index(e[1]))
    gcc_y = [e[2] for e in gcc_scurve]
    points_clang = plt.scatter(range(len(clang_y)), clang_y, label='clang')
    points_gcc = plt.scatter(range(len(gcc_y)), gcc_y, c='r', label='gcc')
    f = plt.gcf()
    plt.axes().set_yscale('log')
    plt.axes().set_xticks(range(len(clang_y)))
    plt.axes().set_xticklabels(clang_x_ticklabels)
    plt.setp(plt.axes().get_xticklabels(), rotation=90)
    plt.axes().set_xlabel(u'программа')
    plt.axes().set_ylabel(u'время выполнения, с')
    plt.axes().grid(axis='both')
    # Proxy rectangles for the legend entries.
    p1 = plt.Rectangle((0, 0), 1, 1, fc='b')
    p2 = plt.Rectangle((0, 0), 1, 1, fc='r')
    plt.axes().legend((p1, p2), (points_clang.get_label(), points_gcc.get_label()), loc='best')
    plt.title(u"Время исполнения программ, скомпилированных двумя компиляторами на уровне оптимизации '-O2'")
    plt.show()
def calculate_overhead_time(context):
    """Measure the timing-harness overhead with the empty `do_nothing`
    program; return (calibration_result, overhead_time).

    Operates on a deep copy of the context, temporarily rebinding the
    program name and benchmark root on the settings object and restoring
    them before returning.
    """
    context = copy.deepcopy(context)
    settings = context.settings
    nest_path_from_root(context, 'data/sources/time-test')
    # Remember the fields we are about to override.
    previous_name = settings.program_name
    previous_root = settings.benchmark_root_dir
    settings.program_name = 'do_nothing'
    settings.benchmark_root_dir = get_path(context)
    define_build_settings(settings, '', '')
    build_cfg = settings.build_settings
    build_cfg.compiler = 'gcc'
    build_cfg.base_opt = '-O0'
    define_run_settings(settings)
    build(context)
    calibration = run_empty(context)
    overhead_time = calibration.time
    unnest_path(context)
    # Restore the overridden settings before handing the result back.
    settings.benchmark_root_dir = previous_root
    settings.program_name = previous_name
    return calibration, overhead_time
def define_build_settings(s, sources_path, other_flags):
    """Attach a BuildSettings record to settings object `s`.

    The source directory is the benchmark root joined with `sources_path`;
    compiler / optimization fields are left as None for the caller to fill.
    """
    source_dir = os.path.join(s.benchmark_root_dir, '', sources_path)
    s.build_settings = BuildSettings(
        benchmark_source_dir=source_dir,
        program_source="{0}.c".format(s.program_name),
        compiler=None,
        base_opt=None,
        optimization_flags=None,
        other_flags=other_flags,
        linker_options='-lm')
def define_run_settings(s):
    """Attach a fresh, default RunSettings record to settings object `s`."""
    s.run_settings = RunSettings()
def store_validation_document(v):
    """Convert validation result `v` to a document and persist it."""
    make_validation_document(v).save()
def make_validation_document(v):
    """Build a ValidationResultDocument from validation result `v`,
    coercing every field to float for storage."""
    return ValidationResultDocument(
        real_time=float(v.real_time),
        measured_time=float(v.measured_time),
        error=float(v.error),
        relative_error=float(v.relative_error))
def push_path(context, path):
    """
    Push path to stack in context.
    Path must be absolute; a relative path raises NonAbsolutePathError.
    """
    if not os.path.isabs(path):
        raise NonAbsolutePathError
    context.paths_stack.append(path)
def pop_path(context):
    """
    Pop path from stack in context.
    Path is absolute.
    """
    stack = context.paths_stack
    return stack.pop()
def get_path(context):
    """
    Return the path on top of stack in context (stack is left unchanged).
    """
    paths = context.paths_stack
    return paths[-1]
def ensure_path(context):
    """
    Change the current working directory to the path on top of the
    stack in context (top-of-stack lookup inlined from get_path).
    """
    os.chdir(context.paths_stack[-1])
def nest_path_absolute(context, path):
    """
    Receive path, push the real path of it to stack in context and
    change current directory to there.

    Raises NoSuchNestedPathError when the directory cannot be entered.
    """
    try:
        os.chdir(path)
    except OSError:
        # Narrowed from a bare except: only a failed chdir means a missing
        # path; KeyboardInterrupt and programming errors now propagate.
        raise NoSuchNestedPathError
    push_path(context, path)
    ensure_path(context)
def nest_path_from_root(context, path):
    """
    Receive path, relative to the root of framework,
    push it to stack in context and change current directory to there.
    """
    framework_root = context.settings.framework_root_dir
    nest_path_absolute(context, os.path.join(framework_root, path))
def nest_path_from_benchmark_root(context, path):
    """
    Receive path, relative to the root of benchmark directory,
    push it to stack in context and change current directory to there.
    """
    benchmark_root = context.settings.benchmark_root_dir
    nest_path_absolute(context, os.path.join(benchmark_root, path))
def nest_path(context, path):
    """
    Receive relative path, push the real path of it to stack in context and
    change current directory to there.
    """
    nest_path_absolute(context, os.path.join(get_path(context), path))
def unnest_path(context):
    """
    Pop the path from stack in context and
    change current directory to current top path of stack.
    """
    pop_path(context)
    try:
        ensure_path(context)
    except (IndexError, OSError):
        # IndexError: the pop above emptied the stack, so get_path's
        # paths_stack[-1] fails; OSError: the directory is gone. The
        # original bare except also hid real bugs and KeyboardInterrupt.
        pass
def validate_default(context):
    """
    Perform validation on set of time-measurement programs and report errors.

    Builds and validates usleep_1 ... usleep_1000000 with gcc -O0 and
    returns (calibration_results, validation_results).
    """
    nest_path_absolute(context, context.settings.framework_root_dir)
    calibrations = []
    validations = []
    c, overhead_time = calculate_overhead_time(context)
    for exponent in range(7):
        real_time_us = 10 ** exponent
        context.settings.program_name = 'usleep_{0}'.format(real_time_us)
        define_build_settings(context.settings, '', '')
        context.settings.build_settings.compiler = 'gcc'
        context.settings.build_settings.base_opt = '-O0'
        build(context)
        c, v = validate(context, real_time_us / 10.**6, overhead_time)
        calibrations.append(c)
        validations.append(v)
    unnest_path(context)
    return calibrations, validations
def validate(context, real_time, overhead_time):
    """
    Validate calibration of single command.

    Returns (calibration_result, ValidationResult). When `real_time` is
    None (see perform_experiment) or zero, the error fields are None.
    """
    c = run(context)
    measured_time = c.time - overhead_time
    try:
        error = abs(measured_time - real_time)
        relative_error = error / real_time
    except (TypeError, ZeroDivisionError):
        # Narrowed from a bare except: TypeError when real_time is None,
        # ZeroDivisionError when it is zero; anything else propagates.
        error = None
        relative_error = None
    v = ValidationResult(real_time, measured_time, error, relative_error)
    return c, v
def calibrate_empty(context, command):
    """Calibrate execution of command until measurement is accurate enough."""
    # NOTE(review): near-duplicate of calibrate() below; the only difference
    # is the relative-dispersion threshold (0.02 here vs 0.05 there).
    n = 0
    t = 0
    d_rel = 1
    print "Begin"
    command = os.path.join(get_path(context), command)
    # Single probe run to see whether one execution is already long enough.
    result = timeit.timeit(stmt='run()',
                           setup=definition.format(
                               command=command),
                           number=1)
    print "\nTime of single run:", result,
    if result > 1:
        # When incremented in the loop, it'll become zero
        n = -1
        print ", pruning"
    else:
        print ''
    # Repeat with 10**n runs until total time reaches 1s or the relative
    # dispersion across repeats drops below the threshold.
    while (t < 1) and (d_rel > 0.02):
        sys.stderr.write('.')
        n += 1
        number = 10**(n)
        result = timeit.repeat(stmt='run()',
                               setup=definition.format(
                                   command=command),
                               number=number,
                               repeat=3)
        t = min(result)  # best-of-3 total time
        d = np.std(np.array(result))  # dispersion across the 3 repeats
        d_rel = d / t
    sys.stderr.write('\n')
    # t / number is the estimated per-run time.
    return CalibrationResult(t, t / number, d, d_rel, number, result)
def calibrate(context, command):
    """Calibrate execution of command until measurement is accurate enough."""
    # NOTE(review): near-duplicate of calibrate_empty() above with a looser
    # relative-dispersion threshold (0.05 instead of 0.02).
    n = 0
    t = 0
    d_rel = 1
    print "Begin"
    command = os.path.join(get_path(context), command)
    # Single probe run to see whether one execution is already long enough.
    result = timeit.timeit(stmt='run()',
                           setup=definition.format(
                               command=command),
                           number=1)
    print "\nTime of single run:", result,
    if result > 1:
        # When incremented in the loop, it'll become zero
        n = -1
        print ", pruning"
    else:
        print ''
    # Repeat with 10**n runs until total time reaches 1s or the relative
    # dispersion across repeats drops below the threshold.
    while (t < 1) and (d_rel > 0.05):
        sys.stderr.write('.')
        n += 1
        number = 10**(n)
        result = timeit.repeat(stmt='run()',
                               setup=definition.format(
                                   command=command),
                               number=number,
                               repeat=3)
        t = min(result)  # best-of-3 total time
        d = np.std(np.array(result))  # dispersion across the 3 repeats
        d_rel = d / t
    sys.stderr.write('\n')
    # t / number is the estimated per-run time.
    return CalibrationResult(t, t / number, d, d_rel, number, result)
def convert_input_to_settings(input):
    """Process user input (command line arguments) and return settings.

    `input` is expected to be a mapping with the keys
    'benchmark_source_dir', 'compiler' and 'base_opt'.

    Bug fix: the original referenced an undefined capitalized name `Input`
    and used bare (undefined) identifiers as its keys, e.g.
    ``Input[benchmark_source_dir]`` -- a guaranteed NameError.
    """
    program_name, benchmark_root_dir = \
        os.path.split(os.path.realpath(input['benchmark_source_dir']))
    framework_root_dir, _ = os.path.split(os.path.realpath(__file__))
    settings = Settings(program_name=program_name,
                        benchmark_root_dir=benchmark_root_dir,
                        framework_root_dir=framework_root_dir)
    build_settings = BuildSettings(compiler=input['compiler'],
                                   base_opt=input['base_opt'],
                                   benchmark_source_dir=input['benchmark_source_dir'])
    benchmark_bin_dir = os.path.join(framework_root_dir, 'data/bin/')
    run_settings = RunSettings(benchmark_bin_dir=benchmark_bin_dir)
    return settings, build_settings, run_settings
def perform_experiment(context):
"""Perform experiment."""
build(context)
_, o_t = calculate_overhead_time(context)
c, v = validate(context, None, o_t)
experiment = create_experiment_document(context, c, v)
print "Saving experiment now"
experiment.save()
return experiment
def create_experiment_document(context, c, v):
    """Assemble an ExperimentDocument from calibration result `c`,
    validation result `v` and the settings held in `context`.

    The validation sub-document is optional: perform_experiment() passes a
    validation whose fields may be None; if the document cannot be built
    it is stored as None.
    """
    c_d = CalibrationResultDocument(
        total_time=c.total_time,
        time=c.time,
        dispersion=c.dispersion,
        variance=c.variance,
        runs_number=c.runs_number,
        times_list=c.times_list)
    try:
        v_d = ValidationResultDocument(
            real_time=v.real_time,
            measured_time=v.measured_time,
            error=v.error,
            relative_error=v.relative_error)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit still
        # propagate; any failure to build the document degrades to None.
        v_d = None
    b = context.settings.build_settings
    b_d = BuildSettingsDocument(
        compiler=b.compiler,
        base_opt=b.base_opt,
        optimization_flags=b.optimization_flags,
        other_flags=b.other_flags)
    r_d = RunSettingsDocument()
    s_d = SettingsDocument(
        program=context.settings.program_name,
        build_settings=b_d,
        run_settings=r_d)
    experiment = ExperimentDocument(
        settings=s_d,
        calibration_result=c_d,
        validation_result=v_d,
        datetime=dt.datetime.utcnow())
    return experiment
def print_experiments(db):
"""Print all the experiments."""
experiments = db.view('experiment/all')
for e in experiments.all():
print 'Experiment:'
print 'Build:', e['value']['command_build']
print 'Run:', e['value']['command_run']
print 'Date & time:', e['value']['datetime']
def setup_database(settings, context):
    """Setup the database.

    Binds every document class to the 'adaptor' CouchDB database and
    re-publishes the CouchApp; returns (server, db).
    """
    server = ck.Server()
    db = server.get_or_create_db('adaptor')
    document_classes = (
        ExperimentDocument,
        SettingsDocument,
        BuildSettingsDocument,
        RunSettingsDocument,
        CalibrationResultDocument,
        ValidationResultDocument,
    )
    for document_class in document_classes:
        document_class.set_db(db)
    nest_path_from_root(context, 'couch/adaptor')
    # We are stupid so we suppose the CouchApp is managed
    # to be stable version and we just re-publish it on launch.
    sp.check_call('couchapp push . http://localhost:5984/adaptor'.split())
    unnest_path(context)
    return server, db
def prepare_command_build(settings):
    """Prepare command for building of generic program.

    Expands source/binary paths and the compiler invocation template from
    the settings record (a namedtuple-like object with _asdict()).
    """
    opts = settings._asdict()
    full_path_source = os.path.join(
        "{build_settings.benchmark_source_dir}".format(**opts),
        "{build_settings.program_source}".format(**opts))
    full_path_binary = os.path.join(
        "{benchmark_bin_dir}".format(**opts),
        "{program_name}".format(**opts))
    # dedent + translate collapses the template to a single line.
    template = tw.dedent("""
        {build_settings.compiler} {build_settings.base_opt}
        {build_settings.other_flags} {0}
        -o {1} {build_settings.linker_options}""").translate(None, '\n')
    return template.format(full_path_source, full_path_binary, **opts)
def build(context):
"""Build the generic version of the program."""
command = prepare_command_build(context.settings)
nest_path_from_benchmark_root(context, '')
print os.path.realpath(os.path.curdir)
print command
nest_path_from_benchmark_root(context, '')
sp.call('mkdir bin'.split())
unnest_path(context)
sp.check_call(command.split())
unnest_path(context)
def prepare_command_run(settings):
    """Prepare command for running the program."""
    template = tw.dedent("""
        ./{program_name}""").translate(None, '\n')
    return template.format(**settings._asdict())
def run(context):
"""Run the generic version of program."""
command = prepare_command_run(context.settings)
nest_path_from_root(context, 'data/bin')
print command
r = calibrate(context, command)
unnest_path(context)
return r
def run_empty(context):
"""Run the generic version of program."""
command = prepare_command_run(context.settings)
nest_path_from_root(context, 'data/bin')
print command
r = calibrate_empty(context, command)
unnest_path(context)
return r
if __name__ == '__main__':
main()
"""
@author: Deniz Altinbuken, Emin Gun Sirer
@note: Example binarytree
@copyright: See LICENSE
"""
class BinaryTree:
    """Unbalanced binary search tree; duplicates go to the left subtree.

    All operations take the subtree root explicitly and, where relevant,
    return the (possibly new) root, so callers keep their own root handle.
    """

    def __init__(self, **kwargs):
        self.root = None  # kept for API compatibility; methods take roots explicitly

    def add_node(self, data, **kwargs):
        """Create and return a new leaf node carrying `data`."""
        return Node(data)

    def insert(self, root, data, **kwargs):
        """Insert `data` under `root` and return the subtree root."""
        if root is None:
            return self.add_node(data)
        if data <= root.data:
            root.left = self.insert(root.left, data)
        else:
            root.right = self.insert(root.right, data)
        return root

    def find(self, root, target, **kwargs):
        """Return True iff `target` occurs in the subtree rooted at `root`."""
        if root is None:
            return False
        if target == root.data:
            return True
        if target < root.data:
            return self.find(root.left, target)
        return self.find(root.right, target)

    def delete(self, root, target, **kwargs):
        """Remove one occurrence of `target` from the subtree at `root`.

        Returns False when `target` is absent (as before); otherwise
        returns the new subtree root. Bug fix: the original executed only
        `del root`, which unbinds the local name and never detaches the
        node from the tree (and returned None on a root match).
        """
        if root is None or not self.find(root, target):
            return False
        return self._delete(root, target)

    def _delete(self, node, target):
        # Standard BST removal; returns the replacement subtree root.
        if node is None:
            return None
        if target < node.data:
            node.left = self._delete(node.left, target)
        elif target > node.data:
            node.right = self._delete(node.right, target)
        else:
            if node.left is None:
                return node.right
            if node.right is None:
                return node.left
            # Two children: adopt the in-order successor's value, then
            # delete the successor from the right subtree.
            successor = node.right
            while successor.left is not None:
                successor = successor.left
            node.data = successor.data
            node.right = self._delete(node.right, successor.data)
        return node

    def get_min(self, root, **kwargs):
        """Smallest value in the subtree; `root` must not be None."""
        while root.left is not None:
            root = root.left
        return root.data

    def get_max(self, root, **kwargs):
        """Largest value in the subtree; `root` must not be None."""
        while root.right is not None:
            root = root.right
        return root.data

    def get_depth(self, root, **kwargs):
        """Height of the subtree (0 for an empty tree)."""
        if root is None:
            return 0
        return max(self.get_depth(root.left), self.get_depth(root.right)) + 1

    def get_size(self, root, **kwargs):
        """Number of nodes in the subtree."""
        if root is None:
            return 0
        return self.get_size(root.left) + 1 + self.get_size(root.right)


class Node:
    """A single tree node with left/right children and a data payload."""

    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data
UTF-8
Python
false
false
2,012
8,787,503,114,438
feaf753d849eaadc3dccaec89b5940a66a7f9e9c
8e3cf73959d2e675c42ca2da65d0b206ee6a7727
/src/warden/warden_init.py
569f4e5737c0f4423090d630df3f4821fe4a19cd
[
"MIT"
]
permissive
matthewhampton/warden
https://github.com/matthewhampton/warden
2b196edbd1036c267d051ab96470aeb2a7002f53
f3dfc4c658a0ea1af3625ed178d2d37455c84b67
refs/heads/master
2021-01-18T05:39:58.243178
2013-07-18T15:34:59
2013-07-18T15:34:59
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
"""
AFTER A NEW INSTALL OF WARDEN (using setup.py) we need to get the system ready for use
1) Make sure warden.settings exists
2) read warden.settings file (or use command line parameters, arguments etc)
3) carbon: ensure the required configuration files are present
4) diamond: ensure the required configuration files are present
5) gentry: read settings module
check if database exists... clear...syncdb...migrate etc..
"""
import getpass
import subprocess
from warden.AutoConf import autoconf, get_home
from warden_logging import log
import os
import sys
import imp
import base64
import textwrap
import re
from django.core import management
from distutils import dir_util, file_util
def setup(
    home,
    super_user,
    project_name
):
    """
    Bootstrap the Gentry/Sentry installation inside the warden home.

    Warden uses values from its default settings file UNLESS explicitly
    defined here. `super_user` is a (username, password, email) tuple;
    `project_name` names the first Sentry project to create.
    """
    # Point Django at Gentry's settings before importing django.conf.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'gentry.settings'
    log.info('$DJANGO_SETTINGS_MODULE = %s' % os.environ['DJANGO_SETTINGS_MODULE'])
    from django.conf import settings as gsetts
    database = gsetts.DATABASES['default']['NAME']
    # Make sure the directory that will hold the database file exists.
    if not os.path.exists(os.path.dirname(database)):
        os.makedirs(os.path.dirname(database))
    management.execute_from_command_line(['manage.py', 'syncdb', '--noinput'])
    management.execute_from_command_line(['manage.py', 'migrate', '--noinput'])
    # add a super user
    if super_user:
        username = super_user[0]
        password = super_user[1]
        email = super_user[2]
        from sentry.models import User
        try:
            auser = User.objects.using('default').get(username=username)
        except User.DoesNotExist:
            auser = User.objects.db_manager('default').create_superuser(username, email, password)
            # Only the first three password characters are echoed to the log.
            log.info('Added Sentry superuser "%s" with password like "%s%s"' % (username, password[:3], '*'*(len(password)-3)))
        else:
            # The get() succeeded, so the name is taken; `auser` still refers
            # to that existing user below.
            log.error('Username "%s" is already taken.' % username)
    if project_name:
        project_slug = project_name.lower().replace(' ','_')
        try:
            # add a project
            from sentry.models import Project, Team
            team = Team.objects.create(name=project_name + ' Team', slug=project_slug + '_team', owner=auser)
            project = Project.objects.create(name=project_name, slug=project_slug, owner=auser, team=team)
            key = project.key_set.filter(user=auser)[0]
            dsn = "http://%s:%s@localhost:%s/%s" % (key.public_key, key.secret_key, gsetts.SENTRY_WEB_PORT, key.project_id)
            log.info('Added "%s" project to Sentry with dsn: %s' % (project_name, dsn))
        except Exception:
            # NOTE(review): swallows every error (including a NameError on
            # `auser` when super_user was falsy) without logging the cause.
            log.error('Failed to create project.')
def indent(text, spaces=4, strip=False):
    """
    Borrowed from fabric.
    Return ``text`` indented by the given number of spaces.
    If text is not a string, it is assumed to be a list of lines and will be
    joined by ``\\n`` prior to indenting.
    When ``strip`` is ``True``, a minimum amount of whitespace is removed from
    the left-hand side of the given string (so that relative indents are
    preserved, but otherwise things are left-stripped). This allows you to
    effectively "normalize" any previous indentation for some inputs.
    """
    # Anything without splitlines() is treated as a sequence of lines.
    if not hasattr(text, 'splitlines'):
        text = '\n'.join(text)
    if strip:
        text = textwrap.dedent(text)
    pad = ' ' * spaces
    padded = [pad + line for line in text.splitlines()]
    body = '\n'.join(padded).strip()
    # .strip() also removed the first line's padding; reintroduce it.
    return pad + body
def passprompt(prompt_str):
    """Ask for a password twice (no echo) until both entries match."""
    first = getpass.getpass(prompt_str)
    second = getpass.getpass('(Again!) ' + prompt_str)
    while first != second:
        first = getpass.getpass('(Um. They didn\'t match) ' + prompt_str)
        second = getpass.getpass('(Again!) ' + prompt_str)
    return first
def prompt(text, default='', validate=None, password=False):
    """
    Borrowed from fabric!
    Prompt user with ``text`` and return the input (like ``raw_input``).
    A single space character will be appended for convenience, but nothing
    else. Thus, you may want to end your prompt text with a question mark or a
    colon, e.g. ``prompt("What hostname?")``.
    If ``default`` is given, it is displayed in square brackets and used if the
    user enters nothing (i.e. presses Enter without entering any text).
    ``default`` defaults to the empty string. If non-empty, a space will be
    appended, so that a call such as ``prompt("What hostname?",
    default="foo")`` would result in a prompt of ``What hostname? [foo]`` (with
    a trailing space after the ``[foo]``.)
    The optional keyword argument ``validate`` may be a callable or a string:
    * If a callable, it is called with the user's input, and should return the
      value to be stored on success. On failure, it should raise an exception
      with an exception message, which will be printed to the user.
    * If a string, the value passed to ``validate`` is used as a regular
      expression. It is thus recommended to use raw strings in this case. Note
      that the regular expression, if it is not fully matching (bounded by
      ``^`` and ``$``) it will be made so. In other words, the input must fully
      match the regex.
    Either way, `prompt` will re-prompt until validation passes (or the user
    hits ``Ctrl-C``).
    When ``password`` is true, input is gathered via passprompt() (no echo,
    asked twice) instead of raw_input.
    """
    default_str = ""
    if default != '':
        default_str = " [%s] " % str(default).strip()
    else:
        default_str = " "
    # Construct full prompt string
    prompt_str = text.strip() + default_str
    # Loop until we pass validation
    value = None
    while value is None:
        # Get input; empty input falls back to `default` via the `or`.
        value = (passprompt(prompt_str) if password else raw_input(prompt_str)) or default
        # Handle validation
        if validate:
            # Callable
            if callable(validate):
                # Callable validate() must raise an exception if validation
                # fails.
                try:
                    value = validate(value)
                except Exception, e:
                    # Reset value so we stay in the loop
                    value = None
                    print("Validation failed for the following reason:")
                    print(indent(e.message) + "\n")
            # String / regex must match and will be empty if validation fails.
            else:
                # Need to transform regex into full-matching one if it's not.
                if not validate.startswith('^'):
                    validate = r'^' + validate
                if not validate.endswith('$'):
                    validate += r'$'
                result = re.findall(validate, value)
                if not result:
                    print("Regular expression validation failed: '%s' does not match '%s'\n" % (value, validate))
                    # Reset value so we stay in the loop
                    value = None
    return value
def create_service(home):
    """On frozen Windows builds, register warden-svc.exe as a service.

    No-op on other platforms or when not running from a frozen executable.
    """
    if 'win' not in sys.platform:
        return
    if not hasattr(sys, "frozen"):
        return
    svc_exe = os.path.join(os.path.dirname(sys.executable), 'warden-svc.exe')
    if os.path.exists(svc_exe):
        log.info('Attempting to create service')
        log.info('Output: \n%s',
                 subprocess.check_output([svc_exe, '-h', home, 'install']))
def main():
    """Interactive entry point: collect settings (flags or prompts),
    prepare the warden home directory, and bootstrap the installation.

    Fixes: removed the unused `import ConfigParser`; renamed the loop
    variable `help`, which shadowed the builtin.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Warden init script')
    parser.add_argument('home', nargs='?', help="the warden home folder")
    # (flag-name, human description, attribute name) per prompted value.
    prompt_args = [
        ('first-project', "the first sentry project", 'first_project'),
        ('super-user', "the user name for the admin user", 'super_user'),
        ('super-password', "the password for the admin user", 'super_password'),
        ('super-email', "the email address for the admin user", 'super_email'),
    ]
    for flag, description, dest in prompt_args:
        parser.add_argument('--%s' % flag, help=description, dest=dest, required=False)
    args = parser.parse_args()
    # Prompt for anything not supplied on the command line; password flags
    # get the no-echo double prompt.
    for flag, description, dest in prompt_args:
        if not getattr(args, dest, None):
            setattr(args, dest, prompt('Enter %s:' % (description), password='password' in flag))
    home = get_home(args.home)
    if not os.path.exists(home):
        os.makedirs(home)
    os.environ['WARDEN_HOME'] = home
    # Seed the home directory with the template configuration files.
    dir_util.copy_tree(os.path.join(os.path.dirname(__file__), 'templateconf'), home)
    autoconf(home)
    suser = (args.super_user, args.super_password, args.super_email)
    setup(home, suser, args.first_project)
    create_service(home)
if __name__ == '__main__':
main()
#!/usr/bin/env python
"""
_GenerateMainScript_
For a Given TaskObject instance, create a StructuredFile
representing a 'Main' Script to run the task, and insert the
details of the Script into the ShREEKTask.
The StructuredFile instance is added to the TaskObject, and the
script name is set as the Executable attribute of the object
The StructuredFile is not actually populated to run any particular
executable, but rather provides a standard framework in which to
insert commands
"""
class GenerateMainScript:
    """
    _GenerateMainScript_
    Create a StructuredFile named after the TaskObject, mark it
    executable, and record its name on both the task object and the
    embedded ShREEKTask so it can function as an executable.
    """

    def __call__(self, taskObject):
        """
        _operator()_
        Generate the main shell script for `taskObject` and wire its
        name into the task's Executable slots.
        """
        scriptName = "%s-main.sh" % taskObject['Name']
        mainScript = taskObject.addStructuredFile(scriptName)
        mainScript.setExecutable()
        # Standard framework preamble; concrete commands are appended later.
        preamble = [
            "#!/bin/bash",
            "echo \"Task Running: %s\"" % taskObject['Name'],
            "echo \"From Dir: `pwd`\"",
            "echo \"Started: `date +%s`\"",
        ]
        for line in preamble:
            mainScript.append(line)
        taskObject['Executable'] = scriptName
        taskObject['ShREEKTask'].attrs['Executable'] = scriptName
        return
# The following code displays a message(s) about the acidity of a solution:
# ph = float(input("Enter the ph level: "))
# if ph < 7.0:
# print("It's acidic!")
# elif ph < 4.0:
# print("It's a strong acid!")
def acid_test(ph):
    # Prints a message about the acidity of a ph value.
    # Because of the elif, the strong-acid message can never print: any
    # ph < 4.0 already satisfies ph < 7.0 -- that is the point of the
    # exercise in the comments above, so the branch is left as-is.
    if ph < 7.0:
        print("It's acidic!")
    elif ph < 4.0:
        print("It's a strong acid")
# a. What message(s) are displayed when the user enters 6.4?
acid_test(6.4)
# b. What message(s) are displayed when the user enters 3.6?
acid_test(3.6)
# c. Make a small change to one line of the code so that both messages
# are displayed when a value less than 4 is entered.
def acid_test(ph):
    """Print acidity messages; values below 4.0 produce both messages
    because the two checks are independent ifs (the exercise's fix)."""
    thresholds = [(7.0, "It's acidic!"), (4.0, "It's a strong acid")]
    for limit, message in thresholds:
        if ph < limit:
            print(message)
acid_test(3.9)
UTF-8
Python
false
false
2,013
5,970,004,567,928
b183833d1be7458b48b416d702b3edbea4802e2c
8fdcf5600565d44931013553a3edf1b41047cb3d
/src/noclobberdict.py
fc0d6d9cc9d68ff4af419758897ae4c4501ba7a5
[]
no_license
gberriz/datarail-2.0
https://github.com/gberriz/datarail-2.0
b310720c4f3054f3078a2e7cd892d184924324e4
4a6d132f2faa1e2f0e16360a9aefa6b5cd0c5a6b
refs/heads/master
2021-01-10T11:21:20.763195
2012-03-01T21:07:09
2012-03-01T21:07:09
1,007,001
0
1
null
null
null
null
null
null
null
null
null
null
null
null
null
class NoClobberDict(dict):
    """
    A dictionary whose keys may be assigned to at most once.
    """

    def __setitem__(self, key, value):
        """
        Assign value to self[key].
        Re-assigning an equal value is a harmless no-op; assigning a
        different value raises ValueError.
        """
        if key not in self:
            super(NoClobberDict, self).__setitem__(key, value)
            return
        existing = self[key]
        if existing != value:
            raise ValueError('key "%s" is already in dictionary, '
                             'with value %s' % (str(key), str(existing)))

    def update(self, d=None, **kw):
        """
        Update this dictionary with the values in d and **kw.
        The setting raises an exception if the updating would clobber
        an existing value.
        """
        if d is not None:
            # Accept either a mapping or an iterable of (key, value) pairs.
            pairs = d.items() if hasattr(d, 'items') else d
            for k, v in pairs:
                self[k] = v
        for k, v in kw.items():
            self[k] = v
if __name__ == '__main__':
    # Inline smoke tests, executed only when the module is run directly.
    import unittest

    class Tests(unittest.TestCase):
        def test_setitem(self):
            """Test that new values can't clobber old ones."""
            d = NoClobberDict(x=1)
            d['x'] = 1
            self.assertRaises(ValueError, d.__setitem__, 'x', 2)

        def test_equality_test(self):
            """Tests that equality (not identity) is the only criterion
            to test for clobbering."""
            d = NoClobberDict()
            d['x'] = []
            d['x'] = []
            self.assertRaises(ValueError, d.__setitem__, 'x', [1])
            d['y'] = None
            d['y'] = None

        def test_update(self):
            """Test that update won't clobber."""
            d = NoClobberDict(x=1)
            d.update({'x': 1})
            d.update(x=1)
            self.assertRaises(ValueError, d.update, {'x': 2})
            self.assertRaises(ValueError, d.update, x=2)

    print "running tests"
    unittest.main()
UTF-8
Python
false
false
2,012
3,994,319,589,604
e58f4e7ee28c16396e990b5bebb87850c49a21ca
844390bdb77a4f6ad023ad182530f02256c78f43
/Functions.py
d867d6795dea251ed0445b3f284a77a1b6db0514
[]
no_license
stal888/DropServ
https://github.com/stal888/DropServ
9b9235911eab5023031e2f285eaf8607c744b491
238c27263aa2a028ca060e52590aa50f6e88f908
refs/heads/master
2016-09-03T01:04:18.154895
2012-07-22T20:07:52
2012-07-22T20:07:52
3,645,666
0
0
null
false
2012-07-22T19:47:31
2012-03-07T04:13:39
2012-07-22T19:47:30
2012-07-22T19:47:30
176
null
null
null
Python
null
null
# DropServ.
# This code is copyright (c) 2011 - 2012 by the PyBoard Dev Team <[email protected]>
# All rights reserved.
from __future__ import division
import time
import hashlib
import random
import threading
import math
import os
import copy
from collections import deque
from pystache import Renderer
allchars = "abcdefghijklmnopqrstuvwxyzABCDEFGHJKLMNOPQRSTUVWXYZ123456789"
class Functions(object):
    """
    Utility helpers for DropServ: template rendering with a small cache,
    session-token generation, password hashing, and misc formatting.
    """
    def __init__(self, PyBoard):
        # Back-reference to the owning PyBoard instance (conf, lang, workd).
        self.instance = PyBoard
        # Bounded FIFO of (template_name, contents, load_time) tuples.
        self.TemplateCache = deque()
        self.TemplateConstants = None
        self._refreshConstants()
        # Per-template RLocks guarding template file reads.
        self.file_locks = {};
        print(self.instance.lang["FUNC_LOADED"])

    def file_size(self, num):
        # Human-readable size for a byte count (KB below ~1 MB, else MB).
        kb = num / 1024
        if kb > 1000:
            mb = kb / 1024
            return "{0:03.2f} MB".format(mb)
        else:
            return "{0:03.2f} KB".format(kb)

    def genAuthToken(self, user, origin):
        # Pick a session id not already in use.
        while True:
            sid = self.mkstring(5)
            if sid not in self.instance.Sessions:
                break
        times = int(math.floor(time.time()))
        token = hashlib.sha1(user["email"] + origin + self.instance.conf["LoginSalt"] + str(times)).hexdigest()
        self.instance.Sessions[sid] = (user["email"], times)
        # Expire sessions older than 24h. NOTE(review): deletes from the
        # dict while iterating .items(); safe on Python 2 (items() returns
        # a list) but would break under Python 3.
        for x, v in self.instance.Sessions.items():
            if times - v[1] >= 86400:
                del self.instance.Sessions[x]
        return "|".join([sid, token])

    def hashPassword(self, password, salt=None):
        # salt=None: generate a salt; salt="": unsalted digest; otherwise
        # the salt must match the password length or ("*", salt) signals
        # a mismatch to the caller.
        if salt == None:
            salt = self.mkstring(len(password))
        elif salt == "":
            return hashlib.sha512(password).hexdigest()
        else:
            salt = str(salt)
            if len(salt) != len(password):
                return ("*", salt)
        # Interleave password and salt characters before hashing.
        saltedPass = "".join(map(lambda x, y: x + y, password, salt))
        hashed = hashlib.sha512(saltedPass).hexdigest()
        return (hashed, salt)

    def mkstring(self, length):
        # Random token from an ambiguity-reduced alphabet (no I/0/l... in
        # allchars). NOTE(review): index 2 is always forced to 'l', which
        # reduces token entropy -- looks like a bug or deliberate marker;
        # confirm before changing.
        s = ""
        for x in range(length):
            if x == 2:
                s += "l"
            else:
                s += random.choice(allchars)
        return s

    def page_format(self, v={}, template=None, TemplateString="", root=None):
        """Format pages (obv)"""
        # NOTE(review): mutable default `v={}` is shared across calls; the
        # loop below mutates it in place.
        temp = None
        if root == None:
            root = self.instance.workd + "/templates"
        if template != None:
            # Cap the cache at 5 entries (FIFO eviction).
            if len(self.TemplateCache) >= 5:
                self.TemplateCache.popleft()
            for item in copy.copy(self.TemplateCache):
                if item[0] == template:
                    # Stale if the file changed after it was cached.
                    if os.path.getmtime("{0}/{1}".format(root, template)) > item[2]:
                        self.TemplateCache.remove(item)
                        break
                    else:
                        temp = item[1]
                        break
            if not temp:
                # Serialize reads of the same template file.
                if template not in self.file_locks:
                    self.file_locks[template] = threading.RLock()
                self.file_locks[template].acquire()
                try:
                    with open(root + "/{0}".format(template), "r") as plate:
                        temp = plate.read()
                    self.TemplateCache.append((template, temp, time.time()))
                    self.file_locks[template].release()
                except IOError:
                    # Missing/unreadable template: drop the lock and bail.
                    if template in self.file_locks:
                        self.file_locks[template].release()
                        del self.file_locks[template]
                    return ""
        elif TemplateString != "":
            temp = TemplateString
        else:
            return ""
        # Coerce string values to unicode so pystache renders cleanly.
        for x in v:
            if isinstance(v[x], basestring):
                try:
                    v[x] = v[x].decode("utf-8")
                except:
                    pass
        formatted = Renderer().render(temp, self.instance.lang.getDict, v, constant=self.TemplateConstants)
        return formatted.encode("utf-8")

    def read_faster(self, file, close=True):
        # Stream the file in 64 KiB chunks; optionally close when drained.
        while True:
            c = file.read(16*4096)
            if c:
                yield c
            else:
                break
        if close:
            file.close()
        return

    def _refreshConstants(self):
        # Recompute template constants from the current configuration.
        self.TemplateConstants = {
            "version": self.instance.conf["__version"],
            "root": ("/{0}".format(self.instance.conf["Subfolder"].strip("/"))) if self.instance.conf["Subfolder"].strip("/") else "",
        }
        self.TemplateConstants["static"] = "{0}/static".format(self.TemplateConstants["root"])

    def verifyLogin(self, crumb, origin):
        # crumb is "sid|token" as produced by genAuthToken; recompute the
        # token for the stored session and compare.
        pair = crumb.split('|')
        if pair[0] not in self.instance.Sessions:
            return None
        elif hashlib.sha1(self.instance.Sessions[pair[0]][0] + origin + self.instance.conf["LoginSalt"] + str(self.instance.Sessions[pair[0]][1])).hexdigest() == pair[1]:
            return True
        else:
            return None
#!/usr/bin/env python
# Copyright 2010 University of Chicago
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Demonstrate API calls.
Example run using standard globus toolkit certificate locations:
python example.py USERNAME -k ~/.globus/userkey.pem -c ~/.globus/usercert.pem
"""
import time
from datetime import datetime, timedelta
import traceback
from globusonline.transfer.api_client import Transfer, create_client_from_args
# TransferAPIClient instance.
api = None
def tutorial():
    """
    Do a bunch of API calls and display the results. Does a small transfer
    between tutorial endpoints, but otherwise does not modify user data.
    Uses module global API client instance.
    """
    # See what is in the account before we make any submissions.
    print "=== Before tutorial ==="
    display_tasksummary(); print
    display_task_list(); print
    display_endpoint_list(); print
    # auto activate the endpoint, and display before/after.
    display_activation("go#ep1")
    display_activation("go#ep2")
    print "=== Before transfer ==="
    display_ls("go#ep1"); print
    display_ls("go#ep2"); print
    # submit a transfer
    code, message, data = api.transfer_submission_id()
    submission_id = data["value"]
    # Give the transfer 10 minutes to complete before it is aborted.
    deadline = datetime.utcnow() + timedelta(minutes=10)
    t = Transfer(submission_id, "go#ep1", "go#ep2", deadline)
    t.add_item("/~/.bashrc", "/~/api-example-bashrc-copy")
    code, reason, data = api.transfer(t)
    task_id = data["task_id"]
    # see the new transfer show up
    print "=== After submit ==="
    display_tasksummary(); print
    display_task(task_id); print
    # wait for the task to complete, and see the summary and lists
    # update
    if wait_for_task(task_id):
        print "=== After completion ==="
        display_tasksummary(); print
        display_task(task_id); print
        display_ls("go#ep2"); print
def display_activation(endpoint_name):
print "=== Endpoint pre-activation ==="
display_endpoint(endpoint_name)
print
code, reason, result = api.endpoint_autoactivate(endpoint_name,
if_expires_in=600)
if result["code"].startswith("AutoActivationFailed"):
print "Auto activation failed, ls and transfers will likely fail!"
print "result: %s (%s)" % (result["code"], result["message"])
print "=== Endpoint post-activation ==="
display_endpoint(endpoint_name)
print
def display_tasksummary():
code, reason, data = api.tasksummary()
print "Task Summary for %s:" % api.username
for k, v in data.iteritems():
if k == "DATA_TYPE":
continue
print "%3d %s" % (int(v), k.upper().ljust(9))
def display_task_list(max_age=None):
"""
@param max_age: only show tasks requested at or after now - max_age.
@type max_age: timedelta
"""
kwargs = {}
if max_age:
min_request_time = datetime.utcnow() - max_age
# filter on request_time starting at min_request_time, with no
# upper limit on request_time.
kwargs["request_time"] = "%s," % min_request_time
code, reason, task_list = api.task_list(**kwargs)
print "task_list for %s:" % api.username
for task in task_list["DATA"]:
print "Task %s:" % task["task_id"]
_print_task(task)
def _print_task(data, indent_level=0):
"""
Works for tasks and subtasks, since both have a task_id key
and other key/values are printed by iterating through the items.
"""
indent = " " * indent_level
indent += " " * 2
for k, v in data.iteritems():
if k in ("DATA_TYPE", "LINKS"):
continue
print indent + "%s: %s" % (k, v)
def display_task(task_id, show_subtasks=True):
code, reason, data = api.task(task_id)
print "Task %s:" % task_id
_print_task(data, 0)
if show_subtasks:
code, reason, data = api.subtask_list(task_id)
subtask_list = data["DATA"]
for t in subtask_list:
print " subtask %s:" % t["task_id"]
_print_task(t, 4)
def wait_for_task(task_id, timeout=120):
status = "ACTIVE"
while timeout and status == "ACTIVE":
code, reason, data = api.task(task_id, fields="status")
status = data["status"]
time.sleep(1)
timeout -= 1
if status != "ACTIVE":
print "Task %s complete!" % task_id
return True
else:
print "Task still not complete after %d seconds" % timeout
return False
def display_endpoint_list():
code, reason, endpoint_list = api.endpoint_list(limit=100)
print "Found %d endpoints for user %s:" \
% (endpoint_list["length"], api.username)
for ep in endpoint_list["DATA"]:
_print_endpoint(ep)
def display_endpoint(endpoint_name):
    """Fetch a single endpoint by name and print its details."""
    code, reason, endpoint_doc = api.endpoint(endpoint_name)
    _print_endpoint(endpoint_doc)
def _print_endpoint(ep):
name = ep["canonical_name"]
print name
if ep["activated"]:
print " activated (expires: %s)" % ep["expire_time"]
else:
print " not activated"
if ep["public"]:
print " public"
else:
print " not public"
if ep["myproxy_server"]:
print " default myproxy server: %s" % ep["myproxy_server"]
else:
print " no default myproxy server"
servers = ep.get("DATA", ())
print " servers:"
for s in servers:
uri = s["uri"]
if not uri:
uri = "GC endpoint, no uri available"
print " " + uri,
if s["subject"]:
print " (%s)" % s["subject"]
else:
print
def unicode_(data):
    """
    Coerce any type to unicode, assuming utf-8 encoding for strings.
    """
    if isinstance(data, unicode):
        return data
    # Byte strings are decoded as utf-8; everything else goes through the
    # type's own unicode conversion.
    return unicode(data, "utf-8") if isinstance(data, str) else unicode(data)
def display_ls(endpoint_name, path=""):
code, reason, data = api.endpoint_ls(endpoint_name, path)
# Server returns canonical path; "" maps to the users default path,
# which is typically their home directory "/~/".
path = data["path"]
print "Contents of %s on %s:" % (path, endpoint_name)
headers = "name, type, permissions, size, user, group, last_modified"
headers_list = headers.split(", ")
print headers
for f in data["DATA"]:
print ", ".join([unicode_(f[k]) for k in headers_list])
if __name__ == '__main__':
    # Build the client from command-line args (username, key/cert paths),
    # store it in the module global used by all helpers, then run the demo.
    api, _ = create_client_from_args()
    tutorial()
#!/usr/bin/python
#J.HE
'''Description: Given binding-site sequence info, search for the genomic coordinates in
the DNA sequence; get the coordinates of the miRNA binding sites for each gene'''
import sys,getopt
import re
from collections import defaultdict
from searchStr import bruteSearch
from searchStr import searchUTR
# Parse command-line options:
#   -h  show usage and exit
#   -c  cupid binding-site sequence file
#   -d  DNA sequence file
#   -o  output file
argv = sys.argv[1:]
input = ''
output = ''
usage = ""
example = ""
try:
    opts,args = getopt.getopt(argv,"hc:d:o:")
except getopt.GetoptError:
    print usage + "\n" + example
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        print usage + "\n" + example
        sys.exit()
    elif opt in ("-c"):
        cupidseq = arg
    elif opt in ("-d"):
        dnaseq = arg
    elif opt in ("-o"):
        output = arg
# NOTE(review): cupidseq/dnaseq are only bound when -c/-d are supplied;
# the prints below raise NameError otherwise — consider defaults/validation.
print('Script path:\t'+ sys.argv[0])
print('Input file:\t' + cupidseq)
print('Input file:\t'+ dnaseq)
print('Output file:\t'+ output )
##load all cupidseq
# Map: gene symbol -> list of lower-cased "start:sequence" binding-site records.
bsSeqDict = defaultdict(list)
with(open(cupidseq)) as f:
    line = f.readline()
    # Second readline: the first line (header) is intentionally discarded.
    line = f.readline()
    while line:
        gene, bsseqinfo = line.strip().split("\t")
        # Records for one gene are ";"-separated on a single line.
        for x in bsseqinfo.split(";"):
            bsSeqDict[gene].append(x.lower())
        line = f.readline()
print "binding seq loaded"
# print bsSeqDict.items()[1]
##process DNA seq by gene
def find_all(qstr, allstr):
    """Yield the start offset of every non-overlapping occurrence of
    qstr in allstr, scanning left to right."""
    step = len(qstr)
    pos = allstr.find(qstr)
    while pos != -1:
        yield pos
        pos = allstr.find(qstr, pos + step)
outputH = open(output, 'w')
# NOTE(review): header promises "Chr:bsStart-bsEnd" but only start
# coordinates are written below — confirm intended output format.
outputH.write("Symbol\tChr:bsStart-bsEnd\n")
cnt = 0
with(open(dnaseq)) as f:
    line = f.readline()
    while line:
        cnt = cnt + 1
        # Progress marker every 1000 input lines.
        if cnt % 1000 == 0 :
            print " %s line processed" % cnt
        # Skip the header row (starts with "Symbol").
        if not re.match("^Symbol",line):
            gene, coord, seq = line.strip().split("\t")
            # coord looks like "chrom:tss-tse".
            chrom, tss, tse = re.split(":|-", coord)
            if bsSeqDict.get(gene, ''):
                outbss = []
                for bsseq in bsSeqDict[gene]:
                    # Each record is "offset:sequence".
                    bsstart, querySeq = bsseq.split(":")
                    bsstart = int(bsstart)
                    # Genomic position = transcript start + hit index + offset.
                    for bsindex in searchUTR(querySeq, seq):
                        outbss.append(int(tss) + bsindex + bsstart )
                outRec = gene + "\t" + chrom + ":" + \
                    ";".join(map(str, list(set(outbss))))
                outputH.write(outRec + "\n" )
                # Each gene is emitted at most once.
                del bsSeqDict[gene]
        line = f.readline()
outputH.close()
UTF-8
Python
false
false
2,014
15,693,810,534,726
ca2171082e79b7958ccdfbe3d40006de1a10c690
59387662fba5de9d20209d855e688266aabe4961
/demo/urls.py
6240986a5a6fc05c312c11e9b876072cbd56b685
[
"BSD-2-Clause"
]
permissive
jordic/django_tiny_shop
https://github.com/jordic/django_tiny_shop
b41eac9f3d0d12b378359817d65ac34c9977a676
46dd4c1c2e3fdf96676c3d02ad197b7cecff6bc3
refs/heads/master
2021-01-18T22:01:29.712718
2014-12-01T05:45:37
2014-12-01T05:45:37
3,014,559
4
1
null
false
2018-06-08T10:48:15
2011-12-19T19:51:47
2017-09-25T19:39:20
2016-04-19T06:02:30
588
4
1
0
JavaScript
false
null
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Autor: jordi collell <[email protected]>
# http://tempointeractiu.cat
# -------------------------------------------------------------------
'''
'''
from django.conf.urls.defaults import patterns, include, url
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Examples:
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # PayPal IPN callback endpoint for checkout notifications.
    url(r'^checkout/paypal/ipn', include('paypal.standard.ipn.urls')),
    # The shop app owns the site root.
    url(r'', include('shop.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
    # Serve media directly in development only; in production the web
    # server should handle these paths.
    urlpatterns += patterns('',
        (r'^media/admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': './media/admin/'}),
        (r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root':'./media/'}),
    )
UTF-8
Python
false
false
2,014
8,297,876,822,675
fb163ea1ba1d790c1eaea50bcaf62aab99c29758
76288367bd583fe05faf79759e1fbdb8b66def2e
/01_Part1/handout/code/mapper_template.py
72a08ca48152056dcdb70826473f3d31901c388c
[]
no_license
parijitkedia/eth-datamining
https://github.com/parijitkedia/eth-datamining
7a89a5790697ba4bef23a9c1dc110f30fa901abf
80360563e28829af15bed9c1c888ad66d8769e13
refs/heads/master
2021-05-28T11:16:09.391042
2014-06-10T20:55:46
2014-06-10T20:55:46
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/env python
import numpy as np
import sys
def emit(key, value):
    """Write one tab-separated key/value record to stdout (Hadoop streaming)."""
    record = '\t'.join((key, value))
    print(record)
def getMinHashSignature(shingles, hash_fns):
    """For each hash function, return the minimum hash value over all row
    ids present in *shingles* (the classic min-hash signature)."""
    # Sentinel larger than any row index, so any real hash replaces it.
    sentinel = int(max(shingles)) + 100
    signature = [sentinel for _ in hash_fns]
    for row in range(int(max(shingles)) + 1):
        if row not in shingles:
            continue
        for i, fn in enumerate(hash_fns):
            value = h(fn, row)
            if value < signature[i]:
                signature[i] = value
    return signature
def partition(value, shingles, R, B, hash_fns):
    """LSH banding: emit one (band-key, value) pair per band of R
    consecutive signature rows (B bands total)."""
    signature = getMinHashSignature(shingles, hash_fns)
    for band in range(B):
        rows = signature[band * R:(band + 1) * R]
        band_key = ''.join(str(entry) for entry in rows)
        emit(band_key, value)
def h(permutator, row):
    """Universal hash ((a*row + b) mod p) with permutator == (a, b, p)."""
    a, b, p = permutator
    return (a * row + b) % p
def get_permutation_descriptor(size):
a = np.random.randint(size)
b = np.random.randint(size)
return (a,b,size)
if __name__ == "__main__":
    # Very important. Make sure that each machine is using the
    # same seed when generating random numbers for the hash functions.
    np.random.seed(seed=42)
    # Configuration
    num_features = 10000;
    t = 0.85
    n = 256; # number of hashes
    # B and R will produce threshhold of 0.8. Giving more FP.
    # This produces "only more work"
    B = 16;
    R = 16;
    # Generate hash functions
    hash_sigs = []
    for i in range(R*B):
        hash_sigs.append( get_permutation_descriptor(num_features) )
    # Input lines look like "video_XXXXXXXXX <shingle ids...>": chars 6:15
    # hold the numeric video id, chars 16: hold the shingle list.
    for line in sys.stdin:
        line = line.strip()
        video_id = int(line[6:15])
        shingles = line[16:].split()
        value = str(video_id) + " " + line[16:]
        # NOTE(review): the split() result above is immediately overwritten
        # by this float array — the previous assignment appears to be dead.
        shingles = np.fromstring(line[16:], sep=" ")
        partition(value, shingles, R, B, hash_sigs)
    #print("-----")
    #print("Config: R=" + str(R) + " B=" + str(B))
UTF-8
Python
false
false
2,014
2,800,318,694,059
24ab983f3b75101bf2a483ca106ceba60c39abde
bf574bc57fb27a8a92a8b585d37b43d7ad4329c2
/rubiks_solver.py
e29bbc54bcebc8ba4738de198b7db3b03ba5abf0
[]
no_license
rufpierre/rubiks_solver
https://github.com/rufpierre/rubiks_solver
c8a53abb0aa4c3d4a1ecd6082f053e200ddcb4d0
cfea97f07a48c48c671bc9dea7bea7a324a2d560
refs/heads/master
2021-01-20T07:10:28.825636
2014-03-16T16:26:41
2014-03-16T16:26:41
17,499,749
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from engine import *
from collections import Counter
# Key bindings shown to the user in CLI mode.
help = """f,r,b,l,d,t for standard transformations
F,R,B,L,D,T for reverse transformations
z to reset the cube
q to quit
"""
# clean screen ("\033c" is the terminal reset escape sequence)
print("\033c")
# init the display (cube and print_cube come from the engine module)
print(help)
print_cube(cube)
# choose between CLI mode or static mode
cli = True
#==============================================================================
# in progress:
# try to understand the genes and the organisms
# tweak the example copied into this directory (duplicate it)
# to adapt it to a string gene instead of a float one
#
# in progress 2:
# made a single gene class (no inheritance)
# the fitness function must be minimized (hence inverted)
# understand properly how the float-based mechanism works
#==============================================================================
# fitness of the cube
def variance(x):
    """Sample variance: 1/(n-1) * sum((x_i - mean)**2)."""
    n = len(x)
    mean = float(sum(x)) / n
    deviations = [(value - mean) ** 2 for value in x]
    return sum(deviations) / (n - 1)
def dist(x):
    """Distribution of a list of values: occurrence counts ordered by value."""
    counts = Counter(x)
    return [counts[value] for value in sorted(counts)]
def dist2(x):
    """Count occurrences of each of the six colors (0-5) on one facet,
    including zero counts."""
    counts = []
    for color in range(6):
        counts.append(x.count(color))
    return counts
def fitness(cube):
    """Fitness of the cube: the negated sum, over the six facets, of the
    variance of each facet's color distribution.  The solved cube scores
    -81 (the lowest and best score); 'cube in a cube' scores -33."""
    total = 0
    for facet in range(6):
        total += variance(dist2(cube[facet]))
    return -total
# static mode: apply a fixed scramble once and report its fitness
if (not cli):
    rand_inst = "flfTrtffllTLbDBllt"
    pons_asinorum = "F2 B2 R2 L2 U2 D2"
    cube_in_a_cube = "F L F U' R U F2 L2 U' L' B D' B' L2 U"
    # chain() (from engine) applies a command string to a cube state.
    cube = chain(cube_in_a_cube, cube)
    print_cube(cube)
    print(fitness(cube))
# CLI mode: read single-line commands until 'q'
while cli:
    raw_in = raw_input()
    # quit
    if raw_in == 'q':
        # clean screen
        print("\033c")
        # exit the loop
        break
    # put back the cube in its initial state
    elif raw_in == 'z':
        cube = original_cube
        print("\033c")
        print(help)
        print_cube(cube)
    # execute the string of commands
    else:
        cube = chain(raw_in, cube)
        print(help)
        print_cube(cube)
        print(fitness(cube))
from django.contrib.auth.models import models
from django import forms as forms
from django.contrib.auth import authenticate, login, logout
class User(models.Model):
    # Simple account record.
    # NOTE(review): Django ignores max_length on TextField; if these lengths
    # should be enforced, CharField is the usual choice — confirm intent.
    email = models.TextField(max_length=50)
    username = models.TextField(max_length=30)
    # NOTE(review): this stores the password in plain text; django.contrib.auth
    # hashes passwords — consider reusing its User model. TODO confirm.
    password = models.TextField(max_length=50)
    # Set once, automatically, when the row is first created.
    date_joined = models.DateTimeField(auto_now_add=True)
import pymouse
import time
import sys
class Move(object):
    """
    Basic structure representing a move.

    Arguments:
    name -- String : Name of the move
    descr -- [(Integer, Integer)] : Temporal list of (dx, dy) representing the
        movement.
    """

    def __init__(self, name, descr):
        assert len(descr) >= 1
        super(Move, self).__init__()
        self.name = name
        self.descr = descr

    def __str__(self):
        """
        String representation of the move: the name followed by the
        flattened "dx dy" pairs, space-separated.
        """
        positions = ' '.join('%d %d' % pair for pair in self.descr)
        return '%s %s' % (self.name, positions)

    @classmethod
    def from_string(cls, string):
        """
        Construct a *Move* from a string ("name dx dy dx dy ...").

        Raises:
        ValueError : When the string format is not good
        """
        words = string.split(' ')
        if len(words) < 3:
            raise ValueError('A move have to contain a minimum of a name and one position.')
        # A name plus (dx, dy) pairs means an odd word count.
        if len(words) % 2 != 1:
            raise ValueError('Expected one more integer')
        name = words[0]
        try:
            numbers = [int(word) for word in words[1:]]
        except ValueError as e:
            raise e
        pairs = zip(numbers[::2], numbers[1::2])
        return cls(name, pairs)

    def save(self, file=sys.stdout):
        """
        Write the move into *file*, one line per move.

        Raises:
        IOError : When it's impossible to write into the file
        """
        try:
            file.write(str(self) + '\n')
        except IOError:
            raise
def acquire_move(size, time_sleep=0.005):
    """
    Get a mouse move with a size of *size* points.

    Arguments:
    size -- Integer : The number of position taken for the move
    time_sleep -- Real : Time to sleep between taking the positions (default
        0.005)

    Returns:
    [Real] : A list of size *size* containing the moves (dx, dy).
    """
    tracker = pymouse.PyMouse()
    origin = tracker.position()
    samples = []
    remaining = size
    while remaining > 0:
        pos = tracker.position()
        # Each sample is the offset from the starting position.
        samples.append((pos[0] - origin[0], pos[1] - origin[1]))
        time.sleep(time_sleep)
        remaining -= 1
    return samples
def wait_mouse_move(static_threashold=20):
    """
    Wait and block until the mouse move by *static_threashold*.

    Arguments:
    static_threashold -- Real : Distance the mouse has to move (default 20)
    """
    tracker = pymouse.PyMouse()
    start = tracker.position()
    while True:
        # Manhattan distance from the starting position (x and y are
        # sampled by two separate position() calls, as in the original).
        moved = (abs(tracker.position()[0] - start[0])
                 + abs(tracker.position()[1] - start[1]))
        if moved >= static_threashold:
            return
        time.sleep(0.01)
if __name__ == '__main__':
    # Interactive recorder: capture named mouse moves and save them to a file.
    cont = True
    moves = []
    # Trailing comma keeps the prompt and the user's answer on one line.
    print 'Move name ?',
    name = raw_input()
    while cont:
        print 'Waiting the beginning of the move...'
        wait_mouse_move()
        print 'Recording the move...'
        # 100 samples per recorded move.
        move = Move(name, acquire_move(100))
        print 'Keep it ? (y/n)',
        if raw_input() == 'y':
            moves.append(move)
        print 'Continue ? (y/n)',
        cont = raw_input() == 'y'
    if moves:
        # Default file name derived from the move name.
        _f_name = name.lower() + '.mv'
        print 'Save moves into ? [%s]' % _f_name,
        f_name = raw_input()
        if not f_name:
            f_name = _f_name
        print 'Saving into %s...' % f_name,
        with open(f_name, 'w+') as f:
            for m in moves:
                m.save(f)
        print 'OK'
UTF-8
Python
false
false
2,014
11,012,296,163,839
0ae5c587a42e8d81deb7e1f6c9c15d0b586937ff
7a27cc67bed96eb14eac5d12c1d519df44b1f1d4
/Standardbib/umbenenner.py
a9709c05c0b2f0248b227f84411d07a0405e4e92
[]
no_license
DerNerger/Python
https://github.com/DerNerger/Python
b24bcfc2f5d331182f8ba86ccc5afdc3f8a2e47d
43881d7460505cd096ff851bd10e7e2fe48c0f3f
refs/heads/master
2021-01-23T22:11:04.247761
2014-05-23T12:27:13
2014-05-23T12:27:13
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import os
# Rename every *.htm file in the current working directory to *.html
# by appending an "l" to the file name.
for filename in os.listdir(os.getcwd()):
    if filename.endswith(".htm") :
        os.rename(os.path.join(os.getcwd(),filename), os.path.join(os.getcwd(),filename+"l"))
#!/usr/bin/env python
import xml.dom.minidom
import datetime
# This class is used for generating PERI xml
class PERIxml:
identifier = 0
def __init__(self):
self.build = None
self.run = None
self.genPERIheader()
# what function is used to create each type
self.periCreateMap = {}
self.periCreateMap["peri:resourceSet"] = "createResourceSet"
self.periCreateMap["peri:resource"] = "createResource"
self.periCreateMap["peri:nodeList"] = "createNodeList"
self.periCreateMap["peri:node"] = "createNode"
self.periCreateMap["peri:memory"] = "createMemory"
self.periCreateMap["peri:cpu"] = "createCpu"
# what function is used to add an attribute to each type
self.periAttrMap = {}
self.periAttrMap["peri:memory"] = "setMemoryAttribute"
self.periAttrMap["peri:nodeList"] = "setNodeListAttribute"
self.periAttrMap["peri:node"] = "setNodeAttribute"
self.periAttrMap["peri:cpu"] = "setCpuAttribute"
# when we look up elements, what type is their "name"
self.tagMap = {}
self.tagMap['peri:cpu'] = ("attribute","index")
self.tagMap['peri:memory'] = (None,"")
self.tagMap['peri:runrules'] = (None,"")
self.tagMap['peri:resourceSet'] = (None,"")
self.tagMap['peri:resource'] = (None,"")
self.tagMap['peri:nodeList'] = (None,"")
self.tagMap['peri:node'] = ("element","peri:nodeName")
def getRoot(self):
return self.doc
def PERI_nvp(self, name, value, parent):
nv = self.doc.createElement("peri:nvp")
n = self.doc.createAttribute("name")
n.value = name
v = self.doc.createTextNode(value)
nv.appendChild(v)
nv.setAttributeNode(n)
if parent:
parent.appendChild(nv)
def PERI_person(self, parent=None, userName=None, realName=None, email=None, phone=None, group=None):
person = self.doc.createElement("peri:person")
if userName:
user = self.doc.createElement("user")
name = self.doc.createTextNode(userName)
user.appendChild(name)
person.appendChild(user)
if realName:
user = self.doc.createElement("realName")
name = self.doc.createTextNode(realName)
user.appendChild(name)
person.appendChild(user)
if email:
user = self.doc.createElement("email")
name = self.doc.createTextNode(email)
user.appendChild(name)
person.appendChild(user)
if phone:
user = self.doc.createElement("phone")
name = self.doc.createTextNode(phone)
user.appendChild(name)
person.appendChild(user)
if group:
user = self.doc.createElement("group")
name = self.doc.createTextNode(group)
user.appendChild(name)
person.appendChild(user)
if parent:
parent.appendChild(person)
return person
def PERI_operatingSystem(self, osName, osVersion, osRelease, parent):
os = self.doc.createElement("peri:operatingSystem")
n = self.doc.createElement("name")
name = self.doc.createTextNode(osName)
n.appendChild(name)
os.appendChild(n)
if osVersion:
v = self.doc.createElement("version")
vers = self.doc.createTextNode(osVersion)
v.appendChild(vers)
os.appendChild(v)
if osRelease:
details = self.doc.createElement("peri:details")
self.PERI_nvp("release type", osRelease, details)
os.appendChild(details)
if parent:
parent.appendChild(os)
return os
def PERI_time(self, time=None, parent=None):
timeElem = self.doc.createElement("peri:time")
t = self.doc.createAttribute("value")
if time:
t.value = time
else:
t.value = (str(datetime.datetime.isoformat(datetime.datetime.now())),'.')[0]
timeElem.setAttributeNode(t)
if parent:
parent.appendChild(timeElem)
return timeElem
def PERI_file(self, value, which, parent):
fc = self.doc.createElement("peri:file")
if parent:
parent.appendChild(fc)
if which == "abspath":
f = self.doc.createAttribute("abspath")
f.value = value
fc.setAttributeNode(f)
elif which == "path-filename":
f = self.doc.createAttribute("path")
f.value = value
fc.setAttributeNode(f)
f = self.doc.createAttribute("filename")
parts = value.split('/') # extract the filename portion of the path
f.value = parts[len(parts)-1]
fc.setAttributeNode(f)
return fc
def genPERIheader(self):
# get the XML document ready and initialized
self.doc = xml.dom.minidom.Document()
self.rootElem = self.doc.createElement("peri:runrules")
self.doc.appendChild(self.rootElem)
# set namespace
ns = self.doc.createAttribute("xmlns:peri")
ns.value = "http://peri-scidac.org/"
self.rootElem.setAttributeNode(ns)
# create id
id = self.doc.createAttribute("id")
id.value = str(self.identifier)
self.identifier += 1
self.rootElem.setAttributeNode(id)
# add time element to root
self.PERI_time(None, self.rootElem)
def createRun(self, name=None, parent=None):
# create a peri:run element, if a parent element is sent in, we will use
# that, otherwise, the run element becomes a child of root
self.run = self.doc.createElement("peri:run")
if parent:
parent.appendChild(self.run)
else:
self.rootElem.appendChild(self.run)
return self.run
def getRun(self):
return self.run
def createBuild(self, name=None, parent=None):
# create peri:transformationSet and peri:transformation elements,
# we are modeling the build as a transformation of type compile/link
# if a parent element is sent in, we will use
# that, otherwise, the run element becomes a child of root
transE = self.doc.createElement("peri:transformationSet")
self.build = self.doc.createElement("peri:transformation")
transE.appendChild(self.build)
# transformation type
type = self.doc.createElement("type")
ty = self.doc.createTextNode("compile/link")
type.appendChild(ty)
self.build.appendChild(type)
if parent:
parent.appendChild(transE)
else:
self.rootElem.appendChild(transE)
return self.build
def getBuild(self):
return self.build
def createCompiler(self, name, parent=None):
# create a compiler, we are modeling compilers as a peri:resource
# if a parent is sent in, we will use that,
# otherwise, the compiler is a child of the build
compiler = self.doc.createElement("peri:resource")
type = self.doc.createElement("type")
t = self.doc.createTextNode("compiler")
type.appendChild(t)
compiler.appendChild(type)
nme = self.doc.createElement("name")
n = self.doc.createTextNode(name)
nme.appendChild(n)
compiler.appendChild(nme)
if parent:
parent.appendChild(compiler)
else:
self.build.appendChild(compiler)
return compiler
#def setCompilerName(self, compiler, CompilerName):
#nme = self.doc.createElement("name")
#n = self.doc.createTextNode(CompilerName)
#nme.appendChild(n)
#compiler.appendChild(nme)
def setCompilerAttribute(self, compiler, nme, val):
E = self.doc.createElement(nme)
e = self.doc.createTextNode(val)
E.appendChild(e)
compiler.appendChild(E)
def createLibrarySet(self, name=None, parent=None):
#model library set as resource
res = self.doc.createElement("peri:resource")
libs = self.doc.createElement("peri:libraries")
res.appendChild(libs)
if parent:
parent.appendChild(res)
return libs
def createLibrary(self, name, parent):
lib = self.doc.createElement("peri:library")
self.PERI_file(name, "path-filename", lib)
if parent:
parent.appendChild(lib)
return lib
def setLibraryAttribute(self, lib, name, val):
type = self.doc.createElement(name)
t = self.doc.createTextNode(val)
type.appendChild(t)
lib.appendChild(type)
def createTime(self,value, parent):
time = self.PERI_time(value,parent)
return time
def createApplication(self, AppName, parent):
# create a peri:program element, enclosed in a peri:resource
res = self.doc.createElement("peri:resource")
prog = self.doc.createElement("peri:program")
self.PERI_nvp("name", AppName, prog)
res.appendChild(prog)
parent.appendChild(res)
return prog
def setApplicationAttribute(self, app, name, val):
self.PERI_nvp(name, val, app)
def createPerson(self, userName, parent):
# modeling a person as a resource
res = self.doc.createElement("peri:resource")
person = self.PERI_person( res, userName)
if parent:
parent.appendChild(res)
return person
def createEnvironment(self, name=None, parent=None):
env = self.doc.createElement("peri:environment")
parent.appendChild(env)
return env
def setEnvironmentAttribute(self,env, nme, val):
self.PERI_nvp(nme, val, env)
def createExecutable(self, exeName, parent):
# modeling the executable as an output file of the build transformation
oSet = self.doc.createElement("peri:outputs")
file = self.PERI_file(exeName, "abspath", oSet)
if parent:
parent.appendChild(oSet)
return file
def createMachineNode(self, nodeName, parent):
# the machine is also a resource element
res = self.doc.createElement("peri:resource")
node = self.createNode(nodeName, res)
if parent:
parent.appendChild(res)
return node
def createOperatingSystem(self, OSName, parent):
# the build doesn't have an OS in it, so we model it as a
# resource element. However, the run does have an OS,
# so we don't need a resource element
if parent == self.build:
res = self.doc.createElement("peri:resource")
newParent = res
parent.appendChild(newParent)
else:
newParent = parent
os = self.doc.createElement("peri:operatingSystem")
n = self.doc.createElement("name")
name = self.doc.createTextNode(OSName)
n.appendChild(name)
os.appendChild(n)
if newParent:
newParent.appendChild(os)
return os
def setOperatingSystemAttribute(self, os, name, value):
if name == "version":
v = self.doc.createElement(name)
vers = self.doc.createTextNode(value)
v.appendChild(vers)
os.appendChild(v)
elif name == "release type":
details = self.doc.createElement("peri:details")
self.PERI_nvp(name, value, details)
os.appendChild(details)
def createProgram(self, name, parent):
prog = self.doc.createElement("peri:program")
n = self.doc.createElement("name")
v = self.doc.createTextNode(name)
n.appendChild(v)
prog.appendChild(n)
if parent:
parent.appendChild(prog)
return prog
def setProgramAttribute(self, prog, name, value):
if name == "version":
v = self.doc.createElement(name)
ver = self.doc.createAttribute("number")
ver.value = value
v.setAttributeNode(ver)
prog.appendChild(v)
def createScheduler(self, name, parent):
sched = self.doc.createElement("peri:scheduler")
set = self.doc.createElement("peri:settings")
self.PERI_nvp("name", name, set)
sched.appendChild(set)
if parent:
parent.appendChild(sched)
return sched
def setSchedulerAttribute(self, sched, name, value):
if name == "version" and value != "":
[set] = sched.getElementsByTagName("peri:settings")
self.PERI_nvp("version", value, set)
def createQueue(self,name=None, parent=None):
queue = self.doc.createElement("peri:queueContents")
if parent:
parent.appendChild(queue)
return queue
def createSchedulerJob(self, name=None, parent=None):
job = self.doc.createElement("peri:schedulerJob")
if parent:
parent.appendChild(job)
return job
def setSchedulerJobAttribute(self, job, name, value):
if name == "jobid":
jobid = self.doc.createElement("jobid")
id = self.doc.createAttribute("id")
id.value = value
jobid.setAttributeNode(id)
job.appendChild(jobid)
elif name == "programName":
pgname = self.doc.createElement("programName")
pgn = self.doc.createTextNode(value)
pgname.appendChild(pgn)
job.appendChild(pgname)
elif name == "hoursRunning":
hours = self.doc.createElement("hoursRunning")
if value.find(":") >= 0:
s = ""
if value.count(":") == 2:
h,m,s = value.split(":")
elif value.count(":") == 1:
h,m = value.split(":")
ht = int(h) + float(m)/60.0
if s != "":
ht += float(s)/60.0/60.0
elif value.strip() == "N/A":
ht = 0.0
else:
ht = value
hs = self.doc.createTextNode(str(ht))
hours.appendChild(hs)
job.appendChild(hours)
elif name == "status":
stats = self.doc.createElement("status")
sts = self.doc.createTextNode(value)
stats.appendChild(sts)
job.appendChild(stats)
def createBatchFile(self, batchName, parent):
# batch file is also modeled as a peri:resource
res = self.doc.createElement("peri:resource")
bf = self.doc.createElement("batchFile")
name = self.doc.createElement("name")
n = self.doc.createTextNode(batchName)
name.appendChild(n)
bf.appendChild(name)
res.appendChild(bf)
if parent:
parent.appendChild(res)
return bf
def setBatchFileAttribute(self, batch, name, value):
reses = self.doc.createElement(name)
rs = self.doc.createTextNode(value)
reses.appendChild(rs)
batch.appendChild(reses)
def createFileSystemSet(self, name=None,parent=None):
res = self.doc.createElement("peri:resource")
if parent:
parent.appendChild(res)
return res
def createFileSystem(self, name, parent):
fs = self.doc.createElement("fileSystem")
fsn = self.doc.createElement("name")
n = self.doc.createTextNode(name)
fsn.appendChild(n)
fs.appendChild(fsn)
if parent:
parent.appendChild(fs)
return fs
def setFileSystemAttribute(self, fs, name, value):
if name == "version" and value != "":
fsn = self.doc.createElement(name)
n = self.doc.createTextNode(value)
fsn.appendChild(n)
fs.appendChild(fsn)
def createDevice(self, name, parent):
dev = self.doc.createElement("device")
devn = self.doc.createElement("name")
n = self.doc.createTextNode(name)
devn.appendChild(n)
dev.appendChild(devn)
if parent:
parent.appendChild(dev)
return dev
def addDeviceAttribute(self, dev, name, val):
devn = self.doc.createElement(name)
n = self.doc.createTextNode(val)
devn.appendChild(n)
dev.appendChild(devn)
def createInputs(self, name=None, parent=None):
iset = self.doc.createElement("peri:inputs")
if parent:
parent.appendChild(iset)
return iset
def createFile(self, fullname, parent):
file = self.PERI_file(fullname, "abspath", parent)
return file
def createResourceSet(self, name=None, parent=None):
resSet = self.doc.createElement("peri:resourceSet")
if parent:
parent.appendChild(resSet)
return resSet
def createResource(self, name=None, parent=None):
res = self.doc.createElement("peri:resource")
if parent:
parent.appendChild(res)
return res
def createNodeList(self, name=None, parent=None):
res = self.doc.createElement("peri:nodeList")
if parent:
parent.appendChild(res)
return res
def setNodeListAttribute(self, nl, name, val):
if name == "concurrency":
conc = self.doc.createAttribute(name)
conc.value = val
nl.setAttributeNode(conc)
def createNode(self, nodeName=None, parent=None):
node = self.doc.createElement("peri:node")
name = self.doc.createElement("peri:nodeName")
n = self.doc.createTextNode(nodeName)
name.appendChild(n)
node.appendChild(name)
if parent:
parent.appendChild(node)
return node
def setNodeAttribute(self, node, name, val):
self.PERI_nvp(name, val, node)
def createMemory(self, name=None, parent=None):
mem = self.doc.createElement("peri:memory")
if parent:
parent.appendChild(mem)
return mem
def setMemoryAttribute(self, mem, name, val):
if name == "mainKB":
mainE = self.doc.createElement(name)
main = self.doc.createTextNode(val)
mainE.appendChild(main)
mem.appendChild(mainE)
elif name.find("cacheKB") >= 0:
# a hack... name could be "L1 cacheKB" or "L2 cacheKB"
level = ""
if name.upper().startswith("L"): # tell us the level?
level,name = name.split(" ")
E = self.doc.createElement(name)
if level:
lev = self.doc.createAttribute("level")
lev.value = level
E.setAttributeNode(lev)
e = self.doc.createTextNode(val)
E.appendChild(e)
mem.appendChild(E)
def createCpu(self, name=None, parent=None):
res = self.doc.createElement("peri:cpu")
index = self.doc.createAttribute("index")
index.value = name
res.setAttributeNode(index)
if parent:
parent.appendChild(res)
return res
def setCpuAttribute(self, cpu, name, val):
if name == "MHz":
mhzE = self.doc.createElement(name)
mhz = self.doc.createTextNode(val)
mhzE.appendChild(mhz)
cpu.appendChild(mhzE)
def findElement(self, tagName, parent, ident):
#print "searching for: %s %s %s" % (tagName,parent,ident)
children = self.rootElem.getElementsByTagName(tagName)
for child in children:
if child.parentNode == parent:
if ident == None or ident == "Unknown":
return child # assume only one, so there's no identifier
ret = self.tagMap[tagName]
if ret == None:
#print "%s: not in self.tagMap" % tagName
continue
type,name = ret
if type == None:
return child
elif type == "attribute":
id = child.getAttribute(name)
if id == ident:
return child
elif type == "element":
chldlist = child.childNodes
#print chldlist
for u in chldlist:
#print "NodeName: %s" % u.nodeName
if u.nodeName == name:
if u.childNodes[0].nodeValue == ident:
return child
return None
def createPERIelement(self, nameHier, typeHier):
    """Walk parallel "/"-separated name and type paths, creating any
    element that does not yet exist.

    For each level the factory method registered in self.periCreateMap
    for that type is looked up and invoked with (name, parent).
    """
    parent = self.rootElem
    names = nameHier.lstrip("/").split("/")
    types_ = typeHier.lstrip("/").split("/")
    for elem_name, elem_type in zip(names, types_):
        found = self.findElement(elem_type, parent, elem_name)
        if not found:
            factory = getattr(self, self.periCreateMap[elem_type])
            found = factory(elem_name, parent)
        parent = found
def addAttribute(self, nameHier, typeHier, attrName, attrValue):
parent = self.rootElem
nameHier = nameHier.lstrip("/")
typeHier = typeHier.lstrip("/")
el = None
elType = ""
for name, type in zip(nameHier.split("/"), typeHier.split("/")):
el = self.findElement(type, parent, name)
if not el:
print "ERROR: could not find parent of: %s " % name
return
parent = el
elType = type
aFunc = self.periAttrMap[elType]
af = getattr(self,aFunc)
af(el, attrName, attrValue)
def writeData(self, fileName):
    """Serialise self.doc as pretty-printed UTF-8 XML into *fileName*.

    Fixes: the handle was previously opened without a with-block, so it
    leaked when serialisation or the write raised.  The file is opened in
    binary mode because toprettyxml(encoding=...) returns encoded bytes.
    """
    with open(fileName, 'wb') as oF:
        oF.write(self.doc.toprettyxml(encoding='utf-8'))
UTF-8
Python
false
false
2,013
19,524,921,345,338
c6ffaa433bc7fc63407e3fb94e42c6994759239b
547aafad1a12f1ca9c2ce0e963839261b2c79abf
/localsms/utils.py
0a715c36b669e1c300faa1eb347cea56b1930eaa
[
"Apache-2.0"
]
permissive
SEL-Columbia/localsms
https://github.com/SEL-Columbia/localsms
9894384a6073cc9b85e77bbbdc069d5d96b56da8
617042be7909e4c34c754ff1d8f5ee47cf70e93e
refs/heads/master
2020-12-24T14:46:04.533376
2011-02-25T13:20:49
2011-02-25T13:20:49
1,034,118
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
import sys
import logging
import datetime
import ConfigParser
import urllib2
from serial.serialutil import SerialException
import pygsm
import httplib2
from localsms.db import ModemLog
def make_logger(config=None, name=None):
    """Build and return a DEBUG-level logger named *name* that appends to
    the file given by the [app] log_file setting of *config*.

    Note: *config* is required in practice; the None default will raise
    when config.get is called.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(config.get("app", "log_file"))
    handler.setLevel(logging.DEBUG)
    fmt = logging.Formatter(
        "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    handler.setFormatter(fmt)
    logger.addHandler(handler)
    return logger
def ping_remote(config=None,log=None):
    """
    Check to see if the remote server is running.

    Hits http://<remote.host>:<remote.port>/sms/ping and returns True only
    for an HTTP 200.  Any exception (connection refused, HTTP error raised
    by urllib2, etc.) is logged and reported as "down" (False).
    """
    try:
        response = urllib2.urlopen(
            "http://%s:%s/sms/ping" % (
                config.get("remote","host"),
                config.get("remote","port")))
        if response.code == 200: # make sure response is a 200 not 405
            return True
        else:
            return False
    except Exception,e:
        # Best effort: log and treat the server as unreachable rather
        # than propagating the error to the caller.
        log.error(e)
        return False
def make_modem_log(modem,msg,msgType):
    # Callback passed to pygsm as its logger: records one modem event as a
    # ModemLog row.  NOTE(review): the created object is neither returned
    # nor explicitly saved here -- presumably the ORM session persists it
    # automatically; verify against the db layer.
    ModemLog(time=str(datetime.datetime.now()),
        modem=str(modem),
        msg=str(msg),
        msgType=str(msgType))
def get_modem(config,log):
    # Open the GSM modem on the configured serial port and return it.
    # On serial failure the error is logged and the process exits,
    # since the application cannot run without the modem.
    try:
        log.info("Trying to connect to the modem")
        return pygsm.GsmModem(
            port=config.get("modem","port"),
            logger=make_modem_log,
            baudrate=config.get("modem","baudrate"))
    except SerialException,e:
        log.error("Unable to conntect to the modem %s "% e)
        sys.exit(0)
def get_config(path):
    """Parse the INI file at *path* and return the populated parser.

    Missing files are silently ignored by ConfigParser.read, so the
    returned parser may be empty.
    """
    parser = ConfigParser.RawConfigParser()
    parser.read(path)
    return parser
def make_http(config):
    """Build an httplib2 client pre-loaded with the remote server's
    username/password credentials from *config*."""
    client = httplib2.Http()
    username = config.get("remote", "username")
    password = config.get("remote", "password")
    client.add_credentials(username, password)
    return client
UTF-8
Python
false
false
2,011
8,615,704,431,166
f115244db39a0d62a37086a4f21debb47b38c5d3
8fcbc53097d1e468829985fe83a74a2f3f8abf35
/smxasn_invoices.py
f48b935223b9a4a4fb3b6fd4ea54488440829b48
[]
no_license
OrlandoHdz/SmxAsn
https://github.com/OrlandoHdz/SmxAsn
354a09bed5a797115604efabacde822808e34178
a0982ef432a6bfe510730a8451771fd9fb882bd7
refs/heads/master
2020-05-19T09:20:17.818047
2014-05-21T19:10:16
2014-05-21T19:10:16
null
0
0
null
null
null
null
null
null
null
null
null
null
null
null
null
#!/usr/bin/python
import pyodbc
from meritor_pdf import smx_pdf
from ntlm.smtp import ntlm_authenticate
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import json
class invoices(object):
"""
Obtiene las facturas del cliente para enviarlo por correo
Orlando Hdz
09-May-2014
"""
def __init(self):
self.path_pdfs = ""
self.dsn = ""
self.user = ""
self.password = ""
self.database = ""
self.clientes = "0"
self.address_book = []
self.sender = ""
self.smtp_host = ""
self.smtp_usuario = ""
self.smtp_password =""
def envia_mail(self, archivo):
print "enviando mail"
subject = "New Commercial Invoice"
body = "Sisamex, has generated a new commercial invoice \n Attached file"
msg = MIMEMultipart()
msg['From'] = self.sender
msg['To'] = ','.join(self.address_book)
msg['Subject'] = subject
msg.attach(MIMEText(body, 'plain'))
part = MIMEApplication(open(self.path_pdfs + "/" + archivo,"rb").read())
part.add_header('Content-Disposition', 'attachment', filename=archivo)
msg.attach(part)
text=msg.as_string()
connection = smtplib.SMTP(self.smtp_host, 25)
connection.ehlo()
ntlm_authenticate(connection, self.smtp_usuario, self.smtp_password)
connection.sendmail(self.sender,self.address_book, text)
connection.quit()
def run(self):
con_string = 'DSN=%s;UID=%s;PWD=%s;DATABASE=%s;' % (self.dsn, self.user, self.password, self.database)
cnxn = pyodbc.connect(con_string)
cursor_mst = cnxn.cursor()
#obtiene las facturas que tengan registro de vigilancia
#tambien que no se hallan enviado anteriormente
print self.clientes
cursor_mst.execute("""
select
factura, cliente
from
asn_embarque_enca as a
where
a.cliente in (""" + self.clientes + """)
and a.cancelada = 'F'
and a.vigilancia_tiempo is not null
and not exists
( select *
from asn_facturas_enviadas as b
where a.factura = b.factura) """)
rows_mst = cursor_mst.fetchall()
for row_mst in rows_mst:
#obtiene los encabezados
if row_mst.factura > 0:
cursor = cnxn.cursor()
print 'creando factura %d' % row_mst.factura
cursor.execute("""
select
convert(varchar,fecha,103) as fecha,
isnull(nombre,'') as slodto_r1,
isnull(dat1,'') as slodto_r2,
isnull(dat2,'') as slodto_r3,
isnull(dat3,'') as slodto_r4,
isnull(dat4,'') as slodto_r5,
isnull(embarcadoa,'') as shipto_r1,
isnull(emb_dir1,'') as shipto_r2,
isnull(emb_dir2,'') as shipto_r3,
isnull(emb_dir3,'') as shipto_r4,
isnull(emb_dir4,'') as shipto_r5,
isnull(dat1_mex,'') as aduana_r1,
isnull(dat2_mex,'') as aduana_r2,
isnull(dat3_mex,'') as aduana_r3,
isnull(dat4_mex,'') as aduana_r4,
isnull(dat5_mex,'') as aduana_r5,
isnull(dat1_usa,'') as broker_r1,
isnull(dat2_usa,'') as broker_r2,
isnull(dat3_usa,'') as broker_r3,
isnull(dat4_usa,'') as broker_r4,
isnull(dat5_usa,'') as broker_r5,
isnull(embarque_ref,'') as shipping_order,
convert(varchar,fecha,103) as shipping_date,
isnull(transporte,'') as carrier,
isnull(numero_camion,'') as bl_number,
isnull(terminos_vta,'') as commercial_terms,
isnull(pedimento,'') as clave_pedimento,
isnull(peso_um,'') as peso_um,
isnull(moneda,'') as moneda
from
v_factura_reporte
where
seq=1
and factura=? """, row_mst.factura)
row = cursor.fetchone()
pdf = smx_pdf()
if row:
pdf.ruta_destino = self.path_pdfs
pdf.factura = str(row_mst.factura)
pdf.fecha = row.fecha
pdf.sold_to_r1 = row.slodto_r1
pdf.sold_to_r2 = row.slodto_r2
pdf.sold_to_r3 = row.slodto_r3
pdf.sold_to_r4 = row.slodto_r4
pdf.sold_to_r5 = row.slodto_r5
if len(row.shipto_r3) > 40:
row.shipto_r4 = row.shipto_r1[40:len(row.shipto_r3)]
row.shipto_r3 = row.shipto_r1[0:39]
pdf.ship_to_r1 = row.shipto_r1
pdf.ship_to_r2 = row.shipto_r2
pdf.ship_to_r3 = row.shipto_r3
pdf.ship_to_r4 = row.shipto_r4
pdf.ship_to_r5 = row.shipto_r5
pdf.agente_aduanal_r1 = row.aduana_r1
pdf.agente_aduanal_r2 = row.aduana_r2
pdf.agente_aduanal_r3 = row.aduana_r3
pdf.agente_aduanal_r4 = row.aduana_r4
pdf.agente_aduanal_r5 = row.aduana_r5
pdf.us_broker_r1 = row.broker_r1
pdf.us_broker_r2 = row.broker_r2
pdf.us_broker_r3 = row.broker_r3
pdf.us_broker_r4 = row.broker_r4
pdf.us_broker_r5 = row.broker_r5
pdf.shipping_order = str(row.shipping_order)
pdf.shipping_date = row.shipping_date
pdf.carrier = row.carrier
pdf.bl_number = str(row.bl_number)
pdf.comercial_terms = row.commercial_terms
pdf.clave_pedimento = row.clave_pedimento
pdf.peso_um = row.peso_um
pdf.moneda = row.moneda
#obtiene las partidas
cursor.close()
cursor = cnxn.cursor()
cursor.execute("""
select
seq,
isnull(parte_cliente,'') as parte_no,
isnull(descripcion,'') as descripcion,
isnull(descripcion_usa,'') as descripcion2,
isnull(pais_origen,'') as pais_origen,
isnull(cant,0) as cantidad,
isnull(peso,0) as peso,
isnull(precio,0) as precio,
isnull(total,0) as total,
isnull(orden_compra,'') as orden_compra
from
v_factura_reporte
where
factura=?
order by seq
""",row_mst.factura)
rows = cursor.fetchall()
partidas = {}
if rows:
for row in rows:
detalle = []
if row.seq != 99:
detalle.append(row.seq)
detalle.append(row.parte_no)
detalle.append(row.descripcion)
detalle.append(row.descripcion2 + ' PO: ' + row.orden_compra)
detalle.append(row.pais_origen)
detalle.append(str(row.cantidad))
detalle.append(str(row.peso))
detalle.append(str(row.precio))
detalle.append(str(row.total))
else:
detalle.append(row.seq+1)
detalle.append('')
detalle.append(row.descripcion)
detalle.append('')
detalle.append('')
detalle.append('')
detalle.append('')
detalle.append('')
detalle.append('')
partidas[row.parte_no] = detalle
cursor.close()
pdf.partidas = partidas
#esto se va implementar para engranes donde para los empaques
#obtener el peso total
#print 'obtiene el peso total'
#cursor = cnxn.cursor()
#cursor.execute("exec pg_gn_peso_total ?,0,0",factura)
#row = cursor.fetchone()
#if row:
# pdf.peso_total = str(row[0])
#cursor.close()
pdf.build_pdf()
#registrar la factura
cursor = cnxn.cursor()
cursor.execute("""
insert into asn_facturas_enviadas
(compania, cliente_mapics, factura, fecha_enviada)
values (?,?,?, getdate())
""",72,row_mst.cliente,row_mst.factura)
cursor.commit()
cursor.close()
#envia el mail
#invoices.saludo(self)
invoices.envia_mail(self,pdf.archivo_salida)
cursor_mst.close()
if __name__ == "__main__":
    # Carga los parametros: load the runtime settings from the JSON config
    with open('parametros_smx.json') as config_file:
        params = json.load(config_file)["parametros"]
    # Instantiate and configure the invoice mailer
    oInvoices = invoices()
    oInvoices.dsn = params["dsn"]
    oInvoices.user = params["user"]
    oInvoices.password = params["password"]
    oInvoices.database = params["database"]
    oInvoices.clientes = params["clientes"]
    oInvoices.path_pdfs = params["path_pdfs"]
    oInvoices.address_book = params["address_book"]
    oInvoices.sender = params["smtp_sender"]
    oInvoices.smtp_host = params["smtp_host"]
    oInvoices.smtp_usuario = params["smtp_usuario"]
    oInvoices.smtp_password = params["smtp_password"]
    oInvoices.run()
#"""
#"""
#
#from psidialogs.easygui_api import *
#import psidialogs
#
#def main(backend = ''):
# if not backend:
# backend = psidialogs.choice(psidialogs.all_backends(), 'Select backend!')
# psidialogs.set_backend( force_backend=backend )
# _test()
#
#TkVersion=''
#EasyGuiRevisionInfo = ''
#
#def _test():
# """
# copy from easygui.py
# """
# # simple way to clear the console
# print "\n" * 100
# # START DEMONSTRATION DATA ===================================================
# choices_abc = ["This is choice 1", "And this is choice 2"]
# message = "Pick one! This is a huge choice, and you've got to make the right one " \
# "or you will surely mess up the rest of your life, and the lives of your " \
# "friends and neighbors!"
# title = ""
#
# # ============================= define a code snippet =========================
# code_snippet = ("dafsdfa dasflkj pp[oadsij asdfp;ij asdfpjkop asdfpok asdfpok asdfpok"*3) +"\n"+\
#"""# here is some dummy Python code
#for someItem in myListOfStuff:
# do something(someItem)
# do something()
# do something()
# if somethingElse(someItem):
# doSomethingEvenMoreInteresting()
#
#"""*16
# #======================== end of code snippet ==============================
#
# #================================= some text ===========================
# text_snippet = ((\
#"""It was the best of times, and it was the worst of times. The rich ate cake, and the poor had cake recommended to them, but wished only for enough cash to buy bread. The time was ripe for revolution! """ \
#*5)+"\n\n")*10
#
# #===========================end of text ================================
#
# intro_message = ("Pick the kind of box that you wish to demo.\n\n"
# + "In EasyGui, all GUI interactions are invoked by simple function calls.\n\n" +
# "EasyGui is different from other GUIs in that it is NOT event-driven. It allows" +
# " you to program in a traditional linear fashion, and to put up dialogs for simple" +
# " input and output when you need to. If you are new to the event-driven paradigm" +
# " for GUIs, EasyGui will allow you to be productive with very basic tasks" +
# " immediately. Later, if you wish to make the transition to an event-driven GUI" +
# " paradigm, you can move to an event-driven style with a more powerful GUI package" +
# "such as anygui, PythonCard, Tkinter, wxPython, etc."
# + "\n\nEasyGui is running Tk version: " + str(TkVersion)
# )
#
# #========================================== END DEMONSTRATION DATA
#
#
# while 1: # do forever
# choices = [
# "msgbox",
# "buttonbox",
# "choicebox",
# "multchoicebox",
# "textbox",
# "ynbox",
# "ccbox",
# "enterbox",
# "codebox",
# "integerbox",
# "boolbox",
# "indexbox",
# "filesavebox",
# "fileopenbox",
# "passwordbox",
# "multenterbox",
# "multpasswordbox",
# "diropenbox"
#
# ]
# choice = choicebox(intro_message, "EasyGui " + EasyGuiRevisionInfo, choices)
#
# if choice == None: return
#
# reply = choice.split()
#
# if reply[0] == "msgbox":
# reply = msgbox("short message", "This is a long title")
# print "Reply was:", reply
#
# elif reply[0] == "buttonbox":
# reply = buttonbox()
# print "Reply was:", reply
#
# reply = buttonbox(message, "Demo of Buttonbox with many, many buttons!", choices)
# print "Reply was:", reply
#
# elif reply[0] == "boolbox":
# reply = boolbox()
# print "Reply was:", reply
#
# elif reply[0] == "integerbox":
# reply = integerbox(
# "Enter a number between 3 and 333",
# "Demo: integerbox WITH a default value",
# 222, 3, 333)
# print "Reply was:", reply
#
# reply = integerbox(
# "Enter a number between 0 and 99",
# "Demo: integerbox WITHOUT a default value"
# )
# print "Reply was:", reply
#
# elif reply[0] == "diropenbox":
# title = "Demo of diropenbox"
# msg = "This is a test of the diropenbox.\n\nPick the directory that you wish to open."
# d = diropenbox(msg, title)
# print "You chose directory...:", d
#
# elif reply[0] == "fileopenbox":
# f = fileopenbox()
# print "You chose to open file:", f
#
# elif reply[0] == "filesavebox":
# f = filesavebox()
# print "You chose to save file:", f
#
# elif reply[0] == "indexbox":
# title = reply[0]
# msg = "Demo of " + reply[0]
# choices = ["Choice1", "Choice2", "Choice3", "Choice4"]
# reply = indexbox(msg, title, choices)
# print "Reply was:", reply
#
# elif reply[0] == "passwordbox":
# reply = passwordbox("Demo of password box WITHOUT default"
# + "\n\nEnter your secret password", "Member Logon")
# print "Reply was:", str(reply)
#
# reply = passwordbox("Demo of password box WITH default"
# + "\n\nEnter your secret password", "Member Logon", "alfie")
# print "Reply was:", str(reply)
#
# elif reply[0] == "enterbox":
# reply = enterbox("Enter the name of your best friend:", "Love!", "Suzy Smith")
# print "Reply was:", str(reply)
#
# reply = enterbox("Enter the name of your worst enemy:", "Hate!")
# print "Reply was:", str(reply)
#
# elif reply[0] == "multenterbox":
# msg = "Enter your personal information"
# title = "Credit Card Application"
# fieldNames = ["Name","Street Address","City","State","ZipCode"]
# fieldValues = [] # we start with blanks for the values
# fieldValues = multenterbox(msg,title, fieldNames)
#
# # make sure that none of the fields was left blank
# while 1:
# if fieldValues == None: break
# errmsg = ""
# for i in range(len(fieldNames)):
# if fieldValues[i].strip() == "":
# errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
# if errmsg == "": break # no problems found
# fieldValues = multenterbox(errmsg, title, fieldNames, fieldValues)
#
# print "Reply was:", fieldValues
#
# elif reply[0] == "multpasswordbox":
# msg = "Enter logon information"
# title = "Demo of multpasswordbox"
# fieldNames = ["Server ID", "User ID", "Password"]
# fieldValues = [] # we start with blanks for the values
# fieldValues = multpasswordbox(msg,title, fieldNames)
#
# # make sure that none of the fields was left blank
# while 1:
# if fieldValues == None: break
# errmsg = ""
# for i in range(len(fieldNames)):
# if fieldValues[i].strip() == "":
# errmsg = errmsg + ('"%s" is a required field.\n\n' % fieldNames[i])
# if errmsg == "": break # no problems found
# fieldValues = multpasswordbox(errmsg, title, fieldNames, fieldValues)
#
# print "Reply was:", fieldValues
#
#
# elif reply[0] == "ynbox":
# reply = ynbox(message, title)
# print "Reply was:", reply
#
# elif reply[0] == "ccbox":
# reply = ccbox(message)
# print "Reply was:", reply
#
# elif reply[0] == "choicebox":
# longchoice = "This is an example of a very long option which you may or may not wish to choose."*2
# listChoices = ["nnn", "ddd", "eee", "fff", "aaa", longchoice
# , "aaa", "bbb", "ccc", "ggg", "hhh", "iii", "jjj", "kkk", "LLL", "mmm" , "nnn", "ooo", "ppp", "qqq", "rrr", "sss", "ttt", "uuu", "vvv"]
#
# message = "Pick something. " + ("A wrapable sentence of text ?! "*30) + "\nA separate line of text."*6
# reply = choicebox(message, None, listChoices)
# print "Reply was:", reply
#
# message = "Pick something. "
# reply = choicebox(message, None, listChoices)
# print "Reply was:", reply
#
# message = "Pick something. "
# reply = choicebox("The list of choices is empty!", None, [])
# print "Reply was:", reply
#
# elif reply[0] == "multchoicebox":
# listChoices = ["aaa", "bbb", "ccc", "ggg", "hhh", "iii", "jjj", "kkk"
# , "LLL", "mmm" , "nnn", "ooo", "ppp", "qqq"
# , "rrr", "sss", "ttt", "uuu", "vvv"]
#
# message = "Pick as many choices as you wish."
# reply = multchoicebox(message,"DEMO OF multchoicebox", listChoices)
# print "Reply was:", reply
#
# elif reply[0] == "textbox":
# message = "Here is some sample text. " * 16
# reply = textbox(message, "Text Sample", text_snippet)
# print "Reply was:", reply
#
# elif reply[0] == "codebox":
# message = "Here is some sample code. " * 16
# reply = codebox(message, "Code Sample", code_snippet)
# print "Reply was:", reply
#
# else:
# msgbox("Choice\n\n" + choice + "\n\nis not recognized", "Program Logic Error")
# return
#
#
###if __name__ == '__main__':
### _test()
# -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2008 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# util.py -- Misc utility functions
# Thomas Perl <[email protected]> 2007-08-04
#
"""Miscellaneous helper functions for gPodder
This module provides helper and utility functions for gPodder that
are not tied to any specific part of gPodder.
"""
import gpodder
from gpodder.liblogger import log
import gtk
import gobject
import os
import os.path
import glob
import stat
import re
import subprocess
from htmlentitydefs import entitydefs
import time
import locale
import gzip
import datetime
import threading
import urlparse
import urllib
import urllib2
import httplib
import webbrowser
import feedparser
import StringIO
import xml.dom.minidom
# Pick theme icon names appropriate for the active frontend: stock GTK
# icons on the desktop GUI, Hildon theme icon names on Maemo.
# NOTE(review): neither constant is defined when gpodder.interface is
# anything else; get_tree_icon() would then raise NameError.
if gpodder.interface == gpodder.GUI:
    ICON_UNPLAYED=gtk.STOCK_YES
    ICON_LOCKED='emblem-nowrite'
elif gpodder.interface == gpodder.MAEMO:
    ICON_UNPLAYED='qgn_list_gene_favor'
    ICON_LOCKED='qgn_indi_KeypadLk_lock'
def make_directory( path):
    """
    Tries to create a directory if it does not exist already.
    Returns True if the directory exists after the function
    call, False otherwise.
    """
    if os.path.isdir( path):
        return True

    try:
        os.makedirs( path)
    except OSError:
        # makedirs failures (permissions, races, invalid path) surface as
        # OSError; the previous bare except also swallowed e.g.
        # KeyboardInterrupt.
        log( 'Could not create directory: %s', path)
        return False

    return True
def normalize_feed_url( url):
    """
    Converts any URL to http:// or ftp:// so that it can be
    used with "wget". If the URL cannot be converted (invalid
    or unknown scheme), "None" is returned.

    This will also normalize feed:// and itpc:// to http://
    Also supported are phobos.apple.com links (iTunes podcast)
    and itms:// links (iTunes podcast direct link).
    """
    # Reject empty/too-short input (shortest plausible URL is "http://x")
    if not url or len( url) < 8:
        return None

    # iTunes direct link: resolve the real feed URL from the itms XML
    if url.startswith('itms://'):
        url=parse_itunes_xml(url)

    # Links to "phobos.apple.com"
    url=itunes_discover_rss(url)
    if url is None:
        return None

    if url.startswith( 'http://') or url.startswith( 'https://') or url.startswith( 'ftp://'):
        return url

    # feed:// and itpc:// are http:// in disguise (same 7-char prefix)
    if url.startswith('feed://') or url.startswith('itpc://'):
        return 'http://' + url[7:]

    return None
def username_password_from_url( url):
    """
    Returns a tuple (username,password) containing authentication
    data from the specified URL or (None,None) if no authentication
    data can be found in the URL.
    """
    username = None
    password = None
    netloc = urlparse.urlparse(url)[1]
    if '@' in netloc:
        # Everything before the last "@" is the credential part
        auth_part = netloc.rsplit('@', 1)[0]
        if ':' in auth_part:
            username, password = auth_part.split(':', 1)
            username = urllib.unquote(username)
            password = urllib.unquote(password)
        else:
            # Username only, no password
            username = urllib.unquote(auth_part)
    return (username, password)
def directory_is_writable( path):
    """
    Returns True if the specified directory exists and is writable
    by the current user.
    """
    exists_as_dir = os.path.isdir(path)
    return exists_as_dir and os.access(path, os.W_OK)
def calculate_size( path):
    """
    Tries to calculate the size of a directory, including any
    subdirectories found. The returned value might not be
    correct if the user doesn't have appropriate permissions
    to list all subdirectories of the given path.
    """
    if path is None:
        return 0L
    # Refuse to sum a path directly under the filesystem root
    if os.path.dirname( path) == '/':
        return 0L
    if os.path.isfile( path):
        return os.path.getsize( path)
    # Directories are recursed into; symlinks are skipped to avoid loops
    if os.path.isdir( path) and not os.path.islink( path):
        sum=os.path.getsize( path)
        try:
            for item in os.listdir(path):
                try:
                    sum += calculate_size(os.path.join(path, item))
                except:
                    # Unreadable entry: count what we can and continue
                    log('Cannot get size for %s', path)
        except:
            # Directory itself not listable
            log('Cannot access: %s', path)
        return sum
    return 0L
def file_modification_datetime(filename):
    """
    Returns the modification date of the specified file
    as a datetime.datetime object or None if the modification
    date cannot be determined.
    """
    if filename is None:
        return None

    if not os.access(filename, os.R_OK):
        return None

    try:
        s = os.stat(filename)
        timestamp = s[stat.ST_MTIME]
        return datetime.datetime.fromtimestamp(timestamp)
    except (OSError, ValueError, OverflowError):
        # stat() can fail on races/broken links; fromtimestamp() on
        # out-of-range timestamps.  The previous bare except also hid
        # KeyboardInterrupt, so catch only the expected errors.
        log('Cannot get modification timestamp for %s', filename)
        return None
def file_age_in_days(filename):
    """
    Returns the age of the specified filename in days or
    zero if the modification date cannot be determined.
    """
    modified = file_modification_datetime(filename)
    if modified is None:
        return 0
    return (datetime.datetime.now() - modified).days
def file_age_to_string(days):
    """
    Converts a "number of days" value to a string that
    can be used in the UI to display the file age.

    >>> file_age_to_string(0)
    ''
    >>> file_age_to_string(1)
    'one day ago'
    >>> file_age_to_string(2)
    '2 days ago'
    """
    # _ is the gettext translation function installed application-wide.
    # Zero or negative ages yield the empty string.
    if days == 1:
        return _('one day ago')
    elif days > 1:
        return _('%d days ago') % days
    else:
        return ''
def get_free_disk_space(path):
    """
    Calculates the free disk space available to the current user
    on the file system that contains the given path.

    If the path (or its parent folder) does not yet exist, this
    function returns zero.
    """
    parent = os.path.dirname(path)
    if not os.path.exists(parent):
        return 0
    # f_bavail counts blocks available to unprivileged users (POSIX only)
    stats = os.statvfs(parent)
    return stats.f_bavail * stats.f_bsize
def format_date(timestamp):
    """
    Converts a UNIX timestamp to a date representation. This
    function returns "Today", "Yesterday", a weekday name or
    the date in %x format, which (according to the Python docs)
    is the "Locale's appropriate date representation".

    Returns None if there has been an error converting the
    timestamp to a string representation.
    """
    seconds_in_a_day = 60 * 60 * 24
    try:
        # +1 second guards against boundary rounding at midnight
        today_index = int((time.time() + 1) / seconds_in_a_day)
        day_index = int(timestamp / seconds_in_a_day)
    except:
        log('Warning: Cannot convert "%s" to date.', timestamp, traceback=True)
        return None
    diff = today_index - day_index
    if diff == 0:
        return _('Today')
    if diff == 1:
        return _('Yesterday')
    if diff < 7:
        # Weekday name
        return str(datetime.datetime.fromtimestamp(timestamp).strftime('%A'))
    # Locale's appropriate date representation
    return str(datetime.datetime.fromtimestamp(timestamp).strftime('%x'))
def format_filesize(bytesize, use_si_units=False, digits=2):
    """
    Formats the given size in bytes to be human-readable,

    Returns a localized "(unknown)" string when the bytesize
    has a negative value.
    """
    si_units = (('kB', 10 ** 3), ('MB', 10 ** 6), ('GB', 10 ** 9))
    binary_units = (('KiB', 2 ** 10), ('MiB', 2 ** 20), ('GiB', 2 ** 30))

    try:
        bytesize = float(bytesize)
    except:
        return _('(unknown)')
    if bytesize < 0:
        return _('(unknown)')

    units = si_units if use_si_units else binary_units

    # Walk all thresholds; the largest one not exceeding bytesize wins
    used_unit, used_value = 'B', bytesize
    for unit, threshold in units:
        if bytesize >= threshold:
            used_unit, used_value = unit, bytesize / float(threshold)

    return ('%.' + str(digits) + 'f %s') % (used_value, used_unit)
def delete_file( path):
    """
    Tries to delete the given filename and silently
    ignores deletion errors (if the file doesn't exist).
    Also deletes extracted cover files if they exist.
    """
    log( 'Trying to delete: %s', path)
    try:
        os.unlink(path)
    except:
        # Best effort: a missing file or permission error is ignored,
        # and the cover files are then left alone too (as before).
        return
    try:
        # Remove any extracted cover art that might exist
        for cover_file in glob.glob('%s.cover.*' % (path,)):
            os.unlink(cover_file)
    except:
        pass
def remove_html_tags(html):
    """
    Remove HTML tags from a string and replace numeric and
    named entities with the corresponding character, so the
    HTML text can be displayed in a simple text view.
    """
    # Compiled per call; hoist to module level if this becomes hot
    tag_re = re.compile('<[^>]*>')
    numeric_entity_re = re.compile('&#(\d{2,4});')
    named_entity_re = re.compile('&(.{2,8});')

    # Strip all HTML/XML tags from the string
    text = tag_re.sub('', html)
    # Numeric XML entities -> their unicode character
    text = numeric_entity_re.sub(lambda m: unichr(int(m.group(1))), text)
    # Named HTML entities -> their unicode character (table is latin-1)
    text = named_entity_re.sub(
        lambda m: unicode(entitydefs.get(m.group(1), ''), 'iso-8859-1'), text)
    return text.strip()
def torrent_filename( filename):
    """
    Checks if a file is a ".torrent" file by examining its
    contents and searching for the file name of the file
    to be downloaded.

    Returns the name of the file the ".torrent" will download
    or None if no filename is found (the file is no ".torrent")
    """
    if not os.path.exists( filename):
        return None

    # The bencoded info dict sits on the first line of a real torrent.
    # A with-block closes the handle, which the original leaked.
    with open( filename) as torrent_file:
        header = torrent_file.readline()

    try:
        header.index( '6:pieces')  # present in every torrent info dict
        name_length_pos = header.index('4:name') + 6
        colon_pos = header.find( ':', name_length_pos)
        name_length = int(header[name_length_pos:colon_pos]) + 1
        return header[(colon_pos + 1):(colon_pos + name_length)]
    except ValueError:
        # index() or int() failed: the header is not a torrent's
        return None
def file_extension_from_url( url):
    """
    Extracts the (lowercase) file name extension (with dot)
    from a URL, e.g. http://server.com/file.MP3?download=yes
    will result in the string ".mp3" being returned.

    This function will also try to best-guess the "real"
    extension for a media file (audio, video, torrent) by
    trying to match an extension to these types and recurse
    into the query string to find better matches, if the
    original extension does not resolve to a known type.

    http://my.net/redirect.php?my.net/file.ogg => ".ogg"
    http://server/get.jsp?file=/episode0815.MOV => ".mov"
    """
    (scheme, netloc, path, para, query, fragid)=urlparse.urlparse(url)
    filename=os.path.basename( urllib.unquote(path))
    (filename, extension)=os.path.splitext(filename)

    if file_type_by_extension(extension) is not None:
        # We have found a valid extension (audio, video, torrent)
        return extension.lower()

    # If the query string looks like a possible URL, try that first
    if len(query.strip()) > 0 and query.find('/') != -1:
        # Rebuild the query as a URL of the same scheme and recurse once
        query_url='://'.join((scheme, urllib.unquote(query)))
        query_extension=file_extension_from_url(query_url)

        if file_type_by_extension(query_extension) is not None:
            return query_extension

    # No exact match found, simply return the original extension
    return extension.lower()
def file_type_by_extension( extension):
    """
    Tries to guess the file type by looking up the filename
    extension from a table of known file types. Will return
    the type as string ("audio", "video" or "torrent") or
    None if the file type cannot be determined.
    """
    known_types = {
        'audio': [ 'mp3', 'ogg', 'wav', 'wma', 'aac', 'm4a' ],
        'video': [ 'mp4', 'avi', 'mpg', 'mpeg', 'm4v', 'mov', 'divx', 'flv', 'wmv', '3gp' ],
        'torrent': [ 'torrent' ],
    }

    if extension == '':
        return None
    # Drop a single leading dot, then compare case-insensitively
    if extension.startswith('.'):
        extension = extension[1:]
    extension = extension.lower()

    for file_type, extensions in known_types.items():
        if extension in extensions:
            return file_type
    return None
def get_tree_icon(icon_name, add_bullet=False, add_padlock=False, icon_cache=None, icon_size=32):
    """
    Loads an icon from the current icon theme at the specified
    size, suitable for display in a gtk.TreeView.

    Optionally adds a green bullet (the GTK Stock "Yes" icon)
    to the Pixbuf returned. Also, a padlock icon can be added.

    If an icon_cache parameter is supplied, it has to be a
    dictionary and will be used to store generated icons.
    On subsequent calls, icons will be loaded from cache if
    the cache is supplied again and the icon is found in
    the cache.
    """
    global ICON_UNPLAYED, ICON_LOCKED

    # Cache key covers every parameter that affects the rendered pixbuf
    if icon_cache is not None and (icon_name,add_bullet,add_padlock,icon_size) in icon_cache:
        return icon_cache[(icon_name,add_bullet,add_padlock,icon_size)]

    icon_theme=gtk.icon_theme_get_default()

    try:
        icon=icon_theme.load_icon(icon_name, icon_size, 0)
    except:
        # Unknown icon name: fall back to the stock question-mark icon
        log( '(get_tree_icon) Warning: Cannot load icon with name "%s", will use default icon.', icon_name)
        icon=icon_theme.load_icon(gtk.STOCK_DIALOG_QUESTION, icon_size, 0)

    if icon and (add_bullet or add_padlock):
        # We'll modify the icon, so use .copy()
        if add_bullet:
            try:
                icon=icon.copy()
                # Bullet emblem scaled to ~40% of icon size, bottom-right
                emblem=icon_theme.load_icon(ICON_UNPLAYED, int(float(icon_size)*1.2/3.0), 0)
                (width, height)=(emblem.get_width(), emblem.get_height())
                xpos=icon.get_width() - width
                ypos=icon.get_height() - height
                emblem.composite(icon, xpos, ypos, width, height, xpos, ypos, 1, 1, gtk.gdk.INTERP_BILINEAR, 255)
            except:
                log('(get_tree_icon) Error adding emblem to icon "%s".', icon_name)
        if add_padlock:
            try:
                icon=icon.copy()
                # Padlock emblem at half the icon size, top-left corner
                emblem=icon_theme.load_icon(ICON_LOCKED, int(float(icon_size)/2.0), 0)
                (width, height)=(emblem.get_width(), emblem.get_height())
                emblem.composite(icon, 0, 0, width, height, 0, 0, 1, 1, gtk.gdk.INTERP_BILINEAR, 255)
            except:
                log('(get_tree_icon) Error adding emblem to icon "%s".', icon_name)

    if icon_cache is not None:
        icon_cache[(icon_name,add_bullet,add_padlock,icon_size)]=icon

    return icon
def get_first_line( s):
    """
    Returns only the first line of a string, stripped so
    that it doesn't have whitespace before or after.
    """
    first_line = s.strip().split('\n')[0]
    return first_line.strip()
def updated_parsed_to_rfc2822( updated_parsed):
    """
    Converts a 9-tuple from feedparser's updated_parsed
    field to a C-locale string suitable for further use.

    If the updated_parsed field is None or not a 9-tuple,
    this function returns None.
    """
    if updated_parsed is None or len(updated_parsed) != 9:
        return None

    # Force the C locale so weekday/month names are not localized,
    # then restore whatever was active before.
    previous_locale = locale.getlocale(locale.LC_TIME)
    locale.setlocale(locale.LC_TIME, 'C')
    formatted = time.strftime('%a, %d %b %Y %H:%M:%S GMT', updated_parsed)
    if previous_locale != (None, None):
        try:
            locale.setlocale(locale.LC_TIME, previous_locale)
        except:
            log('Cannot revert locale to (%s, %s)', *previous_locale)
    return formatted
def object_string_formatter( s, **kwargs):
    """
    Makes attributes of objects passed in as keyword arguments
    available as {OBJECTNAME.ATTRNAME} placeholders in ``s`` and
    returns the string with those placeholders replaced by the
    corresponding attribute values.

    Example:
      e=Episode(); e.title='Hello'
      object_string_formatter('{episode.title} World', episode=e)
      => 'Hello World'
    """
    result=s
    for key, obj in kwargs.items():
        placeholder_re=r'\{%s\.([^\}]+)\}' % key
        for attr_name in re.findall(placeholder_re, s):
            if not hasattr(obj, attr_name):
                continue
            try:
                placeholder='{%s.%s}' % (key, attr_name)
                result=result.replace(placeholder, getattr(obj, attr_name))
            except:
                # Non-string attribute values cannot be substituted
                log( 'Could not replace attribute "%s" in string "%s".', attr_name, s)
    return result
def format_desktop_command( command, filename):
    """
    Formats a command template from the "Exec=" line of a .desktop
    file to a string that can be invoked in a shell.

    Handled format strings: %U, %u, %F, %f and a fallback that
    appends the filename as first parameter of the command.

    See http://standards.freedesktop.org/desktop-entry-spec/1.0/ar01s06.html
    """
    file_uri='file://%s' % filename
    replacements={
        '%U': file_uri,
        '%u': file_uri,
        '%F': filename,
        '%f': filename,
    }
    # Substitute the first placeholder found in the template
    for placeholder, substitute in replacements.items():
        if placeholder in command:
            return command.replace(placeholder, substitute)
    # Fallback: quote the filename and pass it as the first argument
    return '%s "%s"' % ( command, filename )
def find_command( command):
    """
    Searches the system's PATH for a specific command that is
    executable by the user. Returns the first occurrence of an
    executable binary in the PATH, or None if the command is
    not available.
    """
    path_env=os.environ.get('PATH')
    if path_env is None:
        return None
    for directory in path_env.split( os.pathsep):
        candidate=os.path.join( directory, command)
        if os.path.isfile( candidate) and os.access( candidate, os.X_OK):
            return candidate
    return None
def parse_itunes_xml(url):
    """
    Parses an XML document in the "url" parameter (this has to be
    a itms:// or http:// URL to a XML doc) and searches all "<dict>"
    elements for the first occurence of a "<key>feedURL</key>"
    element and then continues the search for the string value of
    this key.

    This returns the RSS feed URL for Apple iTunes Podcast XML
    documents that are retrieved by itunes_discover_rss().

    NOTE: Python 2 only ("except Exception, e" syntax below).
    """
    # itms:// URLs are plain HTTP underneath
    url=url.replace('itms://', 'http://')
    doc=http_get_and_gunzip(url)
    try:
        d=xml.dom.minidom.parseString(doc)
    except Exception, e:
        log('Error parsing document from itms:// URL: %s', e)
        return None
    last_key=None
    # Apple plist XML stores <key>/<value> as sibling elements inside
    # <dict>; track the most recent <key> text while scanning siblings.
    for pairs in d.getElementsByTagName('dict'):
        for node in pairs.childNodes:
            if node.nodeType != node.ELEMENT_NODE:
                continue
            if node.tagName == 'key' and node.childNodes.length > 0:
                if node.firstChild.nodeType == node.TEXT_NODE:
                    last_key=node.firstChild.data
            # Only look at values that follow a <key>feedURL</key>
            if last_key != 'feedURL':
                continue
            # The first <string> after <key>feedURL</key> is the feed URL
            if node.tagName == 'string' and node.childNodes.length > 0:
                if node.firstChild.nodeType == node.TEXT_NODE:
                    return node.firstChild.data
    return None
def http_get_and_gunzip(uri):
    """
    Does a HTTP GET request and tells the server that we accept
    gzip-encoded data. This is necessary, because the Apple iTunes
    server will always return gzip-encoded data, regardless of what
    we really request.

    Returns the uncompressed document at the given URI.

    NOTE: Python 2 only (urllib2 / StringIO modules).
    """
    request=urllib2.Request(uri)
    # Advertise gzip support; some servers (iTunes) send gzip regardless
    request.add_header("Accept-encoding", "gzip")
    usock=urllib2.urlopen(request)
    data=usock.read()
    # Transparently decompress when the server gzip-encoded the response
    if usock.headers.get('content-encoding', None) == 'gzip':
        data=gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()
    return data
def itunes_discover_rss(url):
    """
    Takes an iTunes-specific podcast URL and turns it
    into a "normal" RSS feed URL. If the given URL is
    not a phobos.apple.com URL, we will simply return
    the URL and assume it's already an RSS feed URL.

    Idea from Andrew Clarke's itunes-url-decoder.py
    """
    if url is None:
        return url

    if 'phobos.apple.com' not in url.lower():
        # This doesn't look like an iTunes URL
        return url

    try:
        page=http_get_and_gunzip(url)
        # Extract the itms:// URL embedded in the page's JavaScript
        (itms_url,)=re.findall("itmsOpen\('([^']*)", page)
        return parse_itunes_xml(itms_url)
    except:
        return None
def idle_add(func, *args):
    """
    This is a wrapper function that does the Right
    Thing depending on if we are running a GTK+ GUI or
    not. If not, we're simply calling the function.

    If we are a GUI app, we use gobject.idle_add() to
    call the function later - this is needed for
    threads to be able to modify GTK+ widget data.
    """
    if gpodder.interface in (gpodder.GUI, gpodder.MAEMO):
        # BUGFIX: removed a dead nested function ("x") that was defined
        # here but never used -- gobject.idle_add() is called directly.
        gobject.idle_add(func, *args)
    else:
        func(*args)
def discover_bluetooth_devices():
    """
    This is a generator function that returns
    (address, name) tuples of all nearby bluetooth
    devices found.

    If the user has python-bluez installed, it will
    be used. If not, we're trying to use "hcitool".

    If neither python-bluez or hcitool are available,
    this function is the empty generator.
    """
    try:
        # If the user has python-bluez installed
        import bluetooth
        log('Using python-bluez to find nearby bluetooth devices')
        # discover_devices(lookup_names=True) yields (address, name) pairs
        for address, name in bluetooth.discover_devices(lookup_names=True):
            yield (address, name)
    except:
        if find_command('hcitool') is not None:
            log('Using hcitool to find nearby bluetooth devices')
            # If the user has "hcitool" installed
            p=subprocess.Popen(['hcitool', 'scan'], stdout=subprocess.PIPE)
            for line in p.stdout:
                # "hcitool scan" lines look like "\t<ADDR>\t<NAME>\n"
                match=re.match('^\t([^\t]+)\t([^\t]+)\n$', line)
                if match is not None:
                    (address, name)=match.groups()
                    # BUGFIX: previously yielded (name, address) here --
                    # the opposite order of the python-bluez branch above
                    # and of the documented (address, name) contract.
                    yield (address, name)
        else:
            log('Cannot find either python-bluez or hcitool - no bluetooth?')
        return # <= empty generator
def bluetooth_send_file(filename, device=None, callback_finished=None):
    """
    Sends a file via bluetooth using gnome-obex send.

    Optional parameter device is the bluetooth address
    of the device; optional parameter callback_finished
    is a callback function that will be called when the
    sending process has finished - it gets one parameter
    that is either True (when sending succeeded) or False
    when there was some error.

    This function tries to use "bluetooth-sendto", and if
    it is not available, it also tries "gnome-obex-send".
    """
    if find_command('bluetooth-sendto'):
        cmd=['bluetooth-sendto']
        if device is not None:
            cmd.append('--device=%s' % device)
    elif find_command('gnome-obex-send'):
        cmd=['gnome-obex-send']
        if device is not None:
            cmd.extend(['--dest', device])
    else:
        # Neither sender tool is installed
        log('Cannot send file. Please install "bluetooth-sendto" or "gnome-obex-send".')
        if callback_finished is not None:
            callback_finished(False)
        return False

    cmd.append(filename)
    success=(subprocess.Popen(cmd).wait() == 0)
    if callback_finished is not None:
        callback_finished(success)
    return success
def format_seconds_to_hour_min_sec(seconds):
    """
    Take the number of seconds and format it into a
    human-readable string (duration).

    >>> format_seconds_to_hour_min_sec(3834)
    '1 hour, 3 minutes and 54 seconds'
    >>> format_seconds_to_hour_min_sec(3600)
    '1 hour'
    >>> format_seconds_to_hour_min_sec(62)
    '1 minute and 2 seconds'
    """
    # BUGFIX: the old doctest claimed 2600 seconds formats as '1 hour';
    # 2600 s is actually 43 minutes and 20 seconds. Example fixed to 3600.
    if seconds < 1:
        return _('0 seconds')

    result=[]

    # BUGFIX: use floor division so the arithmetic stays integral under
    # true division ("from __future__ import division" / Python 3),
    # where "seconds/3600" would produce a float and break the
    # "hours == 1" comparisons below. Identical behavior on ints in Py2.
    hours=seconds//3600
    seconds=seconds%3600
    minutes=seconds//60
    seconds=seconds%60

    if hours == 1:
        result.append(_('1 hour'))
    elif hours > 1:
        result.append(_('%i hours') % hours)

    if minutes == 1:
        result.append(_('1 minute'))
    elif minutes > 1:
        result.append(_('%i minutes') % minutes)

    if seconds == 1:
        result.append(_('1 second'))
    elif seconds > 1:
        result.append(_('%i seconds') % seconds)

    if len(result) > 1:
        return (' '+_('and')+' ').join((', '.join(result[:-1]), result[-1]))
    else:
        return result[0]
def get_episode_info_from_url(url, proxy=None):
    """
    Try to get information about a podcast episode by sending
    a HEAD request to the HTTP server and parsing the result.

    The return value is a dict containing all fields that
    could be parsed from the URL. This currently contains:

      "length": The size of the file in bytes
      "pubdate": A formatted representation of the pubDate

    If the "proxy" parameter is used, it has to be the URL
    of the HTTP proxy server to use, e.g. http://proxy:8080/

    If there is an error, this function returns {}. This will
    only function with http:// and https:// URLs.

    NOTE: Python 2 only (urlparse/httplib modules, "except ValueError, e").
    """
    if not (url.startswith('http://') or url.startswith('https://')):
        return {}

    if proxy is None or proxy.strip() == '':
        (scheme, netloc, path, parms, qry, fragid)=urlparse.urlparse(url)
        conn=httplib.HTTPConnection(netloc)
        # Talking to the origin server directly: send only the path part
        start=len(scheme) + len('://') + len(netloc)
        conn.request('HEAD', url[start:])
    else:
        # Talking through a proxy: connect to it and send the full URL
        (scheme, netloc, path, parms, qry, fragid)=urlparse.urlparse(proxy)
        conn=httplib.HTTPConnection(netloc)
        conn.request('HEAD', url)

    r=conn.getresponse()
    result={}

    log('Trying to get metainfo for %s', url)

    if 'content-length' in r.msg:
        try:
            length=int(r.msg['content-length'])
            result['length']=length
        except ValueError, e:
            # Header present but not an integer: skip the field
            log('Error converting content-length header.')

    if 'last-modified' in r.msg:
        try:
            parsed_date=feedparser._parse_date(r.msg['last-modified'])
            pubdate=updated_parsed_to_rfc2822(parsed_date)
            result['pubdate']=pubdate
        except:
            log('Error converting last-modified header.')

    return result
def gui_open(filename):
    """
    Open a file or folder with the default application set
    by the Desktop environment. This uses "xdg-open".
    """
    try:
        subprocess.Popen(['xdg-open', filename])
        # FIXME: Win32-specific "open" code needed here
        # as fallback when xdg-open not available
    except:
        # BUGFIX: the handler previously referenced the undefined names
        # "folder" and "self", raising a NameError instead of logging.
        log('Cannot open file/folder: "%s"', filename, traceback=True)
def open_website(url):
    """
    Opens the specified URL using the default system web
    browser. This uses Python's "webbrowser" module, so
    make sure your system is set up correctly.
    """
    # Run in a background thread so the caller is never blocked
    opener=threading.Thread(target=webbrowser.open, args=(url,))
    opener.start()
def sanitize_filename(filename):
    """
    Generate a sanitized version of a filename that can
    be written on disk (i.e. remove/replace invalid
    characters and encode in the native language)
    """
    # Try to detect OS encoding (by Leonid Ponomarev)
    if 'LANG' in os.environ and '.' in os.environ['LANG']:
        (language, enc)=os.environ['LANG'].rsplit('.', 1)
        log('Detected encoding: %s', enc)
    else:
        # Using iso-8859-15 here as (hopefully) sane default
        # see http://en.wikipedia.org/wiki/ISO/IEC_8859-1
        log('Using ISO-8859-15 as encoding. If this')
        log('is incorrect, please set your $LANG variable.')
        enc='iso-8859-15'
    # Replace path separators and other characters invalid in filenames
    return re.sub('[/|?*<>:+\[\]\"\\\]', '_', filename.strip().encode(enc, 'ignore'))
def find_mount_point(directory):
    """
    Try to find the mount point for a given directory.
    If the directory is itself a mount point, return
    it. If not, remove the last part of the path and
    re-check if it's a mount point. If the directory
    resides on your root filesystem, "/" is returned.
    """
    current=directory
    # Walk upwards until we either hit a mount point or reach "/"
    while os.path.split(current)[0] != '/':
        if os.path.ismount(current):
            return current
        current=os.path.split(current)[0]
    return '/'
#-*- encoding: utf8 -*-
"""
Triangle, pentagonal, and hexagonal numbers are generated by the following formulae:
Triangle Tn=n(n+1)/2 1, 3, 6, 10, 15, ...
Pentagonal Pn=n(3n-1)/2 1, 5, 12, 22, 35, ...
Hexagonal Hn=n(2n-1) 1, 6, 15, 28, 45, ...
It can be verified that T285 = P165 = H143 = 40755.
Find the next triangle number that is also pentagonal and hexagonal.
"""
from eulerfunc import triangle, ispentagonal, ishexagonal
def solution():
    """
    Bryukh's solution: scan triangle numbers T(286), T(287), ...
    (T(285) = 40755 is the known smaller case) and return the first
    one that is also pentagonal and hexagonal, or None if not found
    within the search range.
    """
    for n in xrange(286, 100000):
        candidate = triangle(n)
        if ispentagonal(candidate) and ishexagonal(candidate):
            return candidate
    return None
if __name__ == '__main__':
    # NOTE(review): stub entry point -- solution() is imported above but
    # never invoked here; presumably run via an external test harness.
    pass
UTF-8
Python
false
false
2,012
9,363,028,748,213
61bf2e292f91077cfac3a009072dbc6be8c0ed17
9d767c7df630aa7782264cc51073065e1f5d4c5d
/mlia-examples/src/book/regression/treeregress.py
e623432dc69009321762728aac280e27372e6dc5
[]
no_license
GomesNayagam/workspace
https://github.com/GomesNayagam/workspace
497e6eaad2785875a02f870cd384516b72501110
d23e806cbbe0decc8a34bcd61636468a46f439a4
refs/heads/master
2016-09-06T17:45:52.800243
2014-09-25T13:51:20
2014-09-25T13:51:20
24,454,554
1
0
null
null
null
null
null
null
null
null
null
null
null
null
null
from __future__ import division
from numpy import *
class TreeNode():
    """Node of a CART regression tree: split feature/value plus branches."""

    def __init__(self, feat, val, right, left):
        # BUGFIX: the original assigned to plain local variables, so the
        # constructor arguments were silently discarded and instances
        # ended up with no attributes at all.
        self.featureToSplitOn = feat
        self.valueOfSplit = val
        self.rightBranch = right
        self.leftBranch = left
def loadDataSet(filename):
    """
    Load a tab-separated text file of numbers into a list of rows,
    each row being a list of floats.
    """
    data = []
    # BUGFIX: "with" closes the file handle (previously leaked), and the
    # list comprehension also works on Python 3, where map() returns a
    # lazy iterator instead of a list.
    with open(filename) as infile:
        for line in infile:
            data.append([float(field) for field in line.strip().split("\t")])
    return data
def binSplitDataSet(dataset, feature, value):
    """
    Binary-split "dataset" (a numpy matrix) on one column.

    Returns (mat0, mat1): mat0 holds the rows where
    dataset[:, feature] > value, mat1 the remaining rows.
    """
    # BUGFIX (known MLiA errata): the original applied an extra trailing
    # "[0]" to each result, which kept only the FIRST matching row; with
    # chooseBestSplit()'s default tolN of 4, no split could ever succeed.
    mat0 = dataset[nonzero(dataset[:, feature] > value)[0], :]
    mat1 = dataset[nonzero(dataset[:, feature] <= value)[0], :]
    return mat0, mat1
def modelErr(dataSet):
    """
    Sum of squared residuals of a linear model fit to dataSet.

    NOTE(review): an identical modelErr() is defined again further
    down in this file; that later definition shadows this one.
    """
    ws,X,Y = linearSolve(dataSet)
    yHat = X * ws
    return sum(power(Y - yHat,2))
def regLeaf(dataSet):
    """Leaf value for a regression tree: mean of the target (last) column."""
    target = dataSet[:, -1]
    return mean(target)
def regErr(dataSet):
    """Total squared error of the target column: variance * row count."""
    n_rows = shape(dataSet)[0]
    return var(dataSet[:, -1]) * n_rows
def chooseBestSplit(dataSet, leafType=regLeaf, errType=regErr, ops=(1,4)):
    """
    Find the best (feature, value) pair to split "dataSet" (a numpy
    matrix whose last column is the target), judged by reduction in
    the error measure "errType".

    Returns (featureIndex, splitValue), or (None, leafValue) when no
    split reduces the error by at least ops[0] or when every candidate
    split leaves fewer than ops[1] rows in one branch.
    """
    tolS = ops[0]  # minimum error reduction a split must achieve
    tolN = ops[1]  # minimum number of rows allowed in each branch
    # if all the target variables are the same value: quit and return value
    if len(set(dataSet[:, -1].T.tolist()[0])) == 1:  # exit cond 1
        return None, leafType(dataSet)
    m, n = shape(dataSet)
    # the choice of the best feature is driven by Reduction in RSS error from mean
    S = errType(dataSet)
    bestS = inf
    bestIndex = 0
    bestValue = 0
    for featIndex in range(n-1):
        # BUGFIX (known MLiA errata): iterating a matrix column directly
        # yields (1,1) sub-matrices, which are unhashable and make set()
        # raise TypeError; flatten to a list of plain scalars first.
        for splitVal in set(dataSet[:, featIndex].T.tolist()[0]):
            mat0, mat1 = binSplitDataSet(dataSet, featIndex, splitVal)
            if shape(mat0)[0] < tolN or shape(mat1)[0] < tolN:
                continue
            newS = errType(mat0) + errType(mat1)
            if newS < bestS:
                bestIndex = featIndex
                bestValue = splitVal
                bestS = newS
    # if the decrease (S-bestS) is less than a threshold don't do the split
    if (S - bestS) < tolS:
        return None, leafType(dataSet)  # exit cond 2
    mat0, mat1 = binSplitDataSet(dataSet, bestIndex, bestValue)
    if (shape(mat0)[0] < tolN) or (shape(mat1)[0] < tolN):  # exit cond 3
        return None, leafType(dataSet)
    return bestIndex, bestValue  # the best feature to split on
                                 # and the value used for that split
def createTree(dataset, leafType=regLeaf, errType=regErr, ops=(1,4)):
    """
    Recursively build a CART tree over "dataset" (a numpy matrix whose
    last column is the target). Returns either a leaf value (when no
    worthwhile split exists) or a dict with keys "spInd", "spVal",
    "left" and "right".
    """
    feat, val = chooseBestSplit(dataset, leafType, errType, ops)
    # BUGFIX/idiom: compare against None with "is", not "==" -- equality
    # can misfire on types with broadcasting "==" semantics (e.g. numpy).
    if feat is None:
        return val
    retTree = {}
    retTree["spInd"] = feat
    retTree["spVal"] = val
    lset, rset = binSplitDataSet(dataset, feat, val)
    retTree["left"] = createTree(lset, leafType, errType, ops)
    retTree["right"] = createTree(rset, leafType, errType, ops)
    return retTree
def isTree(obj):
    """Return True if "obj" is an internal tree node (a dict), False for a leaf."""
    # isinstance() is the idiomatic check; it behaves identically for the
    # plain dicts produced by createTree() and is robust against the
    # fragile class-name string comparison used before.
    return isinstance(obj, dict)
def getMean(tree):
    """Collapse a subtree into a single leaf: recursive mean of its leaves."""
    for branch in ("right", "left"):
        if isTree(tree[branch]):
            tree[branch] = getMean(tree[branch])
    return (tree["left"] + tree["right"]) / 2
def prune(tree, testData):
    """
    Post-prune a regression tree (as built by createTree()) against
    held-out "testData": recursively prune both branches, then merge a
    pair of leaves whenever the merged (mean) leaf has a lower squared
    error on the test rows than keeping the split.

    Mutates "tree" in place and returns either the pruned dict or a
    single merged leaf value.

    NOTE: Python 2 only (print statement below).
    """
    # No test rows reach this subtree: collapse it to its mean value.
    if shape(testData)[0] == 0:
        return getMean(tree)
    # Split the test data the same way the tree splits, so each branch
    # is pruned against the rows that would actually reach it.
    if isTree(tree['right']) or isTree(tree['left']):
        lSet, rSet = binSplitDataSet(testData, tree["spInd"], tree["spVal"])
    if isTree(tree['left']):
        tree['left'] = prune(tree['left'], lSet)
    if isTree(tree['right']):
        tree['right'] = prune(tree['right'], rSet)
    # Both children are leaves now: compare split error vs. merged error.
    if not isTree(tree['left']) and not isTree(tree['right']):
        lSet, rSet = binSplitDataSet(testData, tree["spInd"], tree["spVal"])
        errorNoMerge = sum(power(lSet[:,-1] - tree['left'],2)) + \
            sum(power(rSet[:,-1] - tree['right'],2))
        treeMean = (tree['left'] + tree['right']) / 2.0
        errorMerge = sum(power(testData[:, -1] - treeMean, 2))
        if errorMerge < errorNoMerge:
            print "merging"
            return treeMean
        else: return tree
    else:
        return tree
def linearSolve(data):
    """
    Ordinary least squares fit of the last column of "data" (numpy
    matrix) against the remaining columns plus an intercept.

    Returns (ws, X, Y): weight column vector (intercept first), the
    design matrix and the target column.

    Raises NameError when X^T X is singular (as the original did).
    """
    m, n = shape(data)
    X = mat(ones((m, n)))
    X[:, 1:n] = data[:, 0:n-1]  # column 0 stays 1.0 (intercept term)
    # BUGFIX: removed a dead "Y = mat(ones((m, 1)))" allocation that was
    # immediately overwritten by the assignment below.
    Y = data[:, -1]
    xTx = X.T * X
    if linalg.det(xTx) == 0.0:
        raise NameError("singular matrix, can't invert, " +
                        "try increasing second value of ops")
    ws = xTx.I * (X.T * Y)
    return ws, X, Y
def modelLeaf(data):
    """Leaf value for a model tree: the fitted linear regression weights."""
    weights, _X, _Y = linearSolve(data)
    return weights
def modelErr(data):
    """Sum of squared residuals of a linear model fit to "data"."""
    weights, X, Y = linearSolve(data)
    residuals = Y - X * weights
    return sum(power(residuals, 2))
def regTreeEval(model, data):
    """
    Evaluate a regression-tree leaf: the model IS the predicted value.

    "data" is accepted (and ignored) for interface parity with
    modelTreeEval().
    """
    return float(model)
def modelTreeEval(model, data):
    """
    Evaluate a model-tree leaf: prepend a 1.0 intercept column to the
    input row "data" and return the scalar prediction X * model.
    """
    n_features = shape(data)[1]
    X = mat(ones((1, n_features + 1)))
    X[:, 1:] = data
    return float(X * model)
def treeForecast(tree, data, modelEval=regTreeEval):
    """
    Predict the value for a single input row by walking "tree" down to
    a leaf and evaluating it with "modelEval".
    """
    if not isTree(tree):
        return modelEval(tree, data)
    # Descend left when the feature value exceeds the split, else right
    branch = "left" if data[tree["spInd"]] > tree["spVal"] else "right"
    subtree = tree[branch]
    if isTree(subtree):
        return treeForecast(subtree, data, modelEval)
    return modelEval(subtree, data)
def createForecast(tree, testData, modelEval=regTreeEval):
    """Predict a value for every row of "testData"; returns an (m, 1) matrix."""
    m = len(testData)
    yHat = mat(zeros((m, 1)))
    for row in range(m):
        yHat[row, 0] = treeForecast(tree, testData[row], modelEval)
    return yHat
def main():
    """
    Demo driver for the regression-tree code: fit a regression tree, a
    model tree and a plain linear model on the bike-speed-vs-IQ training
    data and print each model's test-set correlation.

    Requires "bikeSpeedVsIq_train.txt" / "bikeSpeedVsIq_test.txt" in the
    working directory. Python 2 only (print statements).
    """
    # Earlier experiments from the book, kept commented for reference:
    #testMat = amat(eye(4))
    #print testMat
    #mat0, mat1 = binSplitDataSet(testMat, 1, 0.5)
    #print "mat0=", mat0
    #print "mat1=", mat1
    #tree = createTree(mat(loadDataSet("ex00.txt")))
    #print tree
    #tree2 = createTree(mat(loadDataSet("ex0.txt")))
    #print tree2
    #tree3 = createTree(mat(loadDataSet("ex0.txt")), ops=[0, 1])
    #print tree3
    # first call creates many leaves, second creates 2
    #tree4 = createTree(mat(loadDataSet("ex2.txt")))
    #print tree4
    #tree5 = createTree(mat(loadDataSet("ex2.txt")), ops=[10000, 4])
    #print tree5
    #tree6 = createTree(mat(loadDataSet("ex2.txt")), ops=[0, 1])
    #testData = mat(loadDataSet("ex2test.txt"))
    #prune(tree6, testData)
    #print tree6
    ## model trees
    #datamatrix = mat(loadDataSet("exp2.txt"))
    #tree7 = createTree(datamatrix, modelLeaf, modelErr, (1, 10))
    #print tree7
    ## bike speeds
    trainmatrix = mat(loadDataSet("bikeSpeedVsIq_train.txt"))
    testmatrix = mat(loadDataSet("bikeSpeedVsIq_test.txt"))
    # reg tree
    tree = createTree(trainmatrix, ops=(1, 20))
    yHat = createForecast(tree, testmatrix[:, 0])
    # NOTE(review): corrcoef yields r, not r^2, despite the label below
    print "r-squared(reg)=", corrcoef(yHat, testmatrix[:, 1], rowvar=0)[0, 1]
    # model tree
    mtree = createTree(trainmatrix, modelLeaf, modelErr, (1, 20))
    yHat = createForecast(mtree, testmatrix[:, 0], modelTreeEval)
    print "r-squared(model)=", corrcoef(yHat, testmatrix[:, 1], rowvar=0)[0, 1]
    # linear solver
    ws, X, Y = linearSolve(trainmatrix)
    # Manual prediction: intercept ws[0,0] plus slope ws[1,0] * feature
    for i in range(shape(testmatrix)[0]):
        yHat[i] = testmatrix[i,0] * ws[1,0] + ws[0,0]
    print "r-squared(lin)=", corrcoef(yHat, testmatrix[:, 1], rowvar=0)[0, 1]

if __name__ == "__main__":
    main()
UTF-8
Python
false
false
2,014
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.