Dataset schema (one row per column, with string-length or class statistics):

| column       | type          | stats        |
|--------------|---------------|--------------|
| commit       | stringlengths | 40 to 40     |
| old_file     | stringlengths | 4 to 118     |
| new_file     | stringlengths | 4 to 118     |
| old_contents | stringlengths | 10 to 2.94k  |
| new_contents | stringlengths | 21 to 3.18k  |
| subject      | stringlengths | 16 to 444    |
| message      | stringlengths | 17 to 2.63k  |
| lang         | stringclasses | 1 value      |
| license      | stringclasses | 13 values    |
| repos        | stringlengths | 5 to 43k     |
| ndiff        | stringlengths | 52 to 3.32k  |
| instruction  | stringlengths | 16 to 444    |
| content      | stringlengths | 133 to 4.32k |
| fuzzy_diff   | stringlengths | 16 to 3.18k  |
5556c3c0b4fc55f17de1f3d8d96288746a36775a
src/server/main.py
src/server/main.py
from twisted.internet.task import LoopingCall
from twisted.python import log as twistedLog

from src.shared import config
from src.server.game_state_manager import GameStateManager
from src.server.networking import runServer, ConnectionManager
from src.server.stdio import setupStdio

def main(args):
    connections = ConnectionManager()
    gameStateManager = GameStateManager(connections)
    connections.setGameStateHandler(gameStateManager)

    setupStdio(gameStateManager)

    loop = LoopingCall(gameStateManager.tick)
    deferred = loop.start(config.TICK_LENGTH)
    deferred.addErrback(twistedLog.err)

    runServer(args.port, connections)
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.python import log as twistedLog

from src.shared import config
from src.server.game_state_manager import GameStateManager
from src.server.networking import runServer, ConnectionManager
from src.server.stdio import setupStdio

def unhandledError(reason):
    twistedLog.err(reason, "Aborting due to unhandled error.")
    reactor.stop()

def main(args):
    connections = ConnectionManager()
    gameStateManager = GameStateManager(connections)
    connections.setGameStateHandler(gameStateManager)

    setupStdio(gameStateManager)

    loop = LoopingCall(gameStateManager.tick)
    deferred = loop.start(config.TICK_LENGTH)
    deferred.addErrback(unhandledError)

    runServer(args.port, connections)
Bring down the server on (some?) uncaught errors.
Bring down the server on (some?) uncaught errors.

I added an errback to the LoopingCall for gameStateManager.tick, so it'll be called if any exception gets raised out of one of those calls. The errback just prints a traceback and then brings down the server, ensuring that other clients get disconnected as well. This is at least some progress on #31, though it's hard to know if the issue is really fully fixed.
Python
mit
CheeseLord/warts,CheeseLord/warts
+ from twisted.internet import reactor
  from twisted.internet.task import LoopingCall
  from twisted.python import log as twistedLog

  from src.shared import config
  from src.server.game_state_manager import GameStateManager
  from src.server.networking import runServer, ConnectionManager
  from src.server.stdio import setupStdio
+
+ def unhandledError(reason):
+     twistedLog.err(reason, "Aborting due to unhandled error.")
+     reactor.stop()

  def main(args):
      connections = ConnectionManager()
      gameStateManager = GameStateManager(connections)
      connections.setGameStateHandler(gameStateManager)

      setupStdio(gameStateManager)

      loop = LoopingCall(gameStateManager.tick)
      deferred = loop.start(config.TICK_LENGTH)
-     deferred.addErrback(twistedLog.err)
+     deferred.addErrback(unhandledError)

      runServer(args.port, connections)
// ... existing code ...
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
// ... modified code ...
from src.server.stdio import setupStdio

def unhandledError(reason):
    twistedLog.err(reason, "Aborting due to unhandled error.")
    reactor.stop()
...
    deferred = loop.start(config.TICK_LENGTH)
    deferred.addErrback(unhandledError)
// ... rest of the code ...
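The row above captures a common Twisted pattern: the deferred returned by LoopingCall.start fires its errback when the periodic callable raises, so attaching a handler there is how one unhandled tick error can take down the whole reactor. A minimal, self-contained sketch of just that mechanism, with a hypothetical tick() that always fails:

```python
from twisted.internet import reactor
from twisted.internet.task import LoopingCall
from twisted.python import log

def tick():
    # Any exception escaping the periodic call stops the LoopingCall
    # and fires the errback of the deferred returned by start().
    raise RuntimeError("simulated tick failure")

def unhandled_error(reason):
    log.err(reason, "Aborting due to unhandled error.")
    reactor.stop()

loop = LoopingCall(tick)
loop.start(1.0).addErrback(unhandled_error)
reactor.run()
```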
b16016994f20945a8a2bbb63b9cb920d856ab66f
web/attempts/migrations/0008_add_submission_date.py
web/attempts/migrations/0008_add_submission_date.py
from __future__ import unicode_literals

from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('attempts', '0007_auto_20161004_0927'),
    ]

    operations = [
        migrations.AddField(
            model_name='attempt',
            name='submission_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='historicalattempt',
            name='submission_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.RunSQL(
            'UPDATE attempts_historicalattempt SET submission_date = history_date'
        ),
        migrations.RunSQL(
            '''UPDATE attempts_attempt
               SET submission_date = (
                   SELECT max(history_date)
                   FROM attempts_historicalattempt
                   WHERE attempts_attempt.user_id = user_id
                   AND attempts_attempt.part_id = part_id
               )
            '''
        ),
        migrations.AlterField(
            model_name='attempt',
            name='submission_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='historicalattempt',
            name='submission_date',
            field=models.DateTimeField(blank=True, editable=False),
        ),
    ]
from __future__ import unicode_literals

from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('attempts', '0007_auto_20161004_0927'),
    ]

    operations = [
        migrations.AddField(
            model_name='attempt',
            name='submission_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name='historicalattempt',
            name='submission_date',
            field=models.DateTimeField(null=True),
        ),
        migrations.RunSQL(
            'UPDATE attempts_historicalattempt SET submission_date = history_date'
        ),
        migrations.RunSQL(
            '''UPDATE attempts_attempt
               SET submission_date = subquery.submission_date
               FROM (
                   SELECT user_id, part_id, max(history_date) AS submission_date
                   FROM attempts_historicalattempt
                   GROUP BY user_id, part_id
               ) AS subquery
               WHERE attempts_attempt.user_id = subquery.user_id
               AND attempts_attempt.part_id = subquery.part_id
            '''
        ),
        migrations.AlterField(
            model_name='attempt',
            name='submission_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AlterField(
            model_name='historicalattempt',
            name='submission_date',
            field=models.DateTimeField(blank=True, editable=False),
        ),
    ]
Revert "Make migration SQLite compatible"
Revert "Make migration SQLite compatible" This reverts commit 768d85cccb17c8757dd8d14dad220d0b87568264.
Python
agpl-3.0
ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,matijapretnar/projekt-tomo,ul-fmf/projekt-tomo,matijapretnar/projekt-tomo
  from __future__ import unicode_literals

  from django.db import migrations, models

  class Migration(migrations.Migration):

      dependencies = [
          ('attempts', '0007_auto_20161004_0927'),
      ]

      operations = [
          migrations.AddField(
              model_name='attempt',
              name='submission_date',
              field=models.DateTimeField(null=True),
          ),
          migrations.AddField(
              model_name='historicalattempt',
              name='submission_date',
              field=models.DateTimeField(null=True),
          ),
          migrations.RunSQL(
              'UPDATE attempts_historicalattempt SET submission_date = history_date'
          ),
          migrations.RunSQL(
              '''UPDATE attempts_attempt
-                SET submission_date = (
+                SET submission_date = subquery.submission_date
-                    SELECT max(history_date)
+                FROM (
+                    SELECT user_id, part_id, max(history_date) AS submission_date
                     FROM attempts_historicalattempt
+                    GROUP BY user_id, part_id
+                ) AS subquery
-                WHERE attempts_attempt.user_id = user_id
+                WHERE attempts_attempt.user_id = subquery.user_id
-                AND attempts_attempt.part_id = part_id
+                AND attempts_attempt.part_id = subquery.part_id
-                )
              '''
          ),
          migrations.AlterField(
              model_name='attempt',
              name='submission_date',
              field=models.DateTimeField(auto_now=True),
          ),
          migrations.AlterField(
              model_name='historicalattempt',
              name='submission_date',
              field=models.DateTimeField(blank=True, editable=False),
          ),
      ]
Revert "Make migration SQLite compatible"
# ... existing code ...
            '''UPDATE attempts_attempt
               SET submission_date = subquery.submission_date
               FROM (
                   SELECT user_id, part_id, max(history_date) AS submission_date
                   FROM attempts_historicalattempt
                   GROUP BY user_id, part_id
               ) AS subquery
               WHERE attempts_attempt.user_id = subquery.user_id
               AND attempts_attempt.part_id = subquery.part_id
            '''
# ... rest of the code ...
228b53836e9569fa901de341d7486f85152e67f9
txircd/modules/rfc/cmode_t.py
txircd/modules/rfc/cmode_t.py
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements

class TopicLockMode(ModuleData, Mode):
    implements(IPlugin, IModuleData, IMode)

    name = "TopicLockMode"
    core = True
    affectedActions = { "commandpermission-TOPIC": 10 }

    def channelModes(self):
        return [ ("t", ModeType.NoParam, self) ]

    def actions(self):
        return [ ("modeactioncheck-channel-t-commandpermission-TOPIC", 10, self.channelHasMode) ]

    def channelHasMode(self, channel, user, data):
        if "t" in channel.modes:
            return ""
        return None

    def apply(self, actionType, channel, param, user, data):
        if not self.ircd.runActionUntilValue("checkchannellevel", "topic", channel, user):
            user.sendMessage(irc.ERR_CHANOPRIVSNEEDED, channel.name, "You do not have access to change the topic on this channel")
            return False
        return None

topicLockMode = TopicLockMode()
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements

class TopicLockMode(ModuleData, Mode):
    implements(IPlugin, IModuleData, IMode)

    name = "TopicLockMode"
    core = True
    affectedActions = { "commandpermission-TOPIC": 10 }

    def channelModes(self):
        return [ ("t", ModeType.NoParam, self) ]

    def actions(self):
        return [ ("modeactioncheck-channel-t-commandpermission-TOPIC", 10, self.channelHasMode) ]

    def channelHasMode(self, channel, user, data):
        if "t" in channel.modes:
            return ""
        return None

    def apply(self, actionType, channel, param, user, data):
        if "topic" not in data:
            return None
        if not self.ircd.runActionUntilValue("checkchannellevel", "topic", channel, user):
            user.sendMessage(irc.ERR_CHANOPRIVSNEEDED, channel.name, "You do not have access to change the topic on this channel")
            return False
        return None

topicLockMode = TopicLockMode()
Fix non-chanops not being able to query the topic
Fix non-chanops not being able to query the topic
Python
bsd-3-clause
Heufneutje/txircd,ElementalAlchemist/txircd
  from twisted.plugin import IPlugin
  from twisted.words.protocols import irc
  from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
  from txircd.utils import ModeType
  from zope.interface import implements

  class TopicLockMode(ModuleData, Mode):
      implements(IPlugin, IModuleData, IMode)

      name = "TopicLockMode"
      core = True
      affectedActions = { "commandpermission-TOPIC": 10 }

      def channelModes(self):
          return [ ("t", ModeType.NoParam, self) ]

      def actions(self):
          return [ ("modeactioncheck-channel-t-commandpermission-TOPIC", 10, self.channelHasMode) ]

      def channelHasMode(self, channel, user, data):
          if "t" in channel.modes:
              return ""
          return None

      def apply(self, actionType, channel, param, user, data):
+         if "topic" not in data:
+             return None
          if not self.ircd.runActionUntilValue("checkchannellevel", "topic", channel, user):
              user.sendMessage(irc.ERR_CHANOPRIVSNEEDED, channel.name, "You do not have access to change the topic on this channel")
              return False
          return None

  topicLockMode = TopicLockMode()
// ... existing code ...
    def apply(self, actionType, channel, param, user, data):
        if "topic" not in data:
            return None
        if not self.ircd.runActionUntilValue("checkchannellevel", "topic", channel, user):
// ... rest of the code ...
f29477416729df9cc198f679a2478f6a077ce365
app/util.py
app/util.py
import os
from typing import Any, Callable

SHOULD_CACHE = os.environ.get('ENV', 'development') == 'production'

def cached_function(func: Callable[..., Any]) -> Callable[..., Any]:
    data = {}

    def wrapper(*args: Any) -> Any:
        if not SHOULD_CACHE:
            return func(*args)
        cache_key = ' '.join([str(x) for x in args])
        if cache_key not in data:
            data[cache_key] = func(*args)
        return data[cache_key]

    wrapper.__qualname__ = func.__qualname__
    return wrapper
import inspect
import os
from typing import Any, Callable

SHOULD_CACHE = os.environ.get('ENV', 'development') == 'production'

def cached_function(func: Callable[..., Any]) -> Callable[..., Any]:
    data = {}

    def wrapper(*args: Any) -> Any:
        if not SHOULD_CACHE:
            return func(*args)
        cache_key = ' '.join([str(x) for x in args])
        if cache_key not in data:
            data[cache_key] = func(*args)
        return data[cache_key]

    wrapper.__qualname__ = func.__qualname__
    wrapper.__signature__ = inspect.signature(func)  # type: ignore
    return wrapper
Make cached_function not overwrite signature of wrapped function
Make cached_function not overwrite signature of wrapped function
Python
mit
albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com,albertyw/albertyw.com
+ import inspect
  import os
  from typing import Any, Callable

  SHOULD_CACHE = os.environ.get('ENV', 'development') == 'production'

  def cached_function(func: Callable[..., Any]) -> Callable[..., Any]:
      data = {}

      def wrapper(*args: Any) -> Any:
          if not SHOULD_CACHE:
              return func(*args)
          cache_key = ' '.join([str(x) for x in args])
          if cache_key not in data:
              data[cache_key] = func(*args)
          return data[cache_key]

      wrapper.__qualname__ = func.__qualname__
+     wrapper.__signature__ = inspect.signature(func)  # type: ignore
      return wrapper
# ... existing code ...
import inspect
import os
# ... modified code ...
    wrapper.__qualname__ = func.__qualname__
    wrapper.__signature__ = inspect.signature(func)  # type: ignore
    return wrapper
# ... rest of the code ...
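As an aside to the row above, which copies __qualname__ and __signature__ by hand: the standard-library route is functools.wraps, which copies the usual dunder attributes and sets __wrapped__, an attribute inspect.signature follows by default. A minimal sketch of that alternative (not the repository's actual code):

```python
import functools
import inspect

def cached_function(func):
    data = {}

    @functools.wraps(func)  # copies __qualname__ etc. and sets __wrapped__
    def wrapper(*args):
        cache_key = ' '.join(str(x) for x in args)
        if cache_key not in data:
            data[cache_key] = func(*args)
        return data[cache_key]
    return wrapper

@cached_function
def add(a, b):
    return a + b

print(inspect.signature(add))  # (a, b), recovered through __wrapped__
```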
730c7e6982f737c166924e1cae73eb34024fc4ef
AWSLambdas/vote.py
AWSLambdas/vote.py
import json
import boto3
import time
import decimal
import base64

from boto3.dynamodb.conditions import Key, Attr

def consolidate_disposition(disposition_map, records):
    for record in records:
        type = record['eventName']
        disposition = 0
        if type == "INSERT" or type == "MODIFY":
            disposition = int(record['dynamodb']['NewImage']['vote']['N'])
        if type == "MODIFY" or type == "REMOVE":
            disposition += -int(record['dynamodb']['OldImage']['vote']['N'])
        sample = record['dynamodb']['Keys']['sample']['B']
        disposition_map[sample] = disposition_map.get(sample, 0) + disposition

def vote_handler(event, context):
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('Samples')
    ratings = dict()
    consolidate_disposition(ratings, event['Records'])

    for (sample, vote) in ratings.iteritems():
        ident = sample[0:19]
        event = base64.standard_b64decode(sample[18:])
        print ident
        print event
import json
import boto3
import time
import decimal
import base64

from boto3.dynamodb.conditions import Key, Attr
from decimal import Decimal

def consolidate_disposition(disposition_map, records):
    for record in records:
        type = record['eventName']
        disposition = 0
        if type == "INSERT" or type == "MODIFY":
            disposition = int(record['dynamodb']['NewImage']['vote']['N'])
        if type == "MODIFY" or type == "REMOVE":
            disposition += -int(record['dynamodb']['OldImage']['vote']['N'])
        sample = record['dynamodb']['Keys']['sample']['B']
        disposition_map[sample] = disposition_map.get(sample, 0) + disposition

def vote_handler(event, context):
    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('Samples')
    ratings = dict()
    consolidate_disposition(ratings, event['Records'])

    for (sample, vote) in ratings.iteritems():
        ident = "b74z7/Q1TdqouIVyIXp+DQU=" """sample[0:19]"""
        event = "ChIJ1QvXETf7Z0sRBkcNQqQ89ag" """base64.standard_b64decode(sample[18:])"""
        up = 1
        down = -1
        table.update_item(
            Key={'event': event, 'id': ident},
            UpdateExpression='ADD ups :up, downs :down',
            ExpressionAttributeValues={':up':{'N': up}, ':down':{'N': down}}
        )
Update the ups and downs members of the Samples items.
Update the ups and downs members of the Samples items.
Python
mit
SandcastleApps/partyup,SandcastleApps/partyup,SandcastleApps/partyup
  import json
  import boto3
  import time
  import decimal
  import base64

  from boto3.dynamodb.conditions import Key, Attr
+ from decimal import Decimal

  def consolidate_disposition(disposition_map, records):
      for record in records:
          type = record['eventName']
          disposition = 0
          if type == "INSERT" or type == "MODIFY":
              disposition = int(record['dynamodb']['NewImage']['vote']['N'])
          if type == "MODIFY" or type == "REMOVE":
              disposition += -int(record['dynamodb']['OldImage']['vote']['N'])
          sample = record['dynamodb']['Keys']['sample']['B']
          disposition_map[sample] = disposition_map.get(sample, 0) + disposition

  def vote_handler(event, context):
      dynamodb = boto3.resource('dynamodb')
      table = dynamodb.Table('Samples')
      ratings = dict()
      consolidate_disposition(ratings, event['Records'])
-
+
      for (sample, vote) in ratings.iteritems():
-         ident = sample[0:19]
+         ident = "b74z7/Q1TdqouIVyIXp+DQU=" """sample[0:19]"""
-         event = base64.standard_b64decode(sample[18:])
+         event = "ChIJ1QvXETf7Z0sRBkcNQqQ89ag" """base64.standard_b64decode(sample[18:])"""
-         print ident
-         print event
-
+         up = 1
+         down = -1
+         table.update_item(
+             Key={'event': event, 'id': ident},
+             UpdateExpression='ADD ups :up, downs :down',
+             ExpressionAttributeValues={':up':{'N': up}, ':down':{'N': down}}
+         )
// ... existing code ...
from boto3.dynamodb.conditions import Key, Attr
from decimal import Decimal
// ... modified code ...
    consolidate_disposition(ratings, event['Records'])

    for (sample, vote) in ratings.iteritems():
        ident = "b74z7/Q1TdqouIVyIXp+DQU=" """sample[0:19]"""
        event = "ChIJ1QvXETf7Z0sRBkcNQqQ89ag" """base64.standard_b64decode(sample[18:])"""
        up = 1
        down = -1
        table.update_item(
            Key={'event': event, 'id': ident},
            UpdateExpression='ADD ups :up, downs :down',
            ExpressionAttributeValues={':up':{'N': up}, ':down':{'N': down}}
        )
// ... rest of the code ...
f19fb3bf2eb90f77b193dc25f5b8bec0dfd253bc
fabtools/require/mysql.py
fabtools/require/mysql.py
from __future__ import with_statement

from fabtools.mysql import *
from fabtools.deb import is_installed, preseed_package
from fabtools.require.deb import package
from fabtools.require.service import started

def server(version='5.1', password=None):
    """
    Require a MySQL server
    """
    if not is_installed("mysql-server-%s" % version):
        if password is None:
            password = prompt_password()

        with settings(hide('running')):
            preseed_package('mysql-server', {
                'mysql-server/root_password': ('password', password),
                'mysql-server/root_password_again': ('password', password),
            })

        package('mysql-server-%s' % version)

    started('mysql')

def user(name, password, **kwargs):
    """
    Require a MySQL user
    """
    if not user_exists(name, **kwargs):
        create_user(name, password, **kwargs)

def database(name, **kwargs):
    """
    Require a MySQL database
    """
    if not database_exists(name, **kwargs):
        create_database(name, **kwargs)
from __future__ import with_statement

from fabtools.mysql import *
from fabtools.deb import is_installed, preseed_package
from fabtools.require.deb import package
from fabtools.require.service import started

def server(version=None, password=None):
    """
    Require a MySQL server
    """
    if version:
        pkg_name = 'mysql-server-%s' % version
    else:
        pkg_name = 'mysql-server'

    if not is_installed(pkg_name):
        if password is None:
            password = prompt_password()

        with settings(hide('running')):
            preseed_package('mysql-server', {
                'mysql-server/root_password': ('password', password),
                'mysql-server/root_password_again': ('password', password),
            })

        package(pkg_name)

    started('mysql')

def user(name, password, **kwargs):
    """
    Require a MySQL user
    """
    if not user_exists(name, **kwargs):
        create_user(name, password, **kwargs)

def database(name, **kwargs):
    """
    Require a MySQL database
    """
    if not database_exists(name, **kwargs):
        create_database(name, **kwargs)
Fix require MySQL server on Ubuntu 12.04 LTS
Fix require MySQL server on Ubuntu 12.04 LTS
Python
bsd-2-clause
pombredanne/fabtools,ronnix/fabtools,fabtools/fabtools,badele/fabtools,sociateru/fabtools,davidcaste/fabtools,pahaz/fabtools,n0n0x/fabtools-python,ahnjungho/fabtools,hagai26/fabtools,prologic/fabtools,AMOSoft/fabtools,bitmonk/fabtools,wagigi/fabtools-python
  from __future__ import with_statement

  from fabtools.mysql import *
  from fabtools.deb import is_installed, preseed_package
  from fabtools.require.deb import package
  from fabtools.require.service import started

- def server(version='5.1', password=None):
+ def server(version=None, password=None):
      """
      Require a MySQL server
      """
-     if not is_installed("mysql-server-%s" % version):
+     if version:
+         pkg_name = 'mysql-server-%s' % version
+     else:
+         pkg_name = 'mysql-server'
+
+     if not is_installed(pkg_name):
          if password is None:
              password = prompt_password()

          with settings(hide('running')):
              preseed_package('mysql-server', {
                  'mysql-server/root_password': ('password', password),
                  'mysql-server/root_password_again': ('password', password),
              })

-         package('mysql-server-%s' % version)
+         package(pkg_name)

      started('mysql')

  def user(name, password, **kwargs):
      """
      Require a MySQL user
      """
      if not user_exists(name, **kwargs):
          create_user(name, password, **kwargs)

  def database(name, **kwargs):
      """
      Require a MySQL database
      """
      if not database_exists(name, **kwargs):
          create_database(name, **kwargs)
# ... existing code ...
def server(version=None, password=None):
    """
# ... modified code ...
    """
    if version:
        pkg_name = 'mysql-server-%s' % version
    else:
        pkg_name = 'mysql-server'

    if not is_installed(pkg_name):
        if password is None:
...
        package(pkg_name)
# ... rest of the code ...
8bc2b19e9aef410832555fb9962c243f0d4aef96
brink/decorators.py
brink/decorators.py
def require_request_model(cls, *args, validate=True, **kwargs):
    """
    Makes a handler require that a request body that map towards the given
    model is provided. Unless the ``validate`` option is set to ``False``
    the data will be validated against the model's fields.

    The model will be passed to the handler as the last positional argument. ::

        @require_request_model(Model)
        async def handle_model(request, model):
            return 200, model
    """
    def decorator(handler):
        async def new_handler(request):
            body = await request.json()
            model = cls(**body)

            if validate:
                model.validate()

            return await handler(request, *args, model, **kwargs)
        return new_handler
    return decorator
import asyncio


def require_request_model(cls, *args, validate=True, **kwargs):
    """
    Makes a handler require that a request body that map towards the given
    model is provided. Unless the ``validate`` option is set to ``False``
    the data will be validated against the model's fields.

    The model will be passed to the handler as the last positional argument. ::

        @require_request_model(Model)
        async def handle_model(request, model):
            return 200, model
    """
    def decorator(handler):
        async def new_handler(request):
            body = await request.json()
            model = cls(**body)

            if validate:
                model.validate()

            return await handler(request, *args, model, **kwargs)
        return new_handler
    return decorator


def use_ws_subhandlers(handler):
    """
    Allows the handler to return any number of **subhandlers** that will be
    run in parallel. This makes it much cleaner and easier to write a handler
    that both listens for incoming messages on the socket connection, while
    also watching a changefeed from RethinkDB.

    Example usage ::

        @use_ws_subhandlers
        async def handle_feed(request, ws):
            async def handle_incoming(_, ws):
                async for msg in ws:
                    await Item(value=msg.data).save()

            async def handle_change(_, ws):
                async for item in await Item.changes():
                    ws.send_json(item)

            return [handle_incoming, handle_change]
    """
    async def new_handler(request, ws):
        handlers = await handler(request, ws)
        tasks = [request.app.loop.create_task(h(request, ws))
                 for h in handlers]

        try:
            await asyncio.gather(*tasks)
        finally:
            for task in tasks:
                task.cancel()

            await ws.close()
    return new_handler
Add decorator for using websocket subhandlers
Add decorator for using websocket subhandlers
Python
bsd-3-clause
brinkframework/brink
+ import asyncio
+
+
  def require_request_model(cls, *args, validate=True, **kwargs):
      """
      Makes a handler require that a request body that map towards the given
      model is provided. Unless the ``validate`` option is set to ``False``
      the data will be validated against the model's fields.

      The model will be passed to the handler as the last positional argument. ::

          @require_request_model(Model)
          async def handle_model(request, model):
              return 200, model
      """
      def decorator(handler):
          async def new_handler(request):
              body = await request.json()
              model = cls(**body)

              if validate:
                  model.validate()

              return await handler(request, *args, model, **kwargs)
          return new_handler
      return decorator
+
+
+ def use_ws_subhandlers(handler):
+     """
+     Allows the handler to return any number of **subhandlers** that will be
+     run in parallel. This makes it much cleaner and easier to write a handler
+     that both listens for incoming messages on the socket connection, while
+     also watching a changefeed from RethinkDB.
+
+     Example usage ::
+
+         @use_ws_subhandlers
+         async def handle_feed(request, ws):
+             async def handle_incoming(_, ws):
+                 async for msg in ws:
+                     await Item(value=msg.data).save()
+
+             async def handle_change(_, ws):
+                 async for item in await Item.changes():
+                     ws.send_json(item)
+
+             return [handle_incoming, handle_change]
+     """
+     async def new_handler(request, ws):
+         handlers = await handler(request, ws)
+         tasks = [request.app.loop.create_task(h(request, ws))
+                  for h in handlers]
+
+         try:
+             await asyncio.gather(*tasks)
+         finally:
+             for task in tasks:
+                 task.cancel()
+
+             await ws.close()
+     return new_handler
+
# ... existing code ...
import asyncio


def require_request_model(cls, *args, validate=True, **kwargs):
# ... modified code ...
    return decorator


def use_ws_subhandlers(handler):
    """
    Allows the handler to return any number of **subhandlers** that will be
    run in parallel. This makes it much cleaner and easier to write a handler
    that both listens for incoming messages on the socket connection, while
    also watching a changefeed from RethinkDB.

    Example usage ::

        @use_ws_subhandlers
        async def handle_feed(request, ws):
            async def handle_incoming(_, ws):
                async for msg in ws:
                    await Item(value=msg.data).save()

            async def handle_change(_, ws):
                async for item in await Item.changes():
                    ws.send_json(item)

            return [handle_incoming, handle_change]
    """
    async def new_handler(request, ws):
        handlers = await handler(request, ws)
        tasks = [request.app.loop.create_task(h(request, ws))
                 for h in handlers]

        try:
            await asyncio.gather(*tasks)
        finally:
            for task in tasks:
                task.cancel()

            await ws.close()
    return new_handler
# ... rest of the code ...
38a2d86aed4ea1e94691993c5f49722f9a69ac8d
lisa/__init__.py
lisa/__init__.py
import warnings
import os
import sys

from lisa.version import __version__

# Raise an exception when a deprecated API is used from within a lisa.*
# submodule. This ensures that we don't use any deprecated APIs internally, so
# they are only kept for external backward compatibility purposes.
warnings.filterwarnings(
    action='error',
    category=DeprecationWarning,
    module=r'{}\..*'.format(__name__),
)

# When the deprecated APIs are used from __main__ (script or notebook), always
# show the warning
warnings.filterwarnings(
    action='always',
    category=DeprecationWarning,
    module=r'__main__',
)

# Prevent matplotlib from trying to connect to X11 server, for headless testing.
# Must be done before importing matplotlib.pyplot or pylab
try:
    import matplotlib
except ImportError:
    pass
else:
    if not os.getenv('DISPLAY'):
        matplotlib.use('Agg')

if sys.version_info < (3, 6):
    warnings.warn(
        'Python 3.6 will soon be required to run LISA, please upgrade from {} to any version higher than 3.6'.format(
            '.'.join(
                map(str, tuple(sys.version_info)[:3])
            ),
        ),
        DeprecationWarning,
    )

# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
import warnings
import os
import sys

from lisa.version import __version__

# Raise an exception when a deprecated API is used from within a lisa.*
# submodule. This ensures that we don't use any deprecated APIs internally, so
# they are only kept for external backward compatibility purposes.
warnings.filterwarnings(
    action='error',
    category=DeprecationWarning,
    module=r'{}\..*'.format(__name__),
)

# When the deprecated APIs are used from __main__ (script or notebook), always
# show the warning
warnings.filterwarnings(
    action='always',
    category=DeprecationWarning,
    module=r'__main__',
)

# Prevent matplotlib from trying to connect to X11 server, for headless testing.
# Must be done before importing matplotlib.pyplot or pylab
try:
    import matplotlib
except ImportError:
    pass
else:
    if not os.getenv('DISPLAY'):
        matplotlib.use('Agg')

# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
Remove Python < 3.6 version check
lisa: Remove Python < 3.6 version check

Since Python >= 3.6 is now mandatory, remove the check and the warning.
Python
apache-2.0
ARM-software/lisa,credp/lisa,credp/lisa,credp/lisa,credp/lisa,ARM-software/lisa,ARM-software/lisa,ARM-software/lisa
  import warnings
  import os
  import sys

  from lisa.version import __version__

  # Raise an exception when a deprecated API is used from within a lisa.*
  # submodule. This ensures that we don't use any deprecated APIs internally, so
  # they are only kept for external backward compatibility purposes.
  warnings.filterwarnings(
      action='error',
      category=DeprecationWarning,
      module=r'{}\..*'.format(__name__),
  )

  # When the deprecated APIs are used from __main__ (script or notebook), always
  # show the warning
  warnings.filterwarnings(
      action='always',
      category=DeprecationWarning,
      module=r'__main__',
  )

  # Prevent matplotlib from trying to connect to X11 server, for headless testing.
  # Must be done before importing matplotlib.pyplot or pylab
  try:
      import matplotlib
  except ImportError:
      pass
  else:
      if not os.getenv('DISPLAY'):
          matplotlib.use('Agg')

- if sys.version_info < (3, 6):
-     warnings.warn(
-         'Python 3.6 will soon be required to run LISA, please upgrade from {} to any version higher than 3.6'.format(
-             '.'.join(
-                 map(str, tuple(sys.version_info)[:3])
-             ),
-         ),
-         DeprecationWarning,
-     )
-
  # vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
# ... existing code ...

# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
# ... rest of the code ...
ab4333ad10713b0df25e0ff9bb46da3a0749326f
analyser/tasks.py
analyser/tasks.py
import os
import time

import requests

from krunchr.vendors.celery import celery


@celery.task
def get_file(url, path):
    name, ext = os.path.splitext(url)
    name = str(int(time.time()))
    path = "%s/%s%s" % (path, name, ext)

    response = requests.get(url)
    print path
    with open(path, 'w') as f:
        f.write(response.content)
import os
import time

import rethinkdb as r
import requests

from krunchr.vendors.celery import celery, db


@celery.task(bind=True)
def get_file(self, url, path):
    name, ext = os.path.splitext(url)
    name = str(int(time.time()))
    path = "%s/%s%s" % (path, name, ext)

    response = requests.get(url)
    with open(path, 'w') as f:
        f.write(response.content)

    r.table('jobs').filter({
        'task_id': self.request.id
    }).update({'state': 'done'}).run(db)
Update job state when we finish the task
Update job state when we finish the task
Python
apache-2.0
vtemian/kruncher
  import os
  import time

+ import rethinkdb as r
  import requests

- from krunchr.vendors.celery import celery
+ from krunchr.vendors.celery import celery, db


- @celery.task
+ @celery.task(bind=True)
- def get_file(url, path):
+ def get_file(self, url, path):
      name, ext = os.path.splitext(url)
      name = str(int(time.time()))
      path = "%s/%s%s" % (path, name, ext)

      response = requests.get(url)
-     print path
      with open(path, 'w') as f:
          f.write(response.content)
+
+     r.table('jobs').filter({
+         'task_id': self.request.id
+     }).update({'state': 'done'}).run(db)
+
# ... existing code ...
import rethinkdb as r
import requests
# ... modified code ...
from krunchr.vendors.celery import celery, db
...
@celery.task(bind=True)
def get_file(self, url, path):
    name, ext = os.path.splitext(url)
...
    response = requests.get(url)
    with open(path, 'w') as f:
...
        f.write(response.content)

    r.table('jobs').filter({
        'task_id': self.request.id
    }).update({'state': 'done'}).run(db)
# ... rest of the code ...
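The pivotal detail in the row above is bind=True, which makes Celery pass the task instance as the first argument so the body can read its own runtime context, here self.request.id, and match it against the stored job record. A minimal sketch of just that mechanism; the app name and broker URL are placeholders:

```python
from celery import Celery

app = Celery('demo', broker='redis://localhost:6379/0')  # placeholder broker

@app.task(bind=True)
def whoami(self):
    # bind=True injects the task instance, exposing self.request
    # (task id, args, retry count, ...) to the task body.
    return self.request.id
```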
9c8dbde9b39f6fcd713a7d118dcd613cc48cf54e
astropy/tests/tests/test_run_tests.py
astropy/tests/tests/test_run_tests.py
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

# test helper.run_tests function

import sys

from .. import helper
from ... import _get_test_runner
from .. helper import pytest


# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
    with helper.pytest.raises(ValueError):
        _get_test_runner().run_tests('fake.module')


# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
    with helper.pytest.raises(ValueError):
        _get_test_runner().run_tests(pastebin='not_an_option')


# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
    from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
    def test_run_after_2to3():
        helper.pytest.fail("Not running the 2to3'd tests!")


def test_deprecation_warning():
    if sys.version_info[:2] == (3, 3):
        with pytest.raises(DeprecationWarning):
            '{0:s}'.format(object())
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

# test helper.run_tests function

import warnings

from .. import helper
from ... import _get_test_runner
from .. helper import pytest


# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
    with helper.pytest.raises(ValueError):
        _get_test_runner().run_tests('fake.module')


# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
    with helper.pytest.raises(ValueError):
        _get_test_runner().run_tests(pastebin='not_an_option')


# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
    from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
    def test_run_after_2to3():
        helper.pytest.fail("Not running the 2to3'd tests!")


def test_deprecation_warning():
    with pytest.raises(DeprecationWarning):
        warnings.warn('test warning', DeprecationWarning)
Test that deprecation exceptions are working differently, after suggestion by @embray
Test that deprecation exceptions are working differently, after suggestion by @embray
Python
bsd-3-clause
larrybradley/astropy,aleksandr-bakanov/astropy,DougBurke/astropy,stargaser/astropy,DougBurke/astropy,joergdietrich/astropy,kelle/astropy,mhvk/astropy,funbaker/astropy,saimn/astropy,lpsinger/astropy,pllim/astropy,dhomeier/astropy,StuartLittlefair/astropy,lpsinger/astropy,larrybradley/astropy,aleksandr-bakanov/astropy,dhomeier/astropy,dhomeier/astropy,mhvk/astropy,kelle/astropy,saimn/astropy,larrybradley/astropy,StuartLittlefair/astropy,dhomeier/astropy,bsipocz/astropy,astropy/astropy,kelle/astropy,MSeifert04/astropy,funbaker/astropy,astropy/astropy,DougBurke/astropy,tbabej/astropy,tbabej/astropy,stargaser/astropy,AustereCuriosity/astropy,saimn/astropy,lpsinger/astropy,joergdietrich/astropy,joergdietrich/astropy,StuartLittlefair/astropy,astropy/astropy,AustereCuriosity/astropy,bsipocz/astropy,saimn/astropy,astropy/astropy,stargaser/astropy,funbaker/astropy,stargaser/astropy,DougBurke/astropy,mhvk/astropy,astropy/astropy,bsipocz/astropy,dhomeier/astropy,MSeifert04/astropy,joergdietrich/astropy,kelle/astropy,joergdietrich/astropy,kelle/astropy,larrybradley/astropy,AustereCuriosity/astropy,StuartLittlefair/astropy,lpsinger/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,tbabej/astropy,aleksandr-bakanov/astropy,pllim/astropy,pllim/astropy,larrybradley/astropy,pllim/astropy,AustereCuriosity/astropy,funbaker/astropy,mhvk/astropy,bsipocz/astropy,tbabej/astropy,pllim/astropy,saimn/astropy,MSeifert04/astropy,MSeifert04/astropy,tbabej/astropy,mhvk/astropy,AustereCuriosity/astropy,lpsinger/astropy
  from __future__ import (absolute_import, division, print_function,
                          unicode_literals)

  # test helper.run_tests function

- import sys
+ import warnings

  from .. import helper
  from ... import _get_test_runner
  from .. helper import pytest


  # run_tests should raise ValueError when asked to run on a module it can't find
  def test_module_not_found():
      with helper.pytest.raises(ValueError):
          _get_test_runner().run_tests('fake.module')


  # run_tests should raise ValueError when passed an invalid pastebin= option
  def test_pastebin_keyword():
      with helper.pytest.raises(ValueError):
          _get_test_runner().run_tests(pastebin='not_an_option')


  # tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
  # a syntax error would occur)
  try:
      from .run_after_2to3 import test_run_after_2to3
  except SyntaxError:
      def test_run_after_2to3():
          helper.pytest.fail("Not running the 2to3'd tests!")


  def test_deprecation_warning():
-     if sys.version_info[:2] == (3, 3):
-         with pytest.raises(DeprecationWarning):
+     with pytest.raises(DeprecationWarning):
-             '{0:s}'.format(object())
+         warnings.warn('test warning', DeprecationWarning)
...
import warnings
...
def test_deprecation_warning():
    with pytest.raises(DeprecationWarning):
        warnings.warn('test warning', DeprecationWarning)
...
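One note on the row above: pytest.raises(DeprecationWarning) only observes a warning when the active warning filters escalate it to an exception; the purpose-built assertion is pytest.warns, which checks the warning directly regardless of filters. A minimal sketch of that variant:

```python
import warnings
import pytest

def test_deprecation_warning():
    # pytest.warns asserts the warning is emitted without relying on
    # warning filters turning it into an error.
    with pytest.warns(DeprecationWarning):
        warnings.warn('test warning', DeprecationWarning)
```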
bea8123561c24391a6db368773a56a04a1a98fb2
dataprep/dataframe.py
dataprep/dataframe.py
from pyspark.sql import SQLContext, Row

lines = sc.textFile("/user/admin/Wikipedia/*")
tokens = lines.map(lambda l: l.split("\t"))
data = tokens.map(lambda t: Row(year=int(t[0]), month=int(t[1]), day=int(t[2]), hour=int(t[3]), page=t[4], hits=int(t[5])))

sqlContext = SQLContext(sc)
wtDataFrame = sqlContext.createDataFrame(data)
wtDataFrame.registerTempTable("wt")
hitCountsRDD = sqlContext.sql("SELECT hits, COUNT(*) AS c FROM wt GROUP BY hits ORDER BY hits").cache()
hitCounts = hitCountsRDD.collect()
from pyspark.sql import SQLContext, Row

lines = sc.textFile("/user/admin/Wikipedia/*")

def parse_line(line):
    tokens = line.split('\t')
    return Row(page=tokens[4], hits=int(tokens[5]))
data = lines.map(parse_line)

sqlContext = SQLContext(sc)
wtDataFrame = sqlContext.createDataFrame(data)
wtDataFrame.registerTempTable("wt")
hitCountsRDD = sqlContext.sql("SELECT hits, COUNT(*) AS c FROM wt GROUP BY hits ORDER BY hits").cache()
hitCounts = hitCountsRDD.collect()
Use parse_line function like in later sections
Use parse_line function like in later sections
Python
apache-2.0
aba1476/ds-for-wall-street,thekovinc/ds-for-wall-street,cdalzell/ds-for-wall-street,nishantyp/ds-for-wall-street
  from pyspark.sql import SQLContext, Row

  lines = sc.textFile("/user/admin/Wikipedia/*")
- tokens = lines.map(lambda l: l.split("\t"))
- data = tokens.map(lambda t: Row(year=int(t[0]), month=int(t[1]), day=int(t[2]), hour=int(t[3]), page=t[4], hits=int(t[5])))
-
+ def parse_line(line):
+     tokens = line.split('\t')
+     return Row(page=tokens[4], hits=int(tokens[5]))
+ data = lines.map(parse_line)
  sqlContext = SQLContext(sc)
  wtDataFrame = sqlContext.createDataFrame(data)
  wtDataFrame.registerTempTable("wt")
  hitCountsRDD = sqlContext.sql("SELECT hits, COUNT(*) AS c FROM wt GROUP BY hits ORDER BY hits").cache()
  hitCounts = hitCountsRDD.collect()
Use parse_line function like in later sections
## Code Before: from pyspark.sql import SQLContext, Row

lines = sc.textFile("/user/admin/Wikipedia/*")
tokens = lines.map(lambda l: l.split("\t"))
data = tokens.map(lambda t: Row(year=int(t[0]), month=int(t[1]), day=int(t[2]), hour=int(t[3]), page=t[4], hits=int(t[5])))

sqlContext = SQLContext(sc)
wtDataFrame = sqlContext.createDataFrame(data)
wtDataFrame.registerTempTable("wt")

hitCountsRDD = sqlContext.sql("SELECT hits, COUNT(*) AS c FROM wt GROUP BY hits ORDER BY hits").cache()
hitCounts = hitCountsRDD.collect()
## Instruction: Use parse_line function like in later sections
## Code After: from pyspark.sql import SQLContext, Row

lines = sc.textFile("/user/admin/Wikipedia/*")

def parse_line(line):
    tokens = line.split('\t')
    return Row(page=tokens[4], hits=int(tokens[5]))

data = lines.map(parse_line)

sqlContext = SQLContext(sc)
wtDataFrame = sqlContext.createDataFrame(data)
wtDataFrame.registerTempTable("wt")

hitCountsRDD = sqlContext.sql("SELECT hits, COUNT(*) AS c FROM wt GROUP BY hits ORDER BY hits").cache()
hitCounts = hitCountsRDD.collect()

...
lines = sc.textFile("/user/admin/Wikipedia/*")

def parse_line(line):
    tokens = line.split('\t')
    return Row(page=tokens[4], hits=int(tokens[5]))

data = lines.map(parse_line)
...
92cb843c1b6ada9b63038ed1ce22f83ee6146aff
jazzy/scope.py
jazzy/scope.py
import uuid

class Scope:
    def __init__(self):
        self.pc = 0;
        self.variables = {}
        self.lvalue = self
        self.rvalue = self
        self.stack = [1,2,3]
        self.name = uuid.uuid1()

    def GetVar(self, name):
        if name in self.variables:
            return self.variables[name]
        else:
            return None

    def SetVar(self, name, value):
        self.variables[name] = value

    def Step(self):
        self.pc = self.pc+1

    def PC(self,number=None):
        if number is not None:
            self.pc = number
        return self.pc

    def GetStackTop(self):
        return self.stack[-1]

    def LValue(self, newScope = None ):
        if newScope is not None:
            self.lvalue = newScope
        return self.lvalue

    def RValue(self, newScope = None ):
        if newScope is not None:
            self.rvalue = newScope
        return self.rvalue

import uuid

class Scope:
    def __init__(self):
        self.pc = 0;
        self.variables = {}
        self.lvalue = self
        self.rvalue = self
        self.stack = [1,2,3]
        self.name = uuid.uuid1()

    def GetVar(self, name):
        if name in self.variables:
            return self.variables[name]
        else:
            return None

    def SetVar(self, name, value):
        self.variables[name] = value

    def GetAddress(self, name):
        if name in self.variables:
            return list(self.variables.keys()).index(name)
        else:
            return None
    def GetVarFromAddress(self, addr):
        _vars = list(self.variables.keys())
        if addr < len(_vars):
            name = list(self.variables.keys())[addr]
            return (name, self.variables[name])
        else:
            return None

    def Step(self):
        self.pc = self.pc+1

    def PC(self,number=None):
        if number is not None:
            self.pc = number
        return self.pc

    def GetStackTop(self):
        return self.stack[-1]

    def LValue(self, newScope = None ):
        if newScope is not None:
            self.lvalue = newScope
        return self.lvalue

    def RValue(self, newScope = None ):
        if newScope is not None:
            self.rvalue = newScope
        return self.rvalue
Add functions to get/set variables addresses
Add functions to get/set variables addresses Since some of the jaz commands depend on the address of an variable, made function to obtain it.
Python
mit
joewashear007/jazzy
 import uuid
 
 class Scope:
     def __init__(self):
         self.pc = 0;
         self.variables = {}
         self.lvalue = self
         self.rvalue = self
         self.stack = [1,2,3]
         self.name = uuid.uuid1()
 
     def GetVar(self, name):
         if name in self.variables:
             return self.variables[name]
         else:
             return None
 
     def SetVar(self, name, value):
         self.variables[name] = value
+ 
+     def GetAddress(self, name):
+         if name in self.variables:
+             return list(self.variables.keys()).index(name)
+         else:
+             return None
+     def GetVarFromAddress(self, addr):
+         _vars = list(self.variables.keys())
+         if addr < len(_vars):
+             name = list(self.variables.keys())[addr]
+             return (name, self.variables[name])
+         else:
+             return None
+ 
     def Step(self):
         self.pc = self.pc+1
 
     def PC(self,number=None):
         if number is not None:
             self.pc = number
         return self.pc
 
     def GetStackTop(self):
         return self.stack[-1]
 
     def LValue(self, newScope = None ):
         if newScope is not None:
             self.lvalue = newScope
         return self.lvalue
 
     def RValue(self, newScope = None ):
         if newScope is not None:
             self.rvalue = newScope
         return self.rvalue
Add functions to get/set variables addresses
## Code Before: import uuid

class Scope:
    def __init__(self):
        self.pc = 0;
        self.variables = {}
        self.lvalue = self
        self.rvalue = self
        self.stack = [1,2,3]
        self.name = uuid.uuid1()

    def GetVar(self, name):
        if name in self.variables:
            return self.variables[name]
        else:
            return None

    def SetVar(self, name, value):
        self.variables[name] = value

    def Step(self):
        self.pc = self.pc+1

    def PC(self,number=None):
        if number is not None:
            self.pc = number
        return self.pc

    def GetStackTop(self):
        return self.stack[-1]

    def LValue(self, newScope = None ):
        if newScope is not None:
            self.lvalue = newScope
        return self.lvalue

    def RValue(self, newScope = None ):
        if newScope is not None:
            self.rvalue = newScope
        return self.rvalue
## Instruction: Add functions to get/set variables addresses
## Code After: import uuid

class Scope:
    def __init__(self):
        self.pc = 0;
        self.variables = {}
        self.lvalue = self
        self.rvalue = self
        self.stack = [1,2,3]
        self.name = uuid.uuid1()

    def GetVar(self, name):
        if name in self.variables:
            return self.variables[name]
        else:
            return None

    def SetVar(self, name, value):
        self.variables[name] = value

    def GetAddress(self, name):
        if name in self.variables:
            return list(self.variables.keys()).index(name)
        else:
            return None
    def GetVarFromAddress(self, addr):
        _vars = list(self.variables.keys())
        if addr < len(_vars):
            name = list(self.variables.keys())[addr]
            return (name, self.variables[name])
        else:
            return None

    def Step(self):
        self.pc = self.pc+1

    def PC(self,number=None):
        if number is not None:
            self.pc = number
        return self.pc

    def GetStackTop(self):
        return self.stack[-1]

    def LValue(self, newScope = None ):
        if newScope is not None:
            self.lvalue = newScope
        return self.lvalue

    def RValue(self, newScope = None ):
        if newScope is not None:
            self.rvalue = newScope
        return self.rvalue

...
        self.variables[name] = value

    def GetAddress(self, name):
        if name in self.variables:
            return list(self.variables.keys()).index(name)
        else:
            return None
    def GetVarFromAddress(self, addr):
        _vars = list(self.variables.keys())
        if addr < len(_vars):
            name = list(self.variables.keys())[addr]
            return (name, self.variables[name])
        else:
            return None
...
f4701ac73f884ef28e62bb35adc81330ce512171
goto_last_edit.py
goto_last_edit.py
import sublime_plugin


# the last edited Region, keyed to View.id
_last_edits = {}


class RecordLastEdit(sublime_plugin.EventListener):
    def on_modified(self, view):
        _last_edits[view.id()] = view.sel()[0]


class GotoLastEdit(sublime_plugin.TextCommand):
    def run(self, edit):
        last_edit = _last_edits.get(self.view.id(), None)

        if last_edit != None:
            self.view.sel().clear()
            self.view.sel().add(last_edit)
            self.view.show(last_edit)

import sublime, sublime_plugin


LAST_EDITS_SETTING = 'last_edits'


class RecordLastEdit(sublime_plugin.EventListener):
    def on_modified(self, view):
        last_edits = view.settings().get(LAST_EDITS_SETTING, {})
        edit_position = view.sel()[0]
        last_edits[str(view.id())] = {'a': edit_position.a, 'b': edit_position.b}
        view.settings().set(LAST_EDITS_SETTING, last_edits)


class GotoLastEdit(sublime_plugin.TextCommand):
    # The position the cursor was at before the command fired. Saved when the
    # command is run, so that if the user runs the command again before making
    # another edit in the file, the cursor returns to its original position.
    original_position = None

    def move_cursor_to_region(self, region):
        """ Clear the cursor's position and move it to `region`. """
        cursor = self.view.sel()
        self.original_position = cursor[0]
        cursor.clear()
        cursor.add(region)
        self.view.show(region)

    def run(self, edit):
        """
        If there was a last edit recorded for the view, store the current
        position as self.original_position and move the cursor to the position
        of the last edit.

        If the cursor is currently at the same position as the last edit, and
        there `self.original_position` is available, then return the cursor to
        its original position.
        """
        last_edits = self.view.settings().get(LAST_EDITS_SETTING, {})
        last_edit = last_edits.get(str(self.view.id()), None)
        current_position = self.view.sel()[0]

        if last_edit is None:
            return

        last_edit_region = sublime.Region(
            long(last_edit['a']), long(last_edit['b']))

        if self.original_position is not None \
                and current_position == last_edit_region:
            self.move_cursor_to_region(self.original_position)
            return

        self.move_cursor_to_region(last_edit_region)
Return to original if cursor is already at last edit
Return to original if cursor is already at last edit
Python
mit
abrookins/GotoLastEdit
- import sublime_plugin
+ import sublime, sublime_plugin
 
+ LAST_EDITS_SETTING = 'last_edits'
- # the last edited Region, keyed to View.id
- _last_edits = {}
 
 
 class RecordLastEdit(sublime_plugin.EventListener):
     def on_modified(self, view):
-         _last_edits[view.id()] = view.sel()[0]
+         last_edits = view.settings().get(LAST_EDITS_SETTING, {})
+         edit_position = view.sel()[0]
+         last_edits[str(view.id())] = {'a': edit_position.a, 'b': edit_position.b}
+         view.settings().set(LAST_EDITS_SETTING, last_edits)
 
 
 class GotoLastEdit(sublime_plugin.TextCommand):
-     def run(self, edit):
-         last_edit = _last_edits.get(self.view.id(), None)
+     # The position the cursor was at before the command fired. Saved when the
+     # command is run, so that if the user runs the command again before making
+     # another edit in the file, the cursor returns to its original position.
+     original_position = None
 
-         if last_edit != None:
-             self.view.sel().clear()
-             self.view.sel().add(last_edit)
+     def move_cursor_to_region(self, region):
+         """ Clear the cursor's position and move it to `region`. """
+         cursor = self.view.sel()
+         self.original_position = cursor[0]
+         cursor.clear()
+         cursor.add(region)
-             self.view.show(last_edit)
+         self.view.show(region)
 
+     def run(self, edit):
+         """
+         If there was a last edit recorded for the view, store the current
+         position as self.original_position and move the cursor to the position
+         of the last edit.
+ 
+         If the cursor is currently at the same position as the last edit, and
+         there `self.original_position` is available, then return the cursor to
+         its original position.
+         """
+         last_edits = self.view.settings().get(LAST_EDITS_SETTING, {})
+         last_edit = last_edits.get(str(self.view.id()), None)
+         current_position = self.view.sel()[0]
+ 
+         if last_edit is None:
+             return
+ 
+         last_edit_region = sublime.Region(
+             long(last_edit['a']), long(last_edit['b']))
+ 
+         if self.original_position is not None \
+             and current_position == last_edit_region:
+             self.move_cursor_to_region(self.original_position)
+             return
+ 
+         self.move_cursor_to_region(last_edit_region)
+ 
Return to original if cursor is already at last edit
## Code Before: import sublime_plugin


# the last edited Region, keyed to View.id
_last_edits = {}


class RecordLastEdit(sublime_plugin.EventListener):
    def on_modified(self, view):
        _last_edits[view.id()] = view.sel()[0]


class GotoLastEdit(sublime_plugin.TextCommand):
    def run(self, edit):
        last_edit = _last_edits.get(self.view.id(), None)

        if last_edit != None:
            self.view.sel().clear()
            self.view.sel().add(last_edit)
            self.view.show(last_edit)
## Instruction: Return to original if cursor is already at last edit
## Code After: import sublime, sublime_plugin


LAST_EDITS_SETTING = 'last_edits'


class RecordLastEdit(sublime_plugin.EventListener):
    def on_modified(self, view):
        last_edits = view.settings().get(LAST_EDITS_SETTING, {})
        edit_position = view.sel()[0]
        last_edits[str(view.id())] = {'a': edit_position.a, 'b': edit_position.b}
        view.settings().set(LAST_EDITS_SETTING, last_edits)


class GotoLastEdit(sublime_plugin.TextCommand):
    # The position the cursor was at before the command fired. Saved when the
    # command is run, so that if the user runs the command again before making
    # another edit in the file, the cursor returns to its original position.
    original_position = None

    def move_cursor_to_region(self, region):
        """ Clear the cursor's position and move it to `region`. """
        cursor = self.view.sel()
        self.original_position = cursor[0]
        cursor.clear()
        cursor.add(region)
        self.view.show(region)

    def run(self, edit):
        """
        If there was a last edit recorded for the view, store the current
        position as self.original_position and move the cursor to the position
        of the last edit.

        If the cursor is currently at the same position as the last edit, and
        there `self.original_position` is available, then return the cursor to
        its original position.
        """
        last_edits = self.view.settings().get(LAST_EDITS_SETTING, {})
        last_edit = last_edits.get(str(self.view.id()), None)
        current_position = self.view.sel()[0]

        if last_edit is None:
            return

        last_edit_region = sublime.Region(
            long(last_edit['a']), long(last_edit['b']))

        if self.original_position is not None \
                and current_position == last_edit_region:
            self.move_cursor_to_region(self.original_position)
            return

        self.move_cursor_to_region(last_edit_region)

...
import sublime, sublime_plugin
...
LAST_EDITS_SETTING = 'last_edits'
...
    def on_modified(self, view):
        last_edits = view.settings().get(LAST_EDITS_SETTING, {})
        edit_position = view.sel()[0]
        last_edits[str(view.id())] = {'a': edit_position.a, 'b': edit_position.b}
        view.settings().set(LAST_EDITS_SETTING, last_edits)
...
class GotoLastEdit(sublime_plugin.TextCommand):
    # The position the cursor was at before the command fired. Saved when the
    # command is run, so that if the user runs the command again before making
    # another edit in the file, the cursor returns to its original position.
    original_position = None

    def move_cursor_to_region(self, region):
        """ Clear the cursor's position and move it to `region`. """
        cursor = self.view.sel()
        self.original_position = cursor[0]
        cursor.clear()
        cursor.add(region)
        self.view.show(region)

    def run(self, edit):
        """
        If there was a last edit recorded for the view, store the current
        position as self.original_position and move the cursor to the position
        of the last edit.

        If the cursor is currently at the same position as the last edit, and
        there `self.original_position` is available, then return the cursor to
        its original position.
        """
        last_edits = self.view.settings().get(LAST_EDITS_SETTING, {})
        last_edit = last_edits.get(str(self.view.id()), None)
        current_position = self.view.sel()[0]

        if last_edit is None:
            return

        last_edit_region = sublime.Region(
            long(last_edit['a']), long(last_edit['b']))

        if self.original_position is not None \
                and current_position == last_edit_region:
            self.move_cursor_to_region(self.original_position)
            return

        self.move_cursor_to_region(last_edit_region)
...
175c1775aa7f5cd0ba2022e95389507d8a4c87dc
syncplay/__init__.py
syncplay/__init__.py
version = '1.6.6'
revision = ' development'
milestone = 'Yoitsu'
release_number = '87'
projectURL = 'https://syncplay.pl/'

version = '1.6.6'
revision = ' beta 1'
milestone = 'Yoitsu'
release_number = '88'
projectURL = 'https://syncplay.pl/'
Mark as 1.6.6 beta 1
Mark as 1.6.6 beta 1
Python
apache-2.0
alby128/syncplay,Syncplay/syncplay,Syncplay/syncplay,alby128/syncplay
 version = '1.6.6'
- revision = ' development'
+ revision = ' beta 1'
 milestone = 'Yoitsu'
- release_number = '87'
+ release_number = '88'
 projectURL = 'https://syncplay.pl/'
Mark as 1.6.6 beta 1
## Code Before: version = '1.6.6'
revision = ' development'
milestone = 'Yoitsu'
release_number = '87'
projectURL = 'https://syncplay.pl/'
## Instruction: Mark as 1.6.6 beta 1
## Code After: version = '1.6.6'
revision = ' beta 1'
milestone = 'Yoitsu'
release_number = '88'
projectURL = 'https://syncplay.pl/'

// ... existing code ...
version = '1.6.6'
revision = ' beta 1'
milestone = 'Yoitsu'
release_number = '88'
projectURL = 'https://syncplay.pl/'
// ... rest of the code ...
df5e6bdd03ad666afdd9b61745eec95afc08e9cb
tests/test_views.py
tests/test_views.py
""" Tests for the main server file. """ from unittest import TestCase from unittest.mock import patch from app import views class ViewsTestCase(TestCase): """ Our main server testcase. """ def test_ping(self): self.assertEqual(views.ping(None, None), 'pong') @patch('app.views.notify_recipient') @patch('app.views.is_valid_pull_request') def test_valid_pull_request(self, validator, notifier): validator.return_value = True notifier.return_value = True result = views.pull_request({}, None) self.assertEqual(result, 'Recipient Notified') @patch('app.views.is_valid_pull_request') def test_invalid_pull_request(self, validator): validator.return_value = False result = views.pull_request({}, None) self.assertRegex(result, 'ignored')
""" Tests for the main server file. """ from unittest import TestCase from unittest.mock import patch from app import views class ViewsTestCase(TestCase): """ Our main server testcase. """ def test_ping(self): self.assertEqual(views.ping(None, None), 'pong') @patch('app.views.notify_recipient') @patch('app.views.is_valid_pull_request') def test_valid_pull_request(self, validator, notifier): """ Should notify upon a valid pull request. """ validator.return_value = True notifier.return_value = True result = views.pull_request({}, None) self.assertEqual(result, 'Recipient Notified') @patch('app.views.is_valid_pull_request') def test_invalid_pull_request(self, validator): """ Should ignore an invalid pull request. """ validator.return_value = False result = views.pull_request({}, None) self.assertRegex(result, 'ignored')
Fix last code quality issues
Fix last code quality issues
Python
mit
DobaTech/github-review-slack-notifier
""" Tests for the main server file. """ from unittest import TestCase from unittest.mock import patch from app import views class ViewsTestCase(TestCase): """ Our main server testcase. """ def test_ping(self): self.assertEqual(views.ping(None, None), 'pong') @patch('app.views.notify_recipient') @patch('app.views.is_valid_pull_request') def test_valid_pull_request(self, validator, notifier): + """ Should notify upon a valid pull request. """ validator.return_value = True notifier.return_value = True result = views.pull_request({}, None) self.assertEqual(result, 'Recipient Notified') @patch('app.views.is_valid_pull_request') def test_invalid_pull_request(self, validator): + """ Should ignore an invalid pull request. """ validator.return_value = False result = views.pull_request({}, None) self.assertRegex(result, 'ignored')
Fix last code quality issues
## Code Before: """ Tests for the main server file. """ from unittest import TestCase from unittest.mock import patch from app import views class ViewsTestCase(TestCase): """ Our main server testcase. """ def test_ping(self): self.assertEqual(views.ping(None, None), 'pong') @patch('app.views.notify_recipient') @patch('app.views.is_valid_pull_request') def test_valid_pull_request(self, validator, notifier): validator.return_value = True notifier.return_value = True result = views.pull_request({}, None) self.assertEqual(result, 'Recipient Notified') @patch('app.views.is_valid_pull_request') def test_invalid_pull_request(self, validator): validator.return_value = False result = views.pull_request({}, None) self.assertRegex(result, 'ignored') ## Instruction: Fix last code quality issues ## Code After: """ Tests for the main server file. """ from unittest import TestCase from unittest.mock import patch from app import views class ViewsTestCase(TestCase): """ Our main server testcase. """ def test_ping(self): self.assertEqual(views.ping(None, None), 'pong') @patch('app.views.notify_recipient') @patch('app.views.is_valid_pull_request') def test_valid_pull_request(self, validator, notifier): """ Should notify upon a valid pull request. """ validator.return_value = True notifier.return_value = True result = views.pull_request({}, None) self.assertEqual(result, 'Recipient Notified') @patch('app.views.is_valid_pull_request') def test_invalid_pull_request(self, validator): """ Should ignore an invalid pull request. """ validator.return_value = False result = views.pull_request({}, None) self.assertRegex(result, 'ignored')
... def test_valid_pull_request(self, validator, notifier): """ Should notify upon a valid pull request. """ validator.return_value = True ... def test_invalid_pull_request(self, validator): """ Should ignore an invalid pull request. """ validator.return_value = False ...
c8429ec00772455c981ebb799f0c87de55bda64e
django_fixmystreet/backoffice/forms.py
django_fixmystreet/backoffice/forms.py
from django import forms
from django_fixmystreet.fixmystreet.models import FMSUser, getLoggedInUserId
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.contrib.sessions.models import Session

class ManagersChoiceField (forms.fields.ChoiceField):
    def __init__(self, *args, **kwargs):
        # assemble the opt groups.
        choices = []
        choices.append(('', ugettext_lazy("Select a manager")))
        currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation
        managers = FMSUser.objects.filter(manager=True)
        managers = managers.filter(organisation_id=currentUserOrganisationId)
        for manager in managers:
            choices.append((manager.pk,manager.first_name+manager.last_name))
        super(ManagersChoiceField,self).__init__(choices,*args,**kwargs)

    def clean(self, value):
        super(ManagersChoiceField,self).clean(value)
        try:
            model = FMSUser.objects.get(pk=value)
        except FMSUser.DoesNotExist:
            raise ValidationError(self.error_messages['invalid_choice'])
        return model

class ManagersListForm(forms.Form):
    manager=ManagersChoiceField(label="")

from django import forms
from django_fixmystreet.fixmystreet.models import FMSUser, getLoggedInUserId
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.contrib.sessions.models import Session
from django.contrib.auth.decorators import login_required


class ManagersChoiceField (forms.fields.ChoiceField):
    def __init__(self, *args, **kwargs):
        choices = []
        choices.append(('', ugettext_lazy("Select a manager")))
        currentUserOrganisationId = 1
        if Session.objects.all()[0].session_key:
            currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation
        managers = FMSUser.objects.filter(manager=True)
        managers = managers.filter(organisation_id=currentUserOrganisationId)
        for manager in managers:
            choices.append((manager.pk,manager.first_name+manager.last_name))
        super(ManagersChoiceField,self).__init__(choices,*args,**kwargs)

    def clean(self, value):
        super(ManagersChoiceField,self).clean(value)
        try:
            model = FMSUser.objects.get(pk=value)
        except FMSUser.DoesNotExist:
            raise ValidationError(self.error_messages['invalid_choice'])
        return model


class ManagersListForm(forms.Form):
    manager=ManagersChoiceField(label="")
Fix user not defined error for not logged in users
Fix user not defined error for not logged in users
Python
agpl-3.0
IMIO/django-fixmystreet,IMIO/django-fixmystreet,IMIO/django-fixmystreet,IMIO/django-fixmystreet
 from django import forms
 from django_fixmystreet.fixmystreet.models import FMSUser, getLoggedInUserId
 from django.contrib.auth.models import User
 from django.conf import settings
 from django.utils.translation import ugettext_lazy
 from django.contrib.sessions.models import Session
+ from django.contrib.auth.decorators import login_required
+ 
 
 class ManagersChoiceField (forms.fields.ChoiceField):
     def __init__(self, *args, **kwargs):
-         # assemble the opt groups.
         choices = []
         choices.append(('', ugettext_lazy("Select a manager")))
+         currentUserOrganisationId = 1
+         if Session.objects.all()[0].session_key:
-         currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation
+             currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation
         managers = FMSUser.objects.filter(manager=True)
         managers = managers.filter(organisation_id=currentUserOrganisationId)
         for manager in managers:
             choices.append((manager.pk,manager.first_name+manager.last_name))
         super(ManagersChoiceField,self).__init__(choices,*args,**kwargs)
 
     def clean(self, value):
         super(ManagersChoiceField,self).clean(value)
         try:
             model = FMSUser.objects.get(pk=value)
         except FMSUser.DoesNotExist:
             raise ValidationError(self.error_messages['invalid_choice'])
         return model
+ 
 
 class ManagersListForm(forms.Form):
     manager=ManagersChoiceField(label="")
Fix user not defined error for not logged in users
## Code Before: from django import forms
from django_fixmystreet.fixmystreet.models import FMSUser, getLoggedInUserId
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.contrib.sessions.models import Session

class ManagersChoiceField (forms.fields.ChoiceField):
    def __init__(self, *args, **kwargs):
        # assemble the opt groups.
        choices = []
        choices.append(('', ugettext_lazy("Select a manager")))
        currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation
        managers = FMSUser.objects.filter(manager=True)
        managers = managers.filter(organisation_id=currentUserOrganisationId)
        for manager in managers:
            choices.append((manager.pk,manager.first_name+manager.last_name))
        super(ManagersChoiceField,self).__init__(choices,*args,**kwargs)

    def clean(self, value):
        super(ManagersChoiceField,self).clean(value)
        try:
            model = FMSUser.objects.get(pk=value)
        except FMSUser.DoesNotExist:
            raise ValidationError(self.error_messages['invalid_choice'])
        return model

class ManagersListForm(forms.Form):
    manager=ManagersChoiceField(label="")
## Instruction: Fix user not defined error for not logged in users
## Code After: from django import forms
from django_fixmystreet.fixmystreet.models import FMSUser, getLoggedInUserId
from django.contrib.auth.models import User
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.contrib.sessions.models import Session
from django.contrib.auth.decorators import login_required


class ManagersChoiceField (forms.fields.ChoiceField):
    def __init__(self, *args, **kwargs):
        choices = []
        choices.append(('', ugettext_lazy("Select a manager")))
        currentUserOrganisationId = 1
        if Session.objects.all()[0].session_key:
            currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation
        managers = FMSUser.objects.filter(manager=True)
        managers = managers.filter(organisation_id=currentUserOrganisationId)
        for manager in managers:
            choices.append((manager.pk,manager.first_name+manager.last_name))
        super(ManagersChoiceField,self).__init__(choices,*args,**kwargs)

    def clean(self, value):
        super(ManagersChoiceField,self).clean(value)
        try:
            model = FMSUser.objects.get(pk=value)
        except FMSUser.DoesNotExist:
            raise ValidationError(self.error_messages['invalid_choice'])
        return model


class ManagersListForm(forms.Form):
    manager=ManagersChoiceField(label="")

// ... existing code ...
from django.contrib.sessions.models import Session
from django.contrib.auth.decorators import login_required

// ... modified code ...
    def __init__(self, *args, **kwargs):
        choices = []
...
        choices.append(('', ugettext_lazy("Select a manager")))
        currentUserOrganisationId = 1
        if Session.objects.all()[0].session_key:
            currentUserOrganisationId = FMSUser.objects.get(pk=getLoggedInUserId(Session.objects.all()[0].session_key)).organisation
        managers = FMSUser.objects.filter(manager=True)
...

class ManagersListForm(forms.Form):
// ... rest of the code ...
00435d8f0cc906878cd6084c78c17cbc5a49b66e
spacy/tests/parser/test_beam_parse.py
spacy/tests/parser/test_beam_parse.py
from __future__ import unicode_literals

import pytest


@pytest.mark.models('en')
def test_beam_parse(EN):
    doc = EN(u'Australia is a country', disable=['ner'])
    ents = EN.entity(doc, beam_width=2)
    print(ents)

from __future__ import unicode_literals

import pytest

from ...language import Language
from ...pipeline import DependencyParser


@pytest.mark.models('en')
def test_beam_parse_en(EN):
    doc = EN(u'Australia is a country', disable=['ner'])
    ents = EN.entity(doc, beam_width=2)
    print(ents)


def test_beam_parse():
    nlp = Language()
    nlp.add_pipe(DependencyParser(nlp.vocab), name='parser')
    nlp.parser.add_label('nsubj')
    nlp.begin_training()

    doc = nlp.make_doc(u'Australia is a country')
    nlp.parser(doc, beam_width=2)
Add extra beam parsing test
Add extra beam parsing test
Python
mit
aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,honnibal/spaCy,recognai/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,honnibal/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,explosion/spaCy,recognai/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,aikramer2/spaCy
 from __future__ import unicode_literals
 
 import pytest
 
+ from ...language import Language
+ from ...pipeline import DependencyParser
 
 @pytest.mark.models('en')
- def test_beam_parse(EN):
+ def test_beam_parse_en(EN):
     doc = EN(u'Australia is a country', disable=['ner'])
     ents = EN.entity(doc, beam_width=2)
     print(ents)
+ 
+ 
+ def test_beam_parse():
+     nlp = Language()
+     nlp.add_pipe(DependencyParser(nlp.vocab), name='parser')
+     nlp.parser.add_label('nsubj')
+     nlp.begin_training()
+ 
+     doc = nlp.make_doc(u'Australia is a country')
+     nlp.parser(doc, beam_width=2)
+ 
Add extra beam parsing test
## Code Before: from __future__ import unicode_literals

import pytest


@pytest.mark.models('en')
def test_beam_parse(EN):
    doc = EN(u'Australia is a country', disable=['ner'])
    ents = EN.entity(doc, beam_width=2)
    print(ents)
## Instruction: Add extra beam parsing test
## Code After: from __future__ import unicode_literals

import pytest

from ...language import Language
from ...pipeline import DependencyParser


@pytest.mark.models('en')
def test_beam_parse_en(EN):
    doc = EN(u'Australia is a country', disable=['ner'])
    ents = EN.entity(doc, beam_width=2)
    print(ents)


def test_beam_parse():
    nlp = Language()
    nlp.add_pipe(DependencyParser(nlp.vocab), name='parser')
    nlp.parser.add_label('nsubj')
    nlp.begin_training()

    doc = nlp.make_doc(u'Australia is a country')
    nlp.parser(doc, beam_width=2)

// ... existing code ...
import pytest

from ...language import Language
from ...pipeline import DependencyParser
// ... modified code ...
@pytest.mark.models('en')
def test_beam_parse_en(EN):
    doc = EN(u'Australia is a country', disable=['ner'])
...
    print(ents)


def test_beam_parse():
    nlp = Language()
    nlp.add_pipe(DependencyParser(nlp.vocab), name='parser')
    nlp.parser.add_label('nsubj')
    nlp.begin_training()

    doc = nlp.make_doc(u'Australia is a country')
    nlp.parser(doc, beam_width=2)
// ... rest of the code ...
2403cbe2aa8f515bdd8f575112478010389ee48b
conan/ConanServerToArtifactory/migrate.py
conan/ConanServerToArtifactory/migrate.py
import os
import subprocess


def run(cmd):
    ret = os.system(cmd)
    if ret != 0:
        raise Exception("Command failed: %s" % cmd)

# Assuming local = conan_server and Artifactory remotes
output = subprocess.check_output("conan search -r=local --raw")
packages = output.splitlines()

for package in packages:
    print("Downloading %s" % package)
    run("conan download %s -r=local" % package)

run("conan upload * --all --confirm -r=artifactory")

import os
import subprocess


def run(cmd):
    ret = os.system(cmd)
    if ret != 0:
        raise Exception("Command failed: %s" % cmd)

# Assuming local = conan_server and Artifactory remotes
output = subprocess.check_output("conan search * --remote=local --raw")
packages = output.decode("utf-8").splitlines()

for package in packages[:1]:
    print("Downloading %s" % package)
    run("conan download {} --remote=local".format(package))

run("conan upload * --all --confirm -r=artifactory")
Update Conan server migration script
Update Conan server migration script
Python
apache-2.0
JFrogDev/artifactory-scripts,JFrogDev/artifactory-scripts,JFrogDev/artifactory-scripts,JFrogDev/artifactory-scripts,JFrogDev/artifactory-scripts,JFrogDev/artifactory-scripts
 import os
 import subprocess
 
 
 def run(cmd):
     ret = os.system(cmd)
     if ret != 0:
         raise Exception("Command failed: %s" % cmd)
 
 # Assuming local = conan_server and Artifactory remotes
- output = subprocess.check_output("conan search -r=local --raw")
+ output = subprocess.check_output("conan search * --remote=local --raw")
- packages = output.splitlines()
+ packages = output.decode("utf-8").splitlines()
 
- for package in packages:
+ for package in packages[:1]:
     print("Downloading %s" % package)
-     run("conan download %s -r=local" % package)
+     run("conan download {} --remote=local".format(package))
 
 run("conan upload * --all --confirm -r=artifactory")
Update Conan server migration script
## Code Before: import os
import subprocess


def run(cmd):
    ret = os.system(cmd)
    if ret != 0:
        raise Exception("Command failed: %s" % cmd)

# Assuming local = conan_server and Artifactory remotes
output = subprocess.check_output("conan search -r=local --raw")
packages = output.splitlines()

for package in packages:
    print("Downloading %s" % package)
    run("conan download %s -r=local" % package)

run("conan upload * --all --confirm -r=artifactory")
## Instruction: Update Conan server migration script
## Code After: import os
import subprocess


def run(cmd):
    ret = os.system(cmd)
    if ret != 0:
        raise Exception("Command failed: %s" % cmd)

# Assuming local = conan_server and Artifactory remotes
output = subprocess.check_output("conan search * --remote=local --raw")
packages = output.decode("utf-8").splitlines()

for package in packages[:1]:
    print("Downloading %s" % package)
    run("conan download {} --remote=local".format(package))

run("conan upload * --all --confirm -r=artifactory")

// ... existing code ...
# Assuming local = conan_server and Artifactory remotes
output = subprocess.check_output("conan search * --remote=local --raw")
packages = output.decode("utf-8").splitlines()

for package in packages[:1]:
    print("Downloading %s" % package)
    run("conan download {} --remote=local".format(package))
// ... rest of the code ...
ee2db892b4dafa33115779166773e248c17a1b43
kyoto/tests/test_client.py
kyoto/tests/test_client.py
import unittest

import kyoto.server
import kyoto.tests.dummy
import kyoto.client


class ServiceTestCase(unittest.TestCase):

    def setUp(self):
        self.address = ('localhost', 1337)
        self.server = kyoto.server.BertRPCServer([kyoto.tests.dummy])
        self.server.start()
        self.service = kyoto.client.Service(self.address, ":dummy")

    def test_invalid_service_name_type(self):
        with self.assertRaises(ValueError):
            service = kyoto.client.Service(self.address, "dummy")

    def test_sync_request(self):
        response = self.service.call(":echo", ["hello"])
        self.assertEqual(response, "hello?")

    def test_async_request(self):
        response = self.service.cast(":echo", ["hello"])
        self.assertEqual(response, None)

    def tearDown(self):
        self.server.stop()

import unittest

import kyoto.server
import kyoto.tests.dummy
import kyoto.client


class ServiceTestCase(unittest.TestCase):

    def setUp(self):
        self.address = ('localhost', 1337)
        self.server = kyoto.server.BertRPCServer([kyoto.tests.dummy])
        self.server.start()
        self.service = kyoto.client.Service(self.address, ":dummy")

    def test_invalid_module_name_type(self):
        with self.assertRaises(ValueError):
            service = kyoto.client.Service(self.address, "dummy")
            service = kyoto.client.Service(self.address, ":dummy")

    def test_sync_request(self):
        response = self.service.call(":echo", ["hello"])
        self.assertEqual(response, "hello?")

    def test_async_request(self):
        response = self.service.cast(":echo", ["hello"])
        self.assertEqual(response, None)

    def tearDown(self):
        self.server.stop()
Add valid module name test case
Add valid module name test case
Python
mit
kyoto-project/kyoto
 import unittest
 
 import kyoto.server
 import kyoto.tests.dummy
 import kyoto.client
 
 
 class ServiceTestCase(unittest.TestCase):
 
     def setUp(self):
         self.address = ('localhost', 1337)
         self.server = kyoto.server.BertRPCServer([kyoto.tests.dummy])
         self.server.start()
         self.service = kyoto.client.Service(self.address, ":dummy")
 
-     def test_invalid_service_name_type(self):
+     def test_invalid_module_name_type(self):
         with self.assertRaises(ValueError):
             service = kyoto.client.Service(self.address, "dummy")
+             service = kyoto.client.Service(self.address, ":dummy")
 
     def test_sync_request(self):
         response = self.service.call(":echo", ["hello"])
         self.assertEqual(response, "hello?")
 
     def test_async_request(self):
         response = self.service.cast(":echo", ["hello"])
         self.assertEqual(response, None)
 
     def tearDown(self):
         self.server.stop()
Add valid module name test case
## Code Before: import unittest

import kyoto.server
import kyoto.tests.dummy
import kyoto.client


class ServiceTestCase(unittest.TestCase):

    def setUp(self):
        self.address = ('localhost', 1337)
        self.server = kyoto.server.BertRPCServer([kyoto.tests.dummy])
        self.server.start()
        self.service = kyoto.client.Service(self.address, ":dummy")

    def test_invalid_service_name_type(self):
        with self.assertRaises(ValueError):
            service = kyoto.client.Service(self.address, "dummy")

    def test_sync_request(self):
        response = self.service.call(":echo", ["hello"])
        self.assertEqual(response, "hello?")

    def test_async_request(self):
        response = self.service.cast(":echo", ["hello"])
        self.assertEqual(response, None)

    def tearDown(self):
        self.server.stop()
## Instruction: Add valid module name test case
## Code After: import unittest

import kyoto.server
import kyoto.tests.dummy
import kyoto.client


class ServiceTestCase(unittest.TestCase):

    def setUp(self):
        self.address = ('localhost', 1337)
        self.server = kyoto.server.BertRPCServer([kyoto.tests.dummy])
        self.server.start()
        self.service = kyoto.client.Service(self.address, ":dummy")

    def test_invalid_module_name_type(self):
        with self.assertRaises(ValueError):
            service = kyoto.client.Service(self.address, "dummy")
            service = kyoto.client.Service(self.address, ":dummy")

    def test_sync_request(self):
        response = self.service.call(":echo", ["hello"])
        self.assertEqual(response, "hello?")

    def test_async_request(self):
        response = self.service.cast(":echo", ["hello"])
        self.assertEqual(response, None)

    def tearDown(self):
        self.server.stop()

// ... existing code ...
    def test_invalid_module_name_type(self):
        with self.assertRaises(ValueError):
// ... modified code ...
            service = kyoto.client.Service(self.address, "dummy")
            service = kyoto.client.Service(self.address, ":dummy")
// ... rest of the code ...
096f9e86755a6967d732986c51ae00855551cf4d
project_name/urls.py
project_name/urls.py
from django.conf import settings
from django.conf.urls import include, url  # noqa
from django.contrib import admin
from django.views.generic import TemplateView

import django_js_reverse.views


urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),

    url(r'^$', TemplateView.as_view(template_name='exampleapp/itworks.html'), name='home'),
]

if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns

from django.conf.urls import include, url  # noqa
from django.contrib import admin
from django.views.generic import TemplateView

import django_js_reverse.views


urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),

    url(r'^$', TemplateView.as_view(template_name='exampleapp/itworks.html'), name='home'),
]
Remove usage from debug toolbar
Remove usage from debug toolbar
Python
mit
vintasoftware/django-react-boilerplate,vintasoftware/django-react-boilerplate,vintasoftware/django-react-boilerplate,vintasoftware/django-react-boilerplate
- from django.conf import settings
 from django.conf.urls import include, url  # noqa
 from django.contrib import admin
 from django.views.generic import TemplateView
 
 import django_js_reverse.views
 
 
 urlpatterns = [
     url(r'^admin/', admin.site.urls),
     url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),
 
     url(r'^$', TemplateView.as_view(template_name='exampleapp/itworks.html'), name='home'),
 ]
- 
- if settings.DEBUG:
-     import debug_toolbar
-     urlpatterns = [
-         url(r'^__debug__/', include(debug_toolbar.urls)),
-     ] + urlpatterns
- 
Remove usage from debug toolbar
## Code Before: from django.conf import settings
from django.conf.urls import include, url  # noqa
from django.contrib import admin
from django.views.generic import TemplateView

import django_js_reverse.views


urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),

    url(r'^$', TemplateView.as_view(template_name='exampleapp/itworks.html'), name='home'),
]

if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
## Instruction: Remove usage from debug toolbar
## Code After: from django.conf.urls import include, url  # noqa
from django.contrib import admin
from django.views.generic import TemplateView

import django_js_reverse.views


urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),

    url(r'^$', TemplateView.as_view(template_name='exampleapp/itworks.html'), name='home'),
]

# ... existing code ...
from django.conf.urls import include, url  # noqa
# ... modified code ...
]
# ... rest of the code ...
ecbb3ffdf063bc53eae0f8bd180e62ae61f99fee
opencontrail_netns/vrouter_control.py
opencontrail_netns/vrouter_control.py
from contrail_vrouter_api.vrouter_api import ContrailVRouterApi


def interface_register(vm, vmi, iface_name):
    api = ContrailVRouterApi()
    mac = vmi.virtual_machine_interface_mac_addresses.mac_address[0]
    api.add_port(vm.uuid, vmi.uuid, iface_name, mac)


def interface_unregister(vmi_uuid):
    api = ContrailVRouterApi()
    api.delete_port(vmi_uuid)

from contrail_vrouter_api.vrouter_api import ContrailVRouterApi


def interface_register(vm, vmi, iface_name):
    api = ContrailVRouterApi()
    mac = vmi.virtual_machine_interface_mac_addresses.mac_address[0]
    api.add_port(vm.uuid, vmi.uuid, iface_name, mac, port_type='NovaVMPort')


def interface_unregister(vmi_uuid):
    api = ContrailVRouterApi()
    api.delete_port(vmi_uuid)
Use NovaVMPort as type; otherwise the agent will believe it is dealing with a service-instance and will not send a VM registration.
Use NovaVMPort as type; otherwise the agent will believe it is dealing with a service-instance and will not send a VM registration.
Python
apache-2.0
pedro-r-marques/opencontrail-netns,DreamLab/opencontrail-netns
 from contrail_vrouter_api.vrouter_api import ContrailVRouterApi
 
 
 def interface_register(vm, vmi, iface_name):
     api = ContrailVRouterApi()
     mac = vmi.virtual_machine_interface_mac_addresses.mac_address[0]
-     api.add_port(vm.uuid, vmi.uuid, iface_name, mac)
+     api.add_port(vm.uuid, vmi.uuid, iface_name, mac, port_type='NovaVMPort')
 
 
 def interface_unregister(vmi_uuid):
     api = ContrailVRouterApi()
     api.delete_port(vmi_uuid)
Use NovaVMPort as type; otherwise the agent will believe it is dealing with a service-instance and will not send a VM registration.
## Code Before: from contrail_vrouter_api.vrouter_api import ContrailVRouterApi


def interface_register(vm, vmi, iface_name):
    api = ContrailVRouterApi()
    mac = vmi.virtual_machine_interface_mac_addresses.mac_address[0]
    api.add_port(vm.uuid, vmi.uuid, iface_name, mac)


def interface_unregister(vmi_uuid):
    api = ContrailVRouterApi()
    api.delete_port(vmi_uuid)
## Instruction: Use NovaVMPort as type; otherwise the agent will believe it is dealing with a service-instance and will not send a VM registration.
## Code After: from contrail_vrouter_api.vrouter_api import ContrailVRouterApi


def interface_register(vm, vmi, iface_name):
    api = ContrailVRouterApi()
    mac = vmi.virtual_machine_interface_mac_addresses.mac_address[0]
    api.add_port(vm.uuid, vmi.uuid, iface_name, mac, port_type='NovaVMPort')


def interface_unregister(vmi_uuid):
    api = ContrailVRouterApi()
    api.delete_port(vmi_uuid)

...
    mac = vmi.virtual_machine_interface_mac_addresses.mac_address[0]
    api.add_port(vm.uuid, vmi.uuid, iface_name, mac, port_type='NovaVMPort')
...
0e0b96d0d800716102204cfdca7317ccb92cee95
pytextql/util.py
pytextql/util.py
import csv
import itertools


def grouper(iterable, n):
    """
    Slice up `iterable` into iterables of `n` items.

    :param iterable: Iterable to splice.
    :param n: Number of items per slice.
    :returns: iterable of iterables
    """
    it = iter(iterable)
    while True:
        chunk = itertools.islice(it, n)
        try:
            first = next(chunk)
        except StopIteration:
            return
        yield itertools.chain([first], chunk)


class UnicodeCSVReader(object):
    """
    An extremely minimal wrapper around csv.reader to assist in reading
    Unicode data.
    """
    def __init__(self, *args, **kwargs):
        self.encoding = kwargs.pop('encoding', 'utf8')
        self.pad_to = kwargs.pop('pad_to', 0)
        self.pad_with = kwargs.pop('pad_with', '')
        self.reader = csv.reader(*args, **kwargs)

    def next(self):
        row = self.reader.next()
        padding = [self.pad_with] * (self.pad_to - len(row))
        return [unicode(c, self.encoding) for c in row] + padding

    def __iter__(self):
        return self

    @property
    def dialect(self):
        return self.reader.dialect

    @property
    def line_num(self):
        return self.reader.line_num

import csv
import itertools


def grouper(iterable, n):
    """
    Slice up `iterable` into iterables of `n` items.

    :param iterable: Iterable to splice.
    :param n: Number of items per slice.
    :returns: iterable of iterables
    """
    it = iter(iterable)
    while True:
        chunk = itertools.islice(it, n)
        try:
            first = next(chunk)
        except StopIteration:
            return
        yield itertools.chain([first], chunk)


class UnicodeCSVReader(object):
    """
    An extremely minimal wrapper around csv.reader to assist in reading
    Unicode data.
    """
    def __init__(self, *args, **kwargs):
        self.encoding = kwargs.pop('encoding', 'utf8')
        self.pad_to = kwargs.pop('pad_to', 0)
        self.pad_with = kwargs.pop('pad_with', '')
        self.reader = csv.reader(*args, **kwargs)

    def next(self):
        row = self.reader.next()
        padding = [self.pad_with] * (self.pad_to - len(row))
        return [unicode(c, self.encoding) for c in row] + padding

    def __iter__(self):
        return self

    @property
    def dialect(self):
        return self.reader.dialect

    @property
    def line_num(self):
        return self.reader.line_num


class UnicodeCSVWriter(object):
    def __init__(self, *args, **kwargs):
        self.encoding = kwargs.pop('encoding', 'utf8')
        self.writer = csv.writer(*args, **kwargs)

    def writerow(self, row):
        self.writer.writerow([
            column.encode(self.encoding) for column in row
        ])

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
Add a simple UnicodeCSVWriter, probably flawed.
Add a simple UnicodeCSVWriter, probably flawed.
Python
mit
TkTech/pytextql
 import csv
 import itertools
 
 
 def grouper(iterable, n):
     """
     Slice up `iterable` into iterables of `n` items.
 
     :param iterable: Iterable to splice.
     :param n: Number of items per slice.
     :returns: iterable of iterables
     """
     it = iter(iterable)
     while True:
         chunk = itertools.islice(it, n)
         try:
             first = next(chunk)
         except StopIteration:
             return
         yield itertools.chain([first], chunk)
 
 
 class UnicodeCSVReader(object):
     """
     An extremely minimal wrapper around csv.reader to assist in reading
     Unicode data.
     """
     def __init__(self, *args, **kwargs):
         self.encoding = kwargs.pop('encoding', 'utf8')
         self.pad_to = kwargs.pop('pad_to', 0)
         self.pad_with = kwargs.pop('pad_with', '')
         self.reader = csv.reader(*args, **kwargs)
 
     def next(self):
         row = self.reader.next()
         padding = [self.pad_with] * (self.pad_to - len(row))
         return [unicode(c, self.encoding) for c in row] + padding
 
     def __iter__(self):
         return self
 
     @property
     def dialect(self):
         return self.reader.dialect
 
     @property
     def line_num(self):
         return self.reader.line_num
+ 
+ 
+ class UnicodeCSVWriter(object):
+     def __init__(self, *args, **kwargs):
+         self.encoding = kwargs.pop('encoding', 'utf8')
+         self.writer = csv.writer(*args, **kwargs)
+ 
+     def writerow(self, row):
+         self.writer.writerow([
+             column.encode(self.encoding) for column in row
+         ])
+ 
+     def writerows(self, rows):
+         for row in rows:
+             self.writerow(row)
+ 
Add a simple UnicodeCSVWriter, probably flawed.
## Code Before: import csv
import itertools


def grouper(iterable, n):
    """
    Slice up `iterable` into iterables of `n` items.

    :param iterable: Iterable to splice.
    :param n: Number of items per slice.
    :returns: iterable of iterables
    """
    it = iter(iterable)
    while True:
        chunk = itertools.islice(it, n)
        try:
            first = next(chunk)
        except StopIteration:
            return
        yield itertools.chain([first], chunk)


class UnicodeCSVReader(object):
    """
    An extremely minimal wrapper around csv.reader to assist in reading
    Unicode data.
    """
    def __init__(self, *args, **kwargs):
        self.encoding = kwargs.pop('encoding', 'utf8')
        self.pad_to = kwargs.pop('pad_to', 0)
        self.pad_with = kwargs.pop('pad_with', '')
        self.reader = csv.reader(*args, **kwargs)

    def next(self):
        row = self.reader.next()
        padding = [self.pad_with] * (self.pad_to - len(row))
        return [unicode(c, self.encoding) for c in row] + padding

    def __iter__(self):
        return self

    @property
    def dialect(self):
        return self.reader.dialect

    @property
    def line_num(self):
        return self.reader.line_num
## Instruction: Add a simple UnicodeCSVWriter, probably flawed.
## Code After: import csv
import itertools


def grouper(iterable, n):
    """
    Slice up `iterable` into iterables of `n` items.

    :param iterable: Iterable to splice.
    :param n: Number of items per slice.
    :returns: iterable of iterables
    """
    it = iter(iterable)
    while True:
        chunk = itertools.islice(it, n)
        try:
            first = next(chunk)
        except StopIteration:
            return
        yield itertools.chain([first], chunk)


class UnicodeCSVReader(object):
    """
    An extremely minimal wrapper around csv.reader to assist in reading
    Unicode data.
    """
    def __init__(self, *args, **kwargs):
        self.encoding = kwargs.pop('encoding', 'utf8')
        self.pad_to = kwargs.pop('pad_to', 0)
        self.pad_with = kwargs.pop('pad_with', '')
        self.reader = csv.reader(*args, **kwargs)

    def next(self):
        row = self.reader.next()
        padding = [self.pad_with] * (self.pad_to - len(row))
        return [unicode(c, self.encoding) for c in row] + padding

    def __iter__(self):
        return self

    @property
    def dialect(self):
        return self.reader.dialect

    @property
    def line_num(self):
        return self.reader.line_num


class UnicodeCSVWriter(object):
    def __init__(self, *args, **kwargs):
        self.encoding = kwargs.pop('encoding', 'utf8')
        self.writer = csv.writer(*args, **kwargs)

    def writerow(self, row):
        self.writer.writerow([
            column.encode(self.encoding) for column in row
        ])

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)

...
        return self.reader.line_num


class UnicodeCSVWriter(object):
    def __init__(self, *args, **kwargs):
        self.encoding = kwargs.pop('encoding', 'utf8')
        self.writer = csv.writer(*args, **kwargs)

    def writerow(self, row):
        self.writer.writerow([
            column.encode(self.encoding) for column in row
        ])

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
...
6d91ed53a15672b1e70d74691158e9afb7162e74
src/etcd/__init__.py
src/etcd/__init__.py
import collections

from client import Client


class EtcdResult(collections.namedtuple(
    'EtcdResult',
    [
        'action',
        'index',
        'key',
        'prevValue',
        'value',
        'expiration',
        'ttl',
        'newKey'])):

    def __new__(
            cls,
            action=None,
            index=None,
            key=None,
            prevValue=None,
            value=None,
            expiration=None,
            ttl=None,
            newKey=None):
        return super(EtcdResult, cls).__new__(
            cls,
            action,
            index,
            key,
            prevValue,
            value,
            expiration,
            ttl,
            newKey)


class EtcdException(Exception):
    """
    Generic Etcd Exception.
    """
    pass

import collections

from client import Client


class EtcdResult(collections.namedtuple(
    'EtcdResult',
    [
        'action',
        'index',
        'key',
        'prevValue',
        'value',
        'expiration',
        'ttl',
        'newKey'])):

    def __new__(
            cls,
            action=None,
            index=None,
            key=None,
            prevValue=None,
            value=None,
            expiration=None,
            ttl=None,
            newKey=None):
        return super(EtcdResult, cls).__new__(
            cls,
            action,
            index,
            key,
            prevValue,
            value,
            expiration,
            ttl,
            newKey)


class EtcdException(Exception):
    """
    Generic Etcd Exception.
    """
    pass

# Attempt to enable urllib3's SNI support, if possible
# Blatantly copied from requests.
try:
    from urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass
Add optional support for SSL SNI
Add optional support for SSL SNI
Python
mit
dmonroy/python-etcd,ocadotechnology/python-etcd,thepwagner/python-etcd,dmonroy/python-etcd,jlamillan/python-etcd,ocadotechnology/python-etcd,aziontech/python-etcd,jplana/python-etcd,sentinelleader/python-etcd,vodik/python-etcd,projectcalico/python-etcd,j-mcnally/python-etcd,j-mcnally/python-etcd,mbrukman/python-etcd,aziontech/python-etcd,jlamillan/python-etcd,kireal/python-etcd,jplana/python-etcd,kireal/python-etcd,mbrukman/python-etcd,vodik/python-etcd,thepwagner/python-etcd,projectcalico/python-etcd,sentinelleader/python-etcd
 import collections
 
 from client import Client
 
 
 class EtcdResult(collections.namedtuple(
     'EtcdResult',
     [
         'action',
         'index',
         'key',
         'prevValue',
         'value',
         'expiration',
         'ttl',
         'newKey'])):
 
     def __new__(
             cls,
             action=None,
             index=None,
             key=None,
             prevValue=None,
             value=None,
             expiration=None,
             ttl=None,
             newKey=None):
         return super(EtcdResult, cls).__new__(
             cls,
             action,
             index,
             key,
             prevValue,
             value,
             expiration,
             ttl,
             newKey)
 
 
 class EtcdException(Exception):
     """
     Generic Etcd Exception.
     """
     pass
+ 
+ # Attempt to enable urllib3's SNI support, if possible
+ # Blatantly copied from requests.
+ try:
+     from urllib3.contrib import pyopenssl
+     pyopenssl.inject_into_urllib3()
+ except ImportError:
+     pass
+ 
Add optional support for SSL SNI
## Code Before: import collections

from client import Client


class EtcdResult(collections.namedtuple(
    'EtcdResult',
    [
        'action',
        'index',
        'key',
        'prevValue',
        'value',
        'expiration',
        'ttl',
        'newKey'])):

    def __new__(
            cls,
            action=None,
            index=None,
            key=None,
            prevValue=None,
            value=None,
            expiration=None,
            ttl=None,
            newKey=None):
        return super(EtcdResult, cls).__new__(
            cls,
            action,
            index,
            key,
            prevValue,
            value,
            expiration,
            ttl,
            newKey)


class EtcdException(Exception):
    """
    Generic Etcd Exception.
    """
    pass
## Instruction: Add optional support for SSL SNI
## Code After: import collections

from client import Client


class EtcdResult(collections.namedtuple(
    'EtcdResult',
    [
        'action',
        'index',
        'key',
        'prevValue',
        'value',
        'expiration',
        'ttl',
        'newKey'])):

    def __new__(
            cls,
            action=None,
            index=None,
            key=None,
            prevValue=None,
            value=None,
            expiration=None,
            ttl=None,
            newKey=None):
        return super(EtcdResult, cls).__new__(
            cls,
            action,
            index,
            key,
            prevValue,
            value,
            expiration,
            ttl,
            newKey)


class EtcdException(Exception):
    """
    Generic Etcd Exception.
    """
    pass

# Attempt to enable urllib3's SNI support, if possible
# Blatantly copied from requests.
try:
    from urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass

...
    pass

# Attempt to enable urllib3's SNI support, if possible
# Blatantly copied from requests.
try:
    from urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    pass
...
5d3d47e0fae9ddb9f445972e5186429163aabf40
statirator/core/management/commands/init.py
statirator/core/management/commands/init.py
import os
from optparse import make_option

from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = "Init the static site project"
    args = '[directory]'

    option_list = (
        make_option(
            '--title', '-t', dest='title', default='Default site',
            help='Site title [Default: "%defaults"]'),
        make_option(
            '--domain', '-d', dest='domain', default='example.com',
            help='Domain name [Default: "%default"]'),
        make_option(
            '--languages', '-l', dest='languages', default=['he', 'en'],
            action='append', help='Supported languages. [Default: "%default"]')
    ) + BaseCommand.option_list

    def handle(self, directory, **options):
        from django.conf.global_settings import LANGUAGES

        extra = {
            'build': 'build',
            'default_lang': options['languages'][0],
            'languages': [l for l in LANGUAGES if l[0] in options["languages"]],
            'extensions': ('py', ),
            'files': (),
            'template': os.path.abspath(
                os.path.join(
                    os.path.dirname(__file__),
                    os.pardir, os.pardir, os.pardir, 'project_template')),
        }
        extra.update(options)

        from django.core.management import call_command
        call_command('startproject', 'conf', directory, **extra)

import os
import logging
from django.core.management.base import BaseCommand
from optparse import make_option


class Command(BaseCommand):
    help = "Init the static site project"
    args = '[directory]'

    option_list = (
        make_option(
            '--title', '-t', dest='title', default='Default site',
            help='Site title [Default: "%defaults"]'),
        make_option(
            '--domain', '-d', dest='domain', default='example.com',
            help='Domain name [Default: "%default"]'),
        make_option(
            '--languages', '-l', dest='languages', default=['he', 'en'],
            action='append', help='Supported languages. [Default: "%default"]')
    ) + BaseCommand.option_list

    def handle(self, directory, **options):
        logging.info("Initializing project structure in %s", directory)
        os.makedirs(directory)

        from django.conf.global_settings import LANGUAGES

        extra = {
            'build': 'build',
            'default_lang': options['languages'][0],
            'languages': [l for l in LANGUAGES if l[0] in options["languages"]],
            'extensions': ('py', ),
            'files': (),
            'template': os.path.abspath(
                os.path.join(
                    os.path.dirname(__file__),
                    os.pardir, os.pardir, os.pardir, 'project_template')),
        }
        extra.update(options)

        from django.core.management import call_command
        call_command('startproject', 'conf', directory, **extra)
Create the directory before calling the startproject command
Create the directory before calling the startproject command
Python
mit
MeirKriheli/statirator,MeirKriheli/statirator,MeirKriheli/statirator
import os + import logging + from django.core.management.base import BaseCommand from optparse import make_option - from django.core.management.base import BaseCommand class Command(BaseCommand): help = "Init the static site project" args = '[directory]' option_list = ( make_option( '--title', '-t', dest='title', default='Default site', help='Site title [Default: "%defaults"]'), make_option( '--domain', '-d', dest='domain', default='example.com', help='Domain name [Default: "%default"]'), make_option( '--languages', '-l', dest='languages', default=['he', 'en'], action='append', help='Supported languages. [Default: "%default"]') ) + BaseCommand.option_list def handle(self, directory, **options): + logging.info("Initializing project structure in %s", directory) + os.makedirs(directory) + from django.conf.global_settings import LANGUAGES extra = { 'build': 'build', 'default_lang': options['languages'][0], 'languages': [l for l in LANGUAGES if l[0] in options["languages"]], 'extensions': ('py', ), 'files': (), 'template': os.path.abspath( os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'project_template')), } extra.update(options) from django.core.management import call_command call_command('startproject', 'conf', directory, **extra)
Create the directory before calling the startproject command
## Code Before: import os from optparse import make_option from django.core.management.base import BaseCommand class Command(BaseCommand): help = "Init the static site project" args = '[directory]' option_list = ( make_option( '--title', '-t', dest='title', default='Default site', help='Site title [Default: "%defaults"]'), make_option( '--domain', '-d', dest='domain', default='example.com', help='Domain name [Default: "%default"]'), make_option( '--languages', '-l', dest='languages', default=['he', 'en'], action='append', help='Supported languages. [Default: "%default"]') ) + BaseCommand.option_list def handle(self, directory, **options): from django.conf.global_settings import LANGUAGES extra = { 'build': 'build', 'default_lang': options['languages'][0], 'languages': [l for l in LANGUAGES if l[0] in options["languages"]], 'extensions': ('py', ), 'files': (), 'template': os.path.abspath( os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'project_template')), } extra.update(options) from django.core.management import call_command call_command('startproject', 'conf', directory, **extra) ## Instruction: Create the directory before calling the startproject command ## Code After: import os import logging from django.core.management.base import BaseCommand from optparse import make_option class Command(BaseCommand): help = "Init the static site project" args = '[directory]' option_list = ( make_option( '--title', '-t', dest='title', default='Default site', help='Site title [Default: "%defaults"]'), make_option( '--domain', '-d', dest='domain', default='example.com', help='Domain name [Default: "%default"]'), make_option( '--languages', '-l', dest='languages', default=['he', 'en'], action='append', help='Supported languages. [Default: "%default"]') ) + BaseCommand.option_list def handle(self, directory, **options): logging.info("Initializing project structure in %s", directory) os.makedirs(directory) from django.conf.global_settings import LANGUAGES extra = { 'build': 'build', 'default_lang': options['languages'][0], 'languages': [l for l in LANGUAGES if l[0] in options["languages"]], 'extensions': ('py', ), 'files': (), 'template': os.path.abspath( os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'project_template')), } extra.update(options) from django.core.management import call_command call_command('startproject', 'conf', directory, **extra)
// ... existing code ... import os import logging from django.core.management.base import BaseCommand from optparse import make_option // ... modified code ... logging.info("Initializing project structure in %s", directory) os.makedirs(directory) from django.conf.global_settings import LANGUAGES // ... rest of the code ...
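The bug fixed above is that `startproject` receives a destination directory that may not exist yet, and Django aborts in that case, so the command now creates it first. A hedged sketch of the same sequence (the path is illustrative and a configured Django project is assumed):

import os

from django.core.management import call_command

directory = 'my_site'  # illustrative target path
os.makedirs(directory)  # startproject expects an existing destination
call_command('startproject', 'conf', directory)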
022bbf819b3c4a14ade4100102d251eceb84c637
tests/test_bijection.py
tests/test_bijection.py
"""Test bijection class.""" import pytest from collections_extended.bijection import bijection def test_bijection(): """General tests for bijection.""" b = bijection() assert len(b) == 0 b['a'] = 1 assert len(b) == 1 assert b['a'] == 1 assert b.inverse[1] == 'a' assert 'a' in b assert 1 not in b assert 1 in b.inverse with pytest.raises(KeyError): del b['f'] assert b == bijection(a=1) assert b.inverse.inverse is b assert b == b.copy() del b['a'] assert b == bijection() assert bijection(a=1, b=2, c=3) == bijection({'a': 1, 'b': 2, 'c': 3}) b['a'] = 1 b.inverse[1] = 'b' assert 'b' in b assert b['b'] == 1 assert 'a' not in b
"""Test bijection class.""" import pytest from collections_extended.bijection import bijection def test_bijection(): """General tests for bijection.""" b = bijection() assert len(b) == 0 b['a'] = 1 assert len(b) == 1 assert b['a'] == 1 assert b.inverse[1] == 'a' assert 'a' in b assert 1 not in b assert 1 in b.inverse with pytest.raises(KeyError): del b['f'] assert b == bijection(a=1) assert b.inverse.inverse is b assert b == b.copy() del b['a'] assert b == bijection() assert bijection(a=1, b=2, c=3) == bijection({'a': 1, 'b': 2, 'c': 3}) b['a'] = 1 b.inverse[1] = 'b' assert 'b' in b assert b['b'] == 1 assert 'a' not in b def test_init_from_pairs(): assert bijection({'a': 1, 'b': 2}) == bijection((('a', 1), ('b', 2)))
Add test for bijection init from list of pairs
Add test for bijection init from list of pairs
Python
apache-2.0
mlenzen/collections-extended
"""Test bijection class.""" import pytest from collections_extended.bijection import bijection def test_bijection(): """General tests for bijection.""" b = bijection() assert len(b) == 0 b['a'] = 1 assert len(b) == 1 assert b['a'] == 1 assert b.inverse[1] == 'a' assert 'a' in b assert 1 not in b assert 1 in b.inverse with pytest.raises(KeyError): del b['f'] assert b == bijection(a=1) assert b.inverse.inverse is b assert b == b.copy() del b['a'] assert b == bijection() assert bijection(a=1, b=2, c=3) == bijection({'a': 1, 'b': 2, 'c': 3}) b['a'] = 1 b.inverse[1] = 'b' assert 'b' in b assert b['b'] == 1 assert 'a' not in b + + def test_init_from_pairs(): + assert bijection({'a': 1, 'b': 2}) == bijection((('a', 1), ('b', 2))) +
Add test for bijection init from list of pairs
## Code Before: """Test bijection class.""" import pytest from collections_extended.bijection import bijection def test_bijection(): """General tests for bijection.""" b = bijection() assert len(b) == 0 b['a'] = 1 assert len(b) == 1 assert b['a'] == 1 assert b.inverse[1] == 'a' assert 'a' in b assert 1 not in b assert 1 in b.inverse with pytest.raises(KeyError): del b['f'] assert b == bijection(a=1) assert b.inverse.inverse is b assert b == b.copy() del b['a'] assert b == bijection() assert bijection(a=1, b=2, c=3) == bijection({'a': 1, 'b': 2, 'c': 3}) b['a'] = 1 b.inverse[1] = 'b' assert 'b' in b assert b['b'] == 1 assert 'a' not in b ## Instruction: Add test for bijection init from list of pairs ## Code After: """Test bijection class.""" import pytest from collections_extended.bijection import bijection def test_bijection(): """General tests for bijection.""" b = bijection() assert len(b) == 0 b['a'] = 1 assert len(b) == 1 assert b['a'] == 1 assert b.inverse[1] == 'a' assert 'a' in b assert 1 not in b assert 1 in b.inverse with pytest.raises(KeyError): del b['f'] assert b == bijection(a=1) assert b.inverse.inverse is b assert b == b.copy() del b['a'] assert b == bijection() assert bijection(a=1, b=2, c=3) == bijection({'a': 1, 'b': 2, 'c': 3}) b['a'] = 1 b.inverse[1] = 'b' assert 'b' in b assert b['b'] == 1 assert 'a' not in b def test_init_from_pairs(): assert bijection({'a': 1, 'b': 2}) == bijection((('a', 1), ('b', 2)))
# ... existing code ... assert 'a' not in b def test_init_from_pairs(): assert bijection({'a': 1, 'b': 2}) == bijection((('a', 1), ('b', 2))) # ... rest of the code ...
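For quick reference, the added test asserts that both construction styles yield the same bijection; the same check runs standalone against the library:

from collections_extended.bijection import bijection

# A bijection built from a mapping equals one built from an
# iterable of (key, value) pairs.
assert bijection({'a': 1, 'b': 2}) == bijection((('a', 1), ('b', 2)))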
a040d06de7624371122960788aff241994ae08f8
metadata/SnowDegreeDay/hooks/pre-stage.py
metadata/SnowDegreeDay/hooks/pre-stage.py
import os import shutil from wmt.config import site from wmt.models.submissions import prepend_to_path from wmt.utils.hook import find_simulation_input_file from topoflow_utils.hook import assign_parameters file_list = ['rti_file', 'pixel_file'] def execute(env): """Perform pre-stage tasks for running a component. Parameters ---------- env : dict A dict of component parameter values from WMT. """ env['n_steps'] = int(round(float(env['run_duration']) / float(env['dt']))) env['save_grid_dt'] = float(env['dt']) env['save_pixels_dt'] = float(env['dt']) # TopoFlow needs site_prefix and case_prefix. env['site_prefix'] = os.path.splitext(env['rti_file'])[0] env['case_prefix'] = 'WMT' # If no pixel_file is given, let TopoFlow make one. if env['pixel_file'] == 'off': file_list.remove('pixel_file') env['pixel_file'] = env['case_prefix'] + '_outlets.txt' assign_parameters(env, file_list) # Default files common to all TopoFlow components are stored with the # topoflow component metadata. prepend_to_path('WMT_INPUT_FILE_PATH', os.path.join(site['db'], 'components', 'topoflow', 'files')) for fname in file_list: src = find_simulation_input_file(env[fname]) shutil.copy(src, os.curdir)
import os import shutil from wmt.config import site from wmt.utils.hook import find_simulation_input_file from topoflow_utils.hook import assign_parameters, scalar_to_rtg_file file_list = [] def execute(env): """Perform pre-stage tasks for running a component. Parameters ---------- env : dict A dict of component parameter values from WMT. """ env['n_steps'] = int(round(float(env['run_duration']) / float(env['dt']))) env['save_grid_dt'] = float(env['dt']) env['save_pixels_dt'] = float(env['dt']) assign_parameters(env, file_list) for fname in file_list: src = find_simulation_input_file(env[fname]) shutil.copy(src, os.curdir) src = find_simulation_input_file(env['site_prefix'] + '.rti') shutil.copy(src, os.path.join(os.curdir, env['site_prefix'] + '.rti')) for var in ('rho_snow', 'c0', 'T0', 'h0_snow', 'h0_swe'): if env[var + '_ptype'] == 'Scalar': scalar_to_rtg_file(var, env)
Update hook for SnowDegreeDay component
Update hook for SnowDegreeDay component
Python
mit
csdms/wmt-metadata
import os import shutil from wmt.config import site - from wmt.models.submissions import prepend_to_path from wmt.utils.hook import find_simulation_input_file - from topoflow_utils.hook import assign_parameters + from topoflow_utils.hook import assign_parameters, scalar_to_rtg_file + file_list = [] - file_list = ['rti_file', - 'pixel_file'] def execute(env): """Perform pre-stage tasks for running a component. Parameters ---------- env : dict A dict of component parameter values from WMT. """ env['n_steps'] = int(round(float(env['run_duration']) / float(env['dt']))) env['save_grid_dt'] = float(env['dt']) env['save_pixels_dt'] = float(env['dt']) - # TopoFlow needs site_prefix and case_prefix. - env['site_prefix'] = os.path.splitext(env['rti_file'])[0] - env['case_prefix'] = 'WMT' - - # If no pixel_file is given, let TopoFlow make one. - if env['pixel_file'] == 'off': - file_list.remove('pixel_file') - env['pixel_file'] = env['case_prefix'] + '_outlets.txt' - assign_parameters(env, file_list) - # Default files common to all TopoFlow components are stored with the - # topoflow component metadata. - prepend_to_path('WMT_INPUT_FILE_PATH', - os.path.join(site['db'], 'components', 'topoflow', 'files')) for fname in file_list: src = find_simulation_input_file(env[fname]) shutil.copy(src, os.curdir) + src = find_simulation_input_file(env['site_prefix'] + '.rti') + shutil.copy(src, os.path.join(os.curdir, env['site_prefix'] + '.rti')) + for var in ('rho_snow', 'c0', 'T0', 'h0_snow', 'h0_swe'): + if env[var + '_ptype'] == 'Scalar': + scalar_to_rtg_file(var, env) +
Update hook for SnowDegreeDay component
## Code Before: import os import shutil from wmt.config import site from wmt.models.submissions import prepend_to_path from wmt.utils.hook import find_simulation_input_file from topoflow_utils.hook import assign_parameters file_list = ['rti_file', 'pixel_file'] def execute(env): """Perform pre-stage tasks for running a component. Parameters ---------- env : dict A dict of component parameter values from WMT. """ env['n_steps'] = int(round(float(env['run_duration']) / float(env['dt']))) env['save_grid_dt'] = float(env['dt']) env['save_pixels_dt'] = float(env['dt']) # TopoFlow needs site_prefix and case_prefix. env['site_prefix'] = os.path.splitext(env['rti_file'])[0] env['case_prefix'] = 'WMT' # If no pixel_file is given, let TopoFlow make one. if env['pixel_file'] == 'off': file_list.remove('pixel_file') env['pixel_file'] = env['case_prefix'] + '_outlets.txt' assign_parameters(env, file_list) # Default files common to all TopoFlow components are stored with the # topoflow component metadata. prepend_to_path('WMT_INPUT_FILE_PATH', os.path.join(site['db'], 'components', 'topoflow', 'files')) for fname in file_list: src = find_simulation_input_file(env[fname]) shutil.copy(src, os.curdir) ## Instruction: Update hook for SnowDegreeDay component ## Code After: import os import shutil from wmt.config import site from wmt.utils.hook import find_simulation_input_file from topoflow_utils.hook import assign_parameters, scalar_to_rtg_file file_list = [] def execute(env): """Perform pre-stage tasks for running a component. Parameters ---------- env : dict A dict of component parameter values from WMT. """ env['n_steps'] = int(round(float(env['run_duration']) / float(env['dt']))) env['save_grid_dt'] = float(env['dt']) env['save_pixels_dt'] = float(env['dt']) assign_parameters(env, file_list) for fname in file_list: src = find_simulation_input_file(env[fname]) shutil.copy(src, os.curdir) src = find_simulation_input_file(env['site_prefix'] + '.rti') shutil.copy(src, os.path.join(os.curdir, env['site_prefix'] + '.rti')) for var in ('rho_snow', 'c0', 'T0', 'h0_snow', 'h0_swe'): if env[var + '_ptype'] == 'Scalar': scalar_to_rtg_file(var, env)
... from wmt.config import site from wmt.utils.hook import find_simulation_input_file from topoflow_utils.hook import assign_parameters, scalar_to_rtg_file ... file_list = [] ... assign_parameters(env, file_list) ... for fname in file_list: ... shutil.copy(src, os.curdir) src = find_simulation_input_file(env['site_prefix'] + '.rti') shutil.copy(src, os.path.join(os.curdir, env['site_prefix'] + '.rti')) for var in ('rho_snow', 'c0', 'T0', 'h0_snow', 'h0_swe'): if env[var + '_ptype'] == 'Scalar': scalar_to_rtg_file(var, env) ...
ceb123e78b4d15c0cfe30198aa3fbbe71603472d
project/forms.py
project/forms.py
from django import forms from django.utils.translation import ugettext_lazy as _ from django.contrib.auth import authenticate class LoginForm(forms.Form): username = forms.CharField(label=_('Naudotojo vardas'), max_length=100, help_text=_('VU MIF uosis.mif.vu.lt serverio.')) password = forms.CharField(label=_(u'Slaptažodis'), max_length=128, widget=forms.PasswordInput(render_value=False)) def clean(self): cleaned_data = super(LoginForm, self).clean() if self.errors: return cleaned_data user = authenticate(**cleaned_data) if not user: raise forms.ValidationError(_(u'Naudotojo vardas arba slaptažodis ' 'yra neteisingi')) return {'user': user}
from django import forms from django.utils.translation import ugettext_lazy as _ from django.contrib.auth import authenticate class LoginForm(forms.Form): username = forms.CharField(label=_('Naudotojo vardas'), max_length=100, help_text=_('VU MIF uosis.mif.vu.lt serverio.')) password = forms.CharField(label=_(u'Slaptažodis'), max_length=128, widget=forms.PasswordInput(render_value=False)) def clean(self): cleaned_data = super(LoginForm, self).clean() if self.errors: return cleaned_data user = authenticate(**cleaned_data) if not user: raise forms.ValidationError(_(u'Naudotojo vardas arba slaptažodis ' 'yra neteisingi')) cleaned_data['user'] = user return cleaned_data
Update login form clean method to return full cleaned data.
Update login form clean method to return full cleaned data.
Python
agpl-3.0
InScience/DAMIS-old,InScience/DAMIS-old
from django import forms from django.utils.translation import ugettext_lazy as _ from django.contrib.auth import authenticate class LoginForm(forms.Form): username = forms.CharField(label=_('Naudotojo vardas'), max_length=100, help_text=_('VU MIF uosis.mif.vu.lt serverio.')) password = forms.CharField(label=_(u'Slaptažodis'), max_length=128, widget=forms.PasswordInput(render_value=False)) def clean(self): cleaned_data = super(LoginForm, self).clean() if self.errors: return cleaned_data user = authenticate(**cleaned_data) if not user: raise forms.ValidationError(_(u'Naudotojo vardas arba slaptažodis ' 'yra neteisingi')) - return {'user': user} + cleaned_data['user'] = user + return cleaned_data
Update login form clean method to return full cleaned data.
## Code Before: from django import forms from django.utils.translation import ugettext_lazy as _ from django.contrib.auth import authenticate class LoginForm(forms.Form): username = forms.CharField(label=_('Naudotojo vardas'), max_length=100, help_text=_('VU MIF uosis.mif.vu.lt serverio.')) password = forms.CharField(label=_(u'Slaptažodis'), max_length=128, widget=forms.PasswordInput(render_value=False)) def clean(self): cleaned_data = super(LoginForm, self).clean() if self.errors: return cleaned_data user = authenticate(**cleaned_data) if not user: raise forms.ValidationError(_(u'Naudotojo vardas arba slaptažodis ' 'yra neteisingi')) return {'user': user} ## Instruction: Update login form clean method to return full cleaned data. ## Code After: from django import forms from django.utils.translation import ugettext_lazy as _ from django.contrib.auth import authenticate class LoginForm(forms.Form): username = forms.CharField(label=_('Naudotojo vardas'), max_length=100, help_text=_('VU MIF uosis.mif.vu.lt serverio.')) password = forms.CharField(label=_(u'Slaptažodis'), max_length=128, widget=forms.PasswordInput(render_value=False)) def clean(self): cleaned_data = super(LoginForm, self).clean() if self.errors: return cleaned_data user = authenticate(**cleaned_data) if not user: raise forms.ValidationError(_(u'Naudotojo vardas arba slaptažodis ' 'yra neteisingi')) cleaned_data['user'] = user return cleaned_data
// ... existing code ... 'yra neteisingi')) cleaned_data['user'] = user return cleaned_data // ... rest of the code ...
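The point of the fix above is subtle: returning only `{'user': user}` from `clean()` discards every other cleaned field, so the commit adds the user to `cleaned_data` and returns the whole dict. A minimal sketch of the corrected idiom on a generic Django form (the form and field names are illustrative):

from django import forms

class ExampleForm(forms.Form):  # illustrative, not from the project
    name = forms.CharField()

    def clean(self):
        cleaned_data = super(ExampleForm, self).clean()
        # Augment cleaned_data instead of replacing it.
        cleaned_data['name_upper'] = cleaned_data.get('name', '').upper()
        return cleaned_data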
850fba4b07e4c444aa8640c6f4c3816f8a3259ea
website_medical_patient_species/controllers/main.py
website_medical_patient_species/controllers/main.py
from openerp import http from openerp.http import request from openerp.addons.website_medical.controllers.main import ( WebsiteMedical ) class WebsiteMedical(WebsiteMedical): def _inject_medical_detail_vals(self, patient_id=0, **kwargs): vals = super(WebsiteMedical, self)._inject_medical_detail_vals( patient_id, **kwargs ) species_ids = request.env['medical.patient.species'].search([]) vals.update({ 'species': species_ids, }) return vals
from openerp.http import request from openerp.addons.website_medical.controllers.main import ( WebsiteMedical ) class WebsiteMedical(WebsiteMedical): def _inject_medical_detail_vals(self, patient_id=0, **kwargs): vals = super(WebsiteMedical, self)._inject_medical_detail_vals( patient_id, **kwargs ) species_ids = request.env['medical.patient.species'].search([]) vals.update({ 'species': species_ids, }) return vals
Fix lint * Remove stray import to fix lint
[FIX] website_medical_patient_species: Fix lint * Remove stray import to fix lint
Python
agpl-3.0
laslabs/vertical-medical,laslabs/vertical-medical
- from openerp import http from openerp.http import request from openerp.addons.website_medical.controllers.main import ( WebsiteMedical ) class WebsiteMedical(WebsiteMedical): def _inject_medical_detail_vals(self, patient_id=0, **kwargs): vals = super(WebsiteMedical, self)._inject_medical_detail_vals( patient_id, **kwargs ) species_ids = request.env['medical.patient.species'].search([]) vals.update({ 'species': species_ids, }) return vals
Fix lint * Remove stray import to fix lint
## Code Before: from openerp import http from openerp.http import request from openerp.addons.website_medical.controllers.main import ( WebsiteMedical ) class WebsiteMedical(WebsiteMedical): def _inject_medical_detail_vals(self, patient_id=0, **kwargs): vals = super(WebsiteMedical, self)._inject_medical_detail_vals( patient_id, **kwargs ) species_ids = request.env['medical.patient.species'].search([]) vals.update({ 'species': species_ids, }) return vals ## Instruction: Fix lint * Remove stray import to fix lint ## Code After: from openerp.http import request from openerp.addons.website_medical.controllers.main import ( WebsiteMedical ) class WebsiteMedical(WebsiteMedical): def _inject_medical_detail_vals(self, patient_id=0, **kwargs): vals = super(WebsiteMedical, self)._inject_medical_detail_vals( patient_id, **kwargs ) species_ids = request.env['medical.patient.species'].search([]) vals.update({ 'species': species_ids, }) return vals
... from openerp.http import request ...
093c9065de9e0e08f248bbb84696bf30309bd536
examples/parallel/timer.py
examples/parallel/timer.py
import rx import concurrent.futures import time seconds = [5, 1, 2, 4, 3] def sleep(t): time.sleep(t) return t def output(result): print '%d seconds' % result with concurrent.futures.ProcessPoolExecutor(5) as executor: rx.Observable.from_(seconds).flat_map( lambda s: executor.submit(sleep, s) ).subscribe(output) # 1 seconds # 2 seconds # 3 seconds # 4 seconds # 5 seconds
from __future__ import print_function import rx import concurrent.futures import time seconds = [5, 1, 2, 4, 3] def sleep(t): time.sleep(t) return t def output(result): print('%d seconds' % result) with concurrent.futures.ProcessPoolExecutor(5) as executor: rx.Observable.from_(seconds).flat_map( lambda s: executor.submit(sleep, s) ).subscribe(output) # 1 seconds # 2 seconds # 3 seconds # 4 seconds # 5 seconds
Fix parallel example for Python 3
Fix parallel example for Python 3
Python
mit
dbrattli/RxPY,ReactiveX/RxPY,ReactiveX/RxPY
+ from __future__ import print_function + import rx import concurrent.futures import time seconds = [5, 1, 2, 4, 3] def sleep(t): time.sleep(t) return t def output(result): - print '%d seconds' % result + print('%d seconds' % result) with concurrent.futures.ProcessPoolExecutor(5) as executor: rx.Observable.from_(seconds).flat_map( lambda s: executor.submit(sleep, s) ).subscribe(output) # 1 seconds # 2 seconds # 3 seconds # 4 seconds # 5 seconds
Fix parallel example for Python 3
## Code Before: import rx import concurrent.futures import time seconds = [5, 1, 2, 4, 3] def sleep(t): time.sleep(t) return t def output(result): print '%d seconds' % result with concurrent.futures.ProcessPoolExecutor(5) as executor: rx.Observable.from_(seconds).flat_map( lambda s: executor.submit(sleep, s) ).subscribe(output) # 1 seconds # 2 seconds # 3 seconds # 4 seconds # 5 seconds ## Instruction: Fix parallel example for Python 3 ## Code After: from __future__ import print_function import rx import concurrent.futures import time seconds = [5, 1, 2, 4, 3] def sleep(t): time.sleep(t) return t def output(result): print('%d seconds' % result) with concurrent.futures.ProcessPoolExecutor(5) as executor: rx.Observable.from_(seconds).flat_map( lambda s: executor.submit(sleep, s) ).subscribe(output) # 1 seconds # 2 seconds # 3 seconds # 4 seconds # 5 seconds
// ... existing code ... from __future__ import print_function import rx // ... modified code ... def output(result): print('%d seconds' % result) // ... rest of the code ...
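The change above is the standard Python 2/3 bridge: with the `print_function` future import, `print` is a function under both interpreters, so the old statement syntax becomes a call. A two-line sketch:

from __future__ import print_function

# Valid under Python 2 and Python 3 alike once the future import is present.
print('%d seconds' % 5)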
0aa1fb5d7f4eca6423a7d4b5cdd166bf29f48423
ordering/__init__.py
ordering/__init__.py
from fractions import Fraction class Ordering: _start = object() _end = object() def __init__(self): self._labels = { self._start: Fraction(0), self._end: Fraction(1) } self._successors = { self._start: self._end } self._predecessors = { self._end: self._start } def insert_after(self, existing_item, new_item): self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2 self._successors[new_item] = self._successors[existing_item] self._predecessors[new_item] = existing_item self._predecessors[self._successors[existing_item]] = new_item self._successors[existing_item] = new_item def insert_before(self, existing_item, new_item): self.insert_after(self._predecessors[existing_item], new_item) def insert_start(self, new_item): self.insert_after(self._start, new_item) def insert_end(self, new_item): self.insert_before(self._end, new_item) def compare(self, left_item, right_item): return self._labels[left_item] < self._labels[right_item]
from fractions import Fraction from functools import total_ordering class Ordering: _start = object() _end = object() def __init__(self): self._labels = { self._start: Fraction(0), self._end: Fraction(1) } self._successors = { self._start: self._end } self._predecessors = { self._end: self._start } def insert_after(self, existing_item, new_item): self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2 self._successors[new_item] = self._successors[existing_item] self._predecessors[new_item] = existing_item self._predecessors[self._successors[existing_item]] = new_item self._successors[existing_item] = new_item return OrderingItem(self, new_item) def insert_before(self, existing_item, new_item): return self.insert_after(self._predecessors[existing_item], new_item) def insert_start(self, new_item): return self.insert_after(self._start, new_item) def insert_end(self, new_item): return self.insert_before(self._end, new_item) def compare(self, left_item, right_item): return self._labels[left_item] < self._labels[right_item] @total_ordering class OrderingItem: def __init__(self, ordering, item): self.ordering = ordering self.item = item def __lt__(self, other): return self.ordering.compare(self.item, other.item)
Add class representing an element in the ordering
Add class representing an element in the ordering
Python
mit
madman-bob/python-order-maintenance
from fractions import Fraction + from functools import total_ordering class Ordering: _start = object() _end = object() def __init__(self): self._labels = { self._start: Fraction(0), self._end: Fraction(1) } self._successors = { self._start: self._end } self._predecessors = { self._end: self._start } def insert_after(self, existing_item, new_item): self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2 self._successors[new_item] = self._successors[existing_item] self._predecessors[new_item] = existing_item self._predecessors[self._successors[existing_item]] = new_item self._successors[existing_item] = new_item + return OrderingItem(self, new_item) + def insert_before(self, existing_item, new_item): - self.insert_after(self._predecessors[existing_item], new_item) + return self.insert_after(self._predecessors[existing_item], new_item) def insert_start(self, new_item): - self.insert_after(self._start, new_item) + return self.insert_after(self._start, new_item) def insert_end(self, new_item): - self.insert_before(self._end, new_item) + return self.insert_before(self._end, new_item) def compare(self, left_item, right_item): return self._labels[left_item] < self._labels[right_item] + + @total_ordering + class OrderingItem: + def __init__(self, ordering, item): + self.ordering = ordering + self.item = item + + def __lt__(self, other): + return self.ordering.compare(self.item, other.item) +
Add class representing an element in the ordering
## Code Before: from fractions import Fraction class Ordering: _start = object() _end = object() def __init__(self): self._labels = { self._start: Fraction(0), self._end: Fraction(1) } self._successors = { self._start: self._end } self._predecessors = { self._end: self._start } def insert_after(self, existing_item, new_item): self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2 self._successors[new_item] = self._successors[existing_item] self._predecessors[new_item] = existing_item self._predecessors[self._successors[existing_item]] = new_item self._successors[existing_item] = new_item def insert_before(self, existing_item, new_item): self.insert_after(self._predecessors[existing_item], new_item) def insert_start(self, new_item): self.insert_after(self._start, new_item) def insert_end(self, new_item): self.insert_before(self._end, new_item) def compare(self, left_item, right_item): return self._labels[left_item] < self._labels[right_item] ## Instruction: Add class representing an element in the ordering ## Code After: from fractions import Fraction from functools import total_ordering class Ordering: _start = object() _end = object() def __init__(self): self._labels = { self._start: Fraction(0), self._end: Fraction(1) } self._successors = { self._start: self._end } self._predecessors = { self._end: self._start } def insert_after(self, existing_item, new_item): self._labels[new_item] = (self._labels[existing_item] + self._labels[self._successors[existing_item]]) / 2 self._successors[new_item] = self._successors[existing_item] self._predecessors[new_item] = existing_item self._predecessors[self._successors[existing_item]] = new_item self._successors[existing_item] = new_item return OrderingItem(self, new_item) def insert_before(self, existing_item, new_item): return self.insert_after(self._predecessors[existing_item], new_item) def insert_start(self, new_item): return self.insert_after(self._start, new_item) def insert_end(self, new_item): return self.insert_before(self._end, new_item) def compare(self, left_item, right_item): return self._labels[left_item] < self._labels[right_item] @total_ordering class OrderingItem: def __init__(self, ordering, item): self.ordering = ordering self.item = item def __lt__(self, other): return self.ordering.compare(self.item, other.item)
# ... existing code ... from fractions import Fraction from functools import total_ordering # ... modified code ... return OrderingItem(self, new_item) def insert_before(self, existing_item, new_item): return self.insert_after(self._predecessors[existing_item], new_item) ... def insert_start(self, new_item): return self.insert_after(self._start, new_item) ... def insert_end(self, new_item): return self.insert_before(self._end, new_item) ... return self._labels[left_item] < self._labels[right_item] @total_ordering class OrderingItem: def __init__(self, ordering, item): self.ordering = ordering self.item = item def __lt__(self, other): return self.ordering.compare(self.item, other.item) # ... rest of the code ...
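`functools.total_ordering`, which the commit applies to `OrderingItem`, derives the remaining rich comparisons from the ones you define. A self-contained sketch of the decorator's behaviour (the `Label` class is illustrative; defining `__eq__` alongside `__lt__` is the documented usage):

from functools import total_ordering

@total_ordering
class Label(object):  # illustrative stand-in
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return self.value == other.value

    def __lt__(self, other):
        return self.value < other.value

assert Label(1) <= Label(2)  # <=, >, and >= are filled in by the decorator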
88fc0f980f0efa403ab5ce7d6775bce008b284fc
_setup_database.py
_setup_database.py
import argparse from setup.create_teams import migrate_teams from setup.create_divisions import create_divisions from setup.create_players import migrate_players from setup.create_player_seasons import create_player_seasons from setup.create_player_seasons import create_player_data from utils import prepare_logging prepare_logging(log_types=['file', 'screen']) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Setup script for NHL database creation.') parser.add_argument( 'steps', metavar='setup_steps', help='Setup steps to execute.', choices=['a', 't', 'd', 'p', 'ps', 'pd']) args = parser.parse_args() setup_steps = args.steps # migrating teams from json file to database if setup_steps in ['t', 'a']: migrate_teams(simulation=True) # creating divisions from division configuration file if setup_steps in ['d', 'a']: create_divisions(simulation=True) # migrating players from json file to database if setup_steps in ['p', 'a']: migrate_players(simulation=True) # retrieving player season statistics for all players in database if setup_steps in ['ps', 'a']: create_player_seasons(simulation=False) # retrieving individual player data for all players in database if setup_steps in ['pd', 'a']: create_player_data(simulation=False)
import argparse from setup.create_teams import migrate_teams from setup.create_divisions import create_divisions from setup.create_players import migrate_players from setup.create_player_seasons import create_player_seasons from setup.create_player_seasons import create_player_data from setup.create_player_seasons import create_player_contracts from utils import prepare_logging prepare_logging(log_types=['file', 'screen']) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Setup script for NHL database creation.') parser.add_argument( 'steps', metavar='setup_steps', help='Setup steps to execute.', choices=['a', 'c', 't', 'd', 'p', 'ps', 'pd']) args = parser.parse_args() setup_steps = args.steps # migrating teams from json file to database if setup_steps in ['t', 'a']: migrate_teams(simulation=True) # creating divisions from division configuration file if setup_steps in ['d', 'a']: create_divisions(simulation=True) # migrating players from json file to database if setup_steps in ['p', 'a']: migrate_players(simulation=True) # retrieving player season statistics for all players in database if setup_steps in ['ps', 'a']: create_player_seasons(simulation=False) # retrieving individual player data for all players in database if setup_steps in ['pd', 'a']: create_player_data(simulation=False) if setup_steps in ['c']: create_player_contracts(simulation=False)
Add contract retrieval option to database setup script
Add contract retrieval option to database setup script
Python
mit
leaffan/pynhldb
import argparse from setup.create_teams import migrate_teams from setup.create_divisions import create_divisions from setup.create_players import migrate_players from setup.create_player_seasons import create_player_seasons from setup.create_player_seasons import create_player_data + from setup.create_player_seasons import create_player_contracts from utils import prepare_logging prepare_logging(log_types=['file', 'screen']) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Setup script for NHL database creation.') parser.add_argument( 'steps', metavar='setup_steps', help='Setup steps to execute.', - choices=['a', 't', 'd', 'p', 'ps', 'pd']) + choices=['a', 'c', 't', 'd', 'p', 'ps', 'pd']) args = parser.parse_args() setup_steps = args.steps # migrating teams from json file to database if setup_steps in ['t', 'a']: migrate_teams(simulation=True) # creating divisions from division configuration file if setup_steps in ['d', 'a']: create_divisions(simulation=True) # migrating players from json file to database if setup_steps in ['p', 'a']: migrate_players(simulation=True) # retrieving player season statistics for all players in database if setup_steps in ['ps', 'a']: create_player_seasons(simulation=False) # retrieving individual player data for all players in database if setup_steps in ['pd', 'a']: create_player_data(simulation=False) + if setup_steps in ['c']: + create_player_contracts(simulation=False) +
Add contract retrieval option to database setup script
## Code Before: import argparse from setup.create_teams import migrate_teams from setup.create_divisions import create_divisions from setup.create_players import migrate_players from setup.create_player_seasons import create_player_seasons from setup.create_player_seasons import create_player_data from utils import prepare_logging prepare_logging(log_types=['file', 'screen']) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Setup script for NHL database creation.') parser.add_argument( 'steps', metavar='setup_steps', help='Setup steps to execute.', choices=['a', 't', 'd', 'p', 'ps', 'pd']) args = parser.parse_args() setup_steps = args.steps # migrating teams from json file to database if setup_steps in ['t', 'a']: migrate_teams(simulation=True) # creating divisions from division configuration file if setup_steps in ['d', 'a']: create_divisions(simulation=True) # migrating players from json file to database if setup_steps in ['p', 'a']: migrate_players(simulation=True) # retrieving player season statistics for all players in database if setup_steps in ['ps', 'a']: create_player_seasons(simulation=False) # retrieving individual player data for all players in database if setup_steps in ['pd', 'a']: create_player_data(simulation=False) ## Instruction: Add contract retrieval option to database setup script ## Code After: import argparse from setup.create_teams import migrate_teams from setup.create_divisions import create_divisions from setup.create_players import migrate_players from setup.create_player_seasons import create_player_seasons from setup.create_player_seasons import create_player_data from setup.create_player_seasons import create_player_contracts from utils import prepare_logging prepare_logging(log_types=['file', 'screen']) if __name__ == '__main__': parser = argparse.ArgumentParser( description='Setup script for NHL database creation.') parser.add_argument( 'steps', metavar='setup_steps', help='Setup steps to execute.', choices=['a', 'c', 't', 'd', 'p', 'ps', 'pd']) args = parser.parse_args() setup_steps = args.steps # migrating teams from json file to database if setup_steps in ['t', 'a']: migrate_teams(simulation=True) # creating divisions from division configuration file if setup_steps in ['d', 'a']: create_divisions(simulation=True) # migrating players from json file to database if setup_steps in ['p', 'a']: migrate_players(simulation=True) # retrieving player season statistics for all players in database if setup_steps in ['ps', 'a']: create_player_seasons(simulation=False) # retrieving individual player data for all players in database if setup_steps in ['pd', 'a']: create_player_data(simulation=False) if setup_steps in ['c']: create_player_contracts(simulation=False)
# ... existing code ... from setup.create_player_seasons import create_player_data from setup.create_player_seasons import create_player_contracts # ... modified code ... 'steps', metavar='setup_steps', help='Setup steps to execute.', choices=['a', 'c', 't', 'd', 'p', 'ps', 'pd']) ... create_player_data(simulation=False) if setup_steps in ['c']: create_player_contracts(simulation=False) # ... rest of the code ...
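Adding `'c'` to the argparse `choices` list is what makes the new step reachable: argparse validates the positional argument before any of the `if setup_steps in ...` branches run. A sketch of that gatekeeping with a trimmed choice list:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('steps', choices=['a', 'c', 't'])
args = parser.parse_args(['c'])   # accepted
assert args.steps == 'c'
# parser.parse_args(['x'])        # would exit with an 'invalid choice' error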
9a94e9e61a7bb1680265692eb7cdf926842aa766
streamline/__init__.py
streamline/__init__.py
from .base import RouteBase, NonIterableRouteBase from .template import TemplateRoute, XHRPartialRoute, ROCARoute from .forms import FormRoute, TemplateFormRoute, XHRPartialFormRoute __version__ = '1.0' __author__ = 'Outernet Inc' __all__ = ( RouteBase, NonIterableRouteBase, TemplateRoute, XHRPartialRoute, ROCARoute, FormRoute, TemplateFormRoute, XHRPartialFormRoute, )
from .base import RouteBase, NonIterableRouteBase from .template import TemplateRoute, XHRPartialRoute, ROCARoute from .forms import FormRoute, TemplateFormRoute, XHRPartialFormRoute __version__ = '1.0' __author__ = 'Outernet Inc' __all__ = ( 'RouteBase', 'NonIterableRouteBase', 'TemplateRoute', 'XHRPartialRoute', 'ROCARoute', 'FormRoute', 'TemplateFormRoute', 'XHRPartialFormRoute', )
Fix __all__ using objects instead of strings
Fix __all__ using objects instead of strings Signed-off-by: Branko Vukelic <[email protected]>
Python
bsd-2-clause
Outernet-Project/bottle-streamline
from .base import RouteBase, NonIterableRouteBase from .template import TemplateRoute, XHRPartialRoute, ROCARoute from .forms import FormRoute, TemplateFormRoute, XHRPartialFormRoute __version__ = '1.0' __author__ = 'Outernet Inc' __all__ = ( - RouteBase, + 'RouteBase', - NonIterableRouteBase, + 'NonIterableRouteBase', - TemplateRoute, + 'TemplateRoute', - XHRPartialRoute, + 'XHRPartialRoute', - ROCARoute, + 'ROCARoute', - FormRoute, + 'FormRoute', - TemplateFormRoute, + 'TemplateFormRoute', - XHRPartialFormRoute, + 'XHRPartialFormRoute', )
Fix __all__ using objects instead of strings
## Code Before: from .base import RouteBase, NonIterableRouteBase from .template import TemplateRoute, XHRPartialRoute, ROCARoute from .forms import FormRoute, TemplateFormRoute, XHRPartialFormRoute __version__ = '1.0' __author__ = 'Outernet Inc' __all__ = ( RouteBase, NonIterableRouteBase, TemplateRoute, XHRPartialRoute, ROCARoute, FormRoute, TemplateFormRoute, XHRPartialFormRoute, ) ## Instruction: Fix __all__ using objects instead of strings ## Code After: from .base import RouteBase, NonIterableRouteBase from .template import TemplateRoute, XHRPartialRoute, ROCARoute from .forms import FormRoute, TemplateFormRoute, XHRPartialFormRoute __version__ = '1.0' __author__ = 'Outernet Inc' __all__ = ( 'RouteBase', 'NonIterableRouteBase', 'TemplateRoute', 'XHRPartialRoute', 'ROCARoute', 'FormRoute', 'TemplateFormRoute', 'XHRPartialFormRoute', )
# ... existing code ... __all__ = ( 'RouteBase', 'NonIterableRouteBase', 'TemplateRoute', 'XHRPartialRoute', 'ROCARoute', 'FormRoute', 'TemplateFormRoute', 'XHRPartialFormRoute', ) # ... rest of the code ...
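The rule behind this fix: `__all__` must hold strings, because `from module import *` looks each entry up by name, so listing the objects themselves breaks star-imports. A short sketch of the distinction (the placeholder class is illustrative):

class RouteBase(object):  # illustrative placeholder
    pass

__all__ = ('RouteBase',)      # correct: a string naming the export
# __all__ = (RouteBase,)      # wrong: star-imports fail with TypeError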
95eb2c11a4f35e594eda25c10bdf85a25b2f4392
src/ConfigLoader.py
src/ConfigLoader.py
import json import sys def load_config_file(out=sys.stdout): default_filepath = "../resources/config/default-config.json" user_filepath = "../resources/config/user-config.json" try: default_json = read_json(default_filepath) user_json = read_json(user_filepath) for property in user_json: default_json[property] = user_json[property] except FileNotFoundError as e: out.write("Cannot find file: " + e.filename) else: out.write("Read styling config JSON correctly.") return default_json def read_json(filepath): config_string = '' with open(filepath) as f: for line in f: line = line.lstrip() if not line.startswith("//"): config_string += line config_json = json.loads(config_string) return config_json if __name__ == "__main__": load_config_file()
import json import sys def load_config_file(out=sys.stdout): if sys.argv[0].endswith('nosetests'): default_filepath = "./resources/config/default-config.json" user_filepath = "./resources/config/user-config.json" else: default_filepath = "../resources/config/default-config.json" user_filepath = "../resources/config/user-config.json" try: default_json = read_json(default_filepath) user_json = read_json(user_filepath) for property in user_json: default_json[property] = user_json[property] except FileNotFoundError as e: out.write("Cannot find file: " + e.filename) else: out.write("Read styling config JSON correctly.") return default_json def read_json(filepath): config_string = '' with open(filepath) as f: for line in f: line = line.lstrip() if not line.startswith("//"): config_string += line config_json = json.loads(config_string) return config_json if __name__ == "__main__": load_config_file()
Fix nosetests for config file loading
Fix nosetests for config file loading
Python
bsd-3-clause
sky-uk/bslint
import json import sys def load_config_file(out=sys.stdout): + if sys.argv[0].endswith('nosetests'): + default_filepath = "./resources/config/default-config.json" + user_filepath = "./resources/config/user-config.json" + else: - default_filepath = "../resources/config/default-config.json" + default_filepath = "../resources/config/default-config.json" - user_filepath = "../resources/config/user-config.json" + user_filepath = "../resources/config/user-config.json" try: default_json = read_json(default_filepath) user_json = read_json(user_filepath) for property in user_json: default_json[property] = user_json[property] except FileNotFoundError as e: out.write("Cannot find file: " + e.filename) else: out.write("Read styling config JSON correctly.") return default_json def read_json(filepath): config_string = '' with open(filepath) as f: for line in f: line = line.lstrip() if not line.startswith("//"): config_string += line config_json = json.loads(config_string) return config_json if __name__ == "__main__": load_config_file()
Fix nosetests for config file loading
## Code Before: import json import sys def load_config_file(out=sys.stdout): default_filepath = "../resources/config/default-config.json" user_filepath = "../resources/config/user-config.json" try: default_json = read_json(default_filepath) user_json = read_json(user_filepath) for property in user_json: default_json[property] = user_json[property] except FileNotFoundError as e: out.write("Cannot find file: " + e.filename) else: out.write("Read styling config JSON correctly.") return default_json def read_json(filepath): config_string = '' with open(filepath) as f: for line in f: line = line.lstrip() if not line.startswith("//"): config_string += line config_json = json.loads(config_string) return config_json if __name__ == "__main__": load_config_file() ## Instruction: Fix nosetests for config file loading ## Code After: import json import sys def load_config_file(out=sys.stdout): if sys.argv[0].endswith('nosetests'): default_filepath = "./resources/config/default-config.json" user_filepath = "./resources/config/user-config.json" else: default_filepath = "../resources/config/default-config.json" user_filepath = "../resources/config/user-config.json" try: default_json = read_json(default_filepath) user_json = read_json(user_filepath) for property in user_json: default_json[property] = user_json[property] except FileNotFoundError as e: out.write("Cannot find file: " + e.filename) else: out.write("Read styling config JSON correctly.") return default_json def read_json(filepath): config_string = '' with open(filepath) as f: for line in f: line = line.lstrip() if not line.startswith("//"): config_string += line config_json = json.loads(config_string) return config_json if __name__ == "__main__": load_config_file()
// ... existing code ... def load_config_file(out=sys.stdout): if sys.argv[0].endswith('nosetests'): default_filepath = "./resources/config/default-config.json" user_filepath = "./resources/config/user-config.json" else: default_filepath = "../resources/config/default-config.json" user_filepath = "../resources/config/user-config.json" // ... rest of the code ...
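The fix keys off `sys.argv[0]`: nose launches the suite from the project root, so the relative path to the config resources differs from a direct script run. A sketch of just that detection (the paths are illustrative):

import sys

# Nose invokes the process with argv[0] ending in 'nosetests', so the
# config root must be resolved relative to the project root instead.
if sys.argv[0].endswith('nosetests'):
    config_root = './resources/config'
else:
    config_root = '../resources/config'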
43f67067c470386b6b24080642cc845ec1655f58
utils/networking.py
utils/networking.py
import fcntl import socket import struct from contextlib import contextmanager @contextmanager def use_interface(ifname): """ :type ifname: str """ ip = _ip_address_for_interface(ifname.encode('ascii')) original_socket = socket.socket def rebound_socket(*args, **kwargs): sock = original_socket(*args, **kwargs) sock.bind((ip, 0)) return sock socket.socket = rebound_socket yield socket.socket = original_socket def _ip_address_for_interface(ifname): """ :type ifname: bytes :rtype: str """ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl( sock.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24])
import fcntl import socket import struct from contextlib import contextmanager @contextmanager def use_interface(ifname): """ :type ifname: str """ ip = _ip_address_for_interface(ifname) original_socket = socket.socket def rebound_socket(*args, **kwargs): sock = original_socket(*args, **kwargs) sock.bind((ip, 0)) return sock socket.socket = rebound_socket yield socket.socket = original_socket def _ip_address_for_interface(ifname): """ :type ifname: str :rtype: str """ ifname = ifname.encode('ascii') sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl( sock.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24])
Make _ip_address_for_interface easier to use
Make _ip_address_for_interface easier to use
Python
apache-2.0
OPWEN/opwen-webapp,ascoderu/opwen-webapp,ascoderu/opwen-webapp,OPWEN/opwen-webapp,OPWEN/opwen-webapp,ascoderu/opwen-cloudserver,ascoderu/opwen-cloudserver,ascoderu/opwen-webapp
import fcntl import socket import struct from contextlib import contextmanager @contextmanager def use_interface(ifname): """ :type ifname: str """ - ip = _ip_address_for_interface(ifname.encode('ascii')) + ip = _ip_address_for_interface(ifname) original_socket = socket.socket def rebound_socket(*args, **kwargs): sock = original_socket(*args, **kwargs) sock.bind((ip, 0)) return sock socket.socket = rebound_socket yield socket.socket = original_socket def _ip_address_for_interface(ifname): """ - :type ifname: bytes + :type ifname: str :rtype: str """ + ifname = ifname.encode('ascii') sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl( sock.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24])
Make _ip_address_for_interface easier to use
## Code Before: import fcntl import socket import struct from contextlib import contextmanager @contextmanager def use_interface(ifname): """ :type ifname: str """ ip = _ip_address_for_interface(ifname.encode('ascii')) original_socket = socket.socket def rebound_socket(*args, **kwargs): sock = original_socket(*args, **kwargs) sock.bind((ip, 0)) return sock socket.socket = rebound_socket yield socket.socket = original_socket def _ip_address_for_interface(ifname): """ :type ifname: bytes :rtype: str """ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl( sock.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24]) ## Instruction: Make _ip_address_for_interface easier to use ## Code After: import fcntl import socket import struct from contextlib import contextmanager @contextmanager def use_interface(ifname): """ :type ifname: str """ ip = _ip_address_for_interface(ifname) original_socket = socket.socket def rebound_socket(*args, **kwargs): sock = original_socket(*args, **kwargs) sock.bind((ip, 0)) return sock socket.socket = rebound_socket yield socket.socket = original_socket def _ip_address_for_interface(ifname): """ :type ifname: str :rtype: str """ ifname = ifname.encode('ascii') sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) return socket.inet_ntoa(fcntl.ioctl( sock.fileno(), 0x8915, # SIOCGIFADDR struct.pack('256s', ifname[:15]) )[20:24])
# ... existing code ... """ ip = _ip_address_for_interface(ifname) original_socket = socket.socket # ... modified code ... """ :type ifname: str :rtype: str ... """ ifname = ifname.encode('ascii') sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # ... rest of the code ...
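Moving the `.encode('ascii')` call inside `_ip_address_for_interface` keeps the bytes conversion next to the `ioctl` that needs it, so callers pass ordinary strings. A tiny sketch of that boundary (the helper name is illustrative):

def _pack_ifname(ifname):
    # Accept str at the API boundary; produce the truncated bytes the
    # low-level interface call expects.
    return ifname.encode('ascii')[:15]

assert _pack_ifname('eth0') == b'eth0'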
4bc9d1b51cd735c366edce81cd4e36e2eca904c7
worker/models/spotify_artist.py
worker/models/spotify_artist.py
from spotify_item import SpotifyItem class Artist(SpotifyItem): def __init__(self, **entries): super(Artist, self).__init__(**entries) def __repr__(self): return '<Artist: {0}>'.format(self.name)
from spotify_item import SpotifyItem from pyechonest import config from pyechonest import artist from worker.config import ECHO_NEST_API_KEY config.ECHO_NEST_API_KEY = ECHO_NEST_API_KEY class Artist(SpotifyItem): def __init__(self, **entries): super(Artist, self).__init__(**entries) self.echonest = artist.Artist(self.uri) def __repr__(self): return '<Artist: {0}>'.format(self.name)
Add echo nest to artist model
Add echo nest to artist model
Python
mit
projectweekend/song-feed-worker
from spotify_item import SpotifyItem + from pyechonest import config + from pyechonest import artist + from worker.config import ECHO_NEST_API_KEY + + + config.ECHO_NEST_API_KEY = ECHO_NEST_API_KEY class Artist(SpotifyItem): def __init__(self, **entries): super(Artist, self).__init__(**entries) + self.echonest = artist.Artist(self.uri) def __repr__(self): return '<Artist: {0}>'.format(self.name)
Add echo nest to artist model
## Code Before: from spotify_item import SpotifyItem class Artist(SpotifyItem): def __init__(self, **entries): super(Artist, self).__init__(**entries) def __repr__(self): return '<Artist: {0}>'.format(self.name) ## Instruction: Add echo nest to artist model ## Code After: from spotify_item import SpotifyItem from pyechonest import config from pyechonest import artist from worker.config import ECHO_NEST_API_KEY config.ECHO_NEST_API_KEY = ECHO_NEST_API_KEY class Artist(SpotifyItem): def __init__(self, **entries): super(Artist, self).__init__(**entries) self.echonest = artist.Artist(self.uri) def __repr__(self): return '<Artist: {0}>'.format(self.name)
// ... existing code ... from spotify_item import SpotifyItem from pyechonest import config from pyechonest import artist from worker.config import ECHO_NEST_API_KEY config.ECHO_NEST_API_KEY = ECHO_NEST_API_KEY // ... modified code ... super(Artist, self).__init__(**entries) self.echonest = artist.Artist(self.uri) // ... rest of the code ...
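Worth noting about the change above: assigning `config.ECHO_NEST_API_KEY` at module level configures the pyechonest client globally as a side effect of importing the model. A sketch of that pattern (the key is a placeholder; in practice it comes from application config, as in the commit):

from pyechonest import config

config.ECHO_NEST_API_KEY = 'YOUR-API-KEY'  # placeholder value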
d08d78460d0f7143b90a5157c4f5450bb062ec75
im2sim.py
im2sim.py
import argparse import PIL from PIL import Image import subprocess import os import glob def get_image(filename): p = Image.open(filename) docker_image = p.info['im2sim_image'] return subprocess.call('docker pull {}'.format(docker_image), shell=True) print('Pulled docker image {}'.format(docker_image)) def tag_images(docker_image): subprocess.call(['mkdir', '-p', 'figures']) subprocess.call("docker run -v {}/figures:/home/pyro/pyro2/figures " "{} make figures".format(os.getcwd(), docker_image), shell=True) figures = glob.glob('{}/figures/*.png'.format(os.getcwd())) for filename in figures: p = Image.open(filename) info = PIL.PngImagePlugin.PngInfo() info.add_text('im2sim_image', docker_image) p.save(filename, pnginfo = info) return None parser = argparse.ArgumentParser() parser.add_argument("action", help="'pull', 'tag'") parser.add_argument("object", help="Figure file (if pulling)" " or docker container (if tagging)") args = parser.parse_args() print("Action {}, Object {}".format(args.action, args.object)) if args.action == 'pull': get_image(args.object) elif args.action == 'tag': tag_images(args.object) else: print("Action must be either 'pull' or 'tag'.")
import argparse import PIL from PIL import Image import subprocess import os import glob def get_image(filename): p = Image.open(filename) docker_image = p.info['im2sim_image'] return subprocess.call('docker pull {}'.format(docker_image), shell=True) print('Pulled docker image {}'.format(docker_image)) def tag_images(docker_image): subprocess.call(['mkdir', '-p', 'figures']) subprocess.call("docker run -v {}/figures:/figures " "{} make figures".format(os.getcwd(), docker_image), shell=True) figures = glob.glob('{}/figures/*.png'.format(os.getcwd())) for filename in figures: p = Image.open(filename) info = PIL.PngImagePlugin.PngInfo() info.add_text('im2sim_image', docker_image) p.save(filename, pnginfo = info) return None parser = argparse.ArgumentParser() parser.add_argument("action", help="'pull', 'tag'") parser.add_argument("object", help="Figure file (if pulling)" " or docker container (if tagging)") args = parser.parse_args() print("Action {}, Object {}".format(args.action, args.object)) if args.action == 'pull': get_image(args.object) elif args.action == 'tag': tag_images(args.object) else: print("Action must be either 'pull' or 'tag'.")
Make script consistent with instructions.
Make script consistent with instructions.
Python
mit
IanHawke/im2sim
import argparse import PIL from PIL import Image import subprocess import os import glob def get_image(filename): p = Image.open(filename) docker_image = p.info['im2sim_image'] return subprocess.call('docker pull {}'.format(docker_image), shell=True) print('Pulled docker image {}'.format(docker_image)) def tag_images(docker_image): subprocess.call(['mkdir', '-p', 'figures']) - subprocess.call("docker run -v {}/figures:/home/pyro/pyro2/figures " + subprocess.call("docker run -v {}/figures:/figures " "{} make figures".format(os.getcwd(), docker_image), shell=True) figures = glob.glob('{}/figures/*.png'.format(os.getcwd())) for filename in figures: p = Image.open(filename) info = PIL.PngImagePlugin.PngInfo() info.add_text('im2sim_image', docker_image) p.save(filename, pnginfo = info) return None parser = argparse.ArgumentParser() parser.add_argument("action", help="'pull', 'tag'") parser.add_argument("object", help="Figure file (if pulling)" " or docker container (if tagging)") args = parser.parse_args() print("Action {}, Object {}".format(args.action, args.object)) if args.action == 'pull': get_image(args.object) elif args.action == 'tag': tag_images(args.object) else: print("Action must be either 'pull' or 'tag'.")
Make script consistent with instructions.
## Code Before: import argparse import PIL from PIL import Image import subprocess import os import glob def get_image(filename): p = Image.open(filename) docker_image = p.info['im2sim_image'] return subprocess.call('docker pull {}'.format(docker_image), shell=True) print('Pulled docker image {}'.format(docker_image)) def tag_images(docker_image): subprocess.call(['mkdir', '-p', 'figures']) subprocess.call("docker run -v {}/figures:/home/pyro/pyro2/figures " "{} make figures".format(os.getcwd(), docker_image), shell=True) figures = glob.glob('{}/figures/*.png'.format(os.getcwd())) for filename in figures: p = Image.open(filename) info = PIL.PngImagePlugin.PngInfo() info.add_text('im2sim_image', docker_image) p.save(filename, pnginfo = info) return None parser = argparse.ArgumentParser() parser.add_argument("action", help="'pull', 'tag'") parser.add_argument("object", help="Figure file (if pulling)" " or docker container (if tagging)") args = parser.parse_args() print("Action {}, Object {}".format(args.action, args.object)) if args.action == 'pull': get_image(args.object) elif args.action == 'tag': tag_images(args.object) else: print("Action must be either 'pull' or 'tag'.") ## Instruction: Make script consistent with instructions. ## Code After: import argparse import PIL from PIL import Image import subprocess import os import glob def get_image(filename): p = Image.open(filename) docker_image = p.info['im2sim_image'] return subprocess.call('docker pull {}'.format(docker_image), shell=True) print('Pulled docker image {}'.format(docker_image)) def tag_images(docker_image): subprocess.call(['mkdir', '-p', 'figures']) subprocess.call("docker run -v {}/figures:/figures " "{} make figures".format(os.getcwd(), docker_image), shell=True) figures = glob.glob('{}/figures/*.png'.format(os.getcwd())) for filename in figures: p = Image.open(filename) info = PIL.PngImagePlugin.PngInfo() info.add_text('im2sim_image', docker_image) p.save(filename, pnginfo = info) return None parser = argparse.ArgumentParser() parser.add_argument("action", help="'pull', 'tag'") parser.add_argument("object", help="Figure file (if pulling)" " or docker container (if tagging)") args = parser.parse_args() print("Action {}, Object {}".format(args.action, args.object)) if args.action == 'pull': get_image(args.object) elif args.action == 'tag': tag_images(args.object) else: print("Action must be either 'pull' or 'tag'.")
...
    subprocess.call(['mkdir', '-p', 'figures'])
    subprocess.call("docker run -v {}/figures:/figures "
                    "{} make figures".format(os.getcwd(), docker_image),
                    shell=True)
...
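Both halves of this script hinge on PNG text chunks: `tag_images` embeds the Docker image name with `PngImagePlugin.PngInfo`, and `get_image` reads it back from `Image.open(...).info`. A minimal round-trip sketch of that mechanism, using a hypothetical `figure.png` and image name:

```python
from PIL import Image, PngImagePlugin

# Embed a custom text chunk in a PNG (filename and tag are illustrative).
img = Image.open('figure.png')
meta = PngImagePlugin.PngInfo()
meta.add_text('im2sim_image', 'someuser/somesim:latest')
img.save('figure.png', pnginfo=meta)

# Text chunks reappear in the .info dict when the file is reopened.
print(Image.open('figure.png').info['im2sim_image'])
```

Note, incidentally, that `get_image` as recorded returns before its `print` call, so the confirmation message is unreachable.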
a54a2e735950c5c31ec71613750bdf1ce194389f
django_datastream/urls.py
django_datastream/urls.py
from django.conf import urls

from tastypie import api

from . import resources


v1_api = api.Api(api_name='v1')
v1_api.register(resources.StreamResource())

urlpatterns = urls.patterns(
    '',
    urls.url(r'^', urls.include(v1_api.urls)),
)
from django.conf import urls

from tastypie import api

from . import resources


v1_api = api.Api(api_name='v1')
v1_api.register(resources.StreamResource())

urlpatterns = [
    urls.url(r'^', urls.include(v1_api.urls)),
]
Fix urlpatterns for Django 1.10.
Fix urlpatterns for Django 1.10.
Python
agpl-3.0
wlanslovenija/django-datastream,wlanslovenija/django-datastream,wlanslovenija/django-datastream
from django.conf import urls

from tastypie import api

from . import resources


v1_api = api.Api(api_name='v1')
v1_api.register(resources.StreamResource())

- urlpatterns = urls.patterns(
-     '',
+ urlpatterns = [
+     urls.url(r'^', urls.include(v1_api.urls)),
+ ]
-     urls.url(r'^', urls.include(v1_api.urls)),
- )
-
Fix urlpatterns for Django 1.10.
## Code Before:
from django.conf import urls

from tastypie import api

from . import resources


v1_api = api.Api(api_name='v1')
v1_api.register(resources.StreamResource())

urlpatterns = urls.patterns(
    '',
    urls.url(r'^', urls.include(v1_api.urls)),
)
## Instruction:
Fix urlpatterns for Django 1.10.
## Code After:
from django.conf import urls

from tastypie import api

from . import resources


v1_api = api.Api(api_name='v1')
v1_api.register(resources.StreamResource())

urlpatterns = [
    urls.url(r'^', urls.include(v1_api.urls)),
]
// ... existing code ...
urlpatterns = [
    urls.url(r'^', urls.include(v1_api.urls)),
]
// ... rest of the code ...
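For background: `django.conf.urls.patterns()` was deprecated in Django 1.8 and removed in 1.10, which is why `urlpatterns` here becomes a plain list of `url()` entries. A minimal sketch of the replacement style, with a hypothetical view for illustration:

```python
from django.conf.urls import url, include
from django.http import HttpResponse

def index(request):  # hypothetical view, not part of the original change
    return HttpResponse('ok')

urlpatterns = [
    url(r'^$', index),
    # include() is unchanged and still accepts nested URLconfs,
    # such as a tastypie Api instance's .urls attribute.
]
```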
336313ac982b9de278aca6e0f74f6820ef5b2526
landlab/grid/structured_quad/tests/test_links.py
landlab/grid/structured_quad/tests/test_links.py
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import raises, assert_true

from landlab.grid.structured_quad.nodes import status_with_perimeter_as_boundary
from landlab.grid.structured_quad.links import active_link_ids
from landlab.grid.base import CORE_NODE, FIXED_VALUE_BOUNDARY, CLOSED_BOUNDARY


def test_active_links_ids():
    status = np.empty((4, 5), dtype=int)
    status.fill(CLOSED_BOUNDARY)
    status[1, 2] = status[1, 3] = status[2, 2] = status[2, 3] = CORE_NODE

    link_ids = active_link_ids((4, 5), status)

    assert_array_equal(link_ids, [7, 8, 21, 25])
    assert_true(str(link_ids.dtype).startswith('int'))


def test_active_links_with_edge_boundaries():
    status = status_with_perimeter_as_boundary((3, 4))
    link_ids = active_link_ids((3, 4), status)

    assert_array_equal(link_ids, [1, 2, 5, 6, 11, 12, 13])
    assert_true(str(link_ids.dtype).startswith('int'))


@raises(ValueError)
def test_active_link_ids_with_shape_mismatch():
    active_link_ids((3, 4), np.zeros(3))
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import raises, assert_equal

from landlab.grid.structured_quad.nodes import status_with_perimeter_as_boundary
from landlab.grid.structured_quad.links import active_link_ids
from landlab.grid.base import CORE_NODE, FIXED_VALUE_BOUNDARY, CLOSED_BOUNDARY


def test_active_links_ids():
    status = np.empty((4, 5), dtype=int)
    status.fill(CLOSED_BOUNDARY)
    status[1, 2] = status[1, 3] = status[2, 2] = status[2, 3] = CORE_NODE

    link_ids = active_link_ids((4, 5), status)

    assert_array_equal(link_ids, [7, 8, 21, 25])
    assert_equal(link_ids.dtype, np.int)


def test_active_links_with_edge_boundaries():
    status = status_with_perimeter_as_boundary((3, 4))
    link_ids = active_link_ids((3, 4), status)

    assert_array_equal(link_ids, [1, 2, 5, 6, 11, 12, 13])
    assert_equal(link_ids.dtype, np.int)


@raises(ValueError)
def test_active_link_ids_with_shape_mismatch():
    active_link_ids((3, 4), np.zeros(3))
Test that index arrays are of type int.
Test that index arrays are of type int.
Python
mit
ManuSchmi88/landlab,amandersillinois/landlab,SiccarPoint/landlab,cmshobe/landlab,amandersillinois/landlab,SiccarPoint/landlab,cmshobe/landlab,laijingtao/landlab,RondaStrauch/landlab,Carralex/landlab,Carralex/landlab,ManuSchmi88/landlab,cmshobe/landlab,Carralex/landlab,landlab/landlab,csherwood-usgs/landlab,ManuSchmi88/landlab,landlab/landlab,laijingtao/landlab,RondaStrauch/landlab,decvalts/landlab,RondaStrauch/landlab,decvalts/landlab,landlab/landlab,csherwood-usgs/landlab
import numpy as np
from numpy.testing import assert_array_equal
- from nose.tools import raises, assert_true
+ from nose.tools import raises, assert_equal

from landlab.grid.structured_quad.nodes import status_with_perimeter_as_boundary
from landlab.grid.structured_quad.links import active_link_ids
from landlab.grid.base import CORE_NODE, FIXED_VALUE_BOUNDARY, CLOSED_BOUNDARY


def test_active_links_ids():
    status = np.empty((4, 5), dtype=int)
    status.fill(CLOSED_BOUNDARY)
    status[1, 2] = status[1, 3] = status[2, 2] = status[2, 3] = CORE_NODE

    link_ids = active_link_ids((4, 5), status)

    assert_array_equal(link_ids, [7, 8, 21, 25])
-     assert_true(str(link_ids.dtype).startswith('int'))
+     assert_equal(link_ids.dtype, np.int)


def test_active_links_with_edge_boundaries():
    status = status_with_perimeter_as_boundary((3, 4))
    link_ids = active_link_ids((3, 4), status)

    assert_array_equal(link_ids, [1, 2, 5, 6, 11, 12, 13])
-     assert_true(str(link_ids.dtype).startswith('int'))
+     assert_equal(link_ids.dtype, np.int)


@raises(ValueError)
def test_active_link_ids_with_shape_mismatch():
    active_link_ids((3, 4), np.zeros(3))
Test that index arrays are of type int.
## Code Before:
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import raises, assert_true

from landlab.grid.structured_quad.nodes import status_with_perimeter_as_boundary
from landlab.grid.structured_quad.links import active_link_ids
from landlab.grid.base import CORE_NODE, FIXED_VALUE_BOUNDARY, CLOSED_BOUNDARY


def test_active_links_ids():
    status = np.empty((4, 5), dtype=int)
    status.fill(CLOSED_BOUNDARY)
    status[1, 2] = status[1, 3] = status[2, 2] = status[2, 3] = CORE_NODE

    link_ids = active_link_ids((4, 5), status)

    assert_array_equal(link_ids, [7, 8, 21, 25])
    assert_true(str(link_ids.dtype).startswith('int'))


def test_active_links_with_edge_boundaries():
    status = status_with_perimeter_as_boundary((3, 4))
    link_ids = active_link_ids((3, 4), status)

    assert_array_equal(link_ids, [1, 2, 5, 6, 11, 12, 13])
    assert_true(str(link_ids.dtype).startswith('int'))


@raises(ValueError)
def test_active_link_ids_with_shape_mismatch():
    active_link_ids((3, 4), np.zeros(3))
## Instruction:
Test that index arrays are of type int.
## Code After:
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import raises, assert_equal

from landlab.grid.structured_quad.nodes import status_with_perimeter_as_boundary
from landlab.grid.structured_quad.links import active_link_ids
from landlab.grid.base import CORE_NODE, FIXED_VALUE_BOUNDARY, CLOSED_BOUNDARY


def test_active_links_ids():
    status = np.empty((4, 5), dtype=int)
    status.fill(CLOSED_BOUNDARY)
    status[1, 2] = status[1, 3] = status[2, 2] = status[2, 3] = CORE_NODE

    link_ids = active_link_ids((4, 5), status)

    assert_array_equal(link_ids, [7, 8, 21, 25])
    assert_equal(link_ids.dtype, np.int)


def test_active_links_with_edge_boundaries():
    status = status_with_perimeter_as_boundary((3, 4))
    link_ids = active_link_ids((3, 4), status)

    assert_array_equal(link_ids, [1, 2, 5, 6, 11, 12, 13])
    assert_equal(link_ids.dtype, np.int)


@raises(ValueError)
def test_active_link_ids_with_shape_mismatch():
    active_link_ids((3, 4), np.zeros(3))
# ... existing code ...
from numpy.testing import assert_array_equal
from nose.tools import raises, assert_equal
# ... modified code ...
    assert_array_equal(link_ids, [7, 8, 21, 25])
    assert_equal(link_ids.dtype, np.int)
...
    assert_array_equal(link_ids, [1, 2, 5, 6, 11, 12, 13])
    assert_equal(link_ids.dtype, np.int)
# ... rest of the code ...
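A caveat for anyone rerunning this test today: `np.int` was only ever an alias for Python's builtin `int`; NumPy deprecated it in 1.20 and removed it in 1.24. A sketch of equivalent dtype assertions that avoid the alias:

```python
import numpy as np

link_ids = np.array([7, 8, 21, 25])
# np.dtype(int) is the platform default integer type, i.e. what np.int meant.
assert link_ids.dtype == np.dtype(int)
# Looser check: accept any integer width.
assert np.issubdtype(link_ids.dtype, np.integer)
```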
6dfbbba5abf380e3f47f9190a864faa13cf1599d
data_preparation.py
data_preparation.py
import pandas as pd
import numpy as np

orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')

grouped = order_products_prior_df.groupby('order_id', as_index = False)
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)

def product_ids(group):
    l = []
    for e in group['product_id']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['product_ids'] = grouped.apply(product_ids)

def add_to_cart_orders(group):
    l = []
    for e in group['add_to_cart_order']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['add_to_cart_orders'] = grouped.apply(add_to_cart_orders)

print('First five rows of grouped_data:\n', grouped_data.head())

orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')

print('First five rows of orders_prior_merged:\n', orders_prior_merged.head())
import pandas as pd
import numpy as np

orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')

grouped = order_products_prior_df.groupby('order_id', as_index = False)
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)

def product_ids(group):
    l = []
    for e in group['product_id']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['product_ids'] = grouped.apply(product_ids)

def add_to_cart_orders(group):
    l = []
    for e in group['add_to_cart_order']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['add_to_cart_orders'] = grouped.apply(add_to_cart_orders)

grouped_data['reordered'] = grouped['reordered'].aggregate(np.mean)['reordered'].round()

print('First five rows of grouped_data:\n', grouped_data.head())

orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')

print('First five rows of orders_prior_merged:\n', orders_prior_merged.head())
Merge product reordered column with order ids
feat: Merge product reordered column with order ids
Python
mit
rjegankumar/instacart_prediction_model
import pandas as pd
import numpy as np

orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')

grouped = order_products_prior_df.groupby('order_id', as_index = False)
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)

def product_ids(group):
    l = []
    for e in group['product_id']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['product_ids'] = grouped.apply(product_ids)

def add_to_cart_orders(group):
    l = []
    for e in group['add_to_cart_order']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['add_to_cart_orders'] = grouped.apply(add_to_cart_orders)
+
+ grouped_data['reordered'] = grouped['reordered'].aggregate(np.mean)['reordered'].round()

print('First five rows of grouped_data:\n', grouped_data.head())

orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')

print('First five rows of orders_prior_merged:\n', orders_prior_merged.head())
Merge product reordered column with order ids
## Code Before:
import pandas as pd
import numpy as np

orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')

grouped = order_products_prior_df.groupby('order_id', as_index = False)
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)

def product_ids(group):
    l = []
    for e in group['product_id']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['product_ids'] = grouped.apply(product_ids)

def add_to_cart_orders(group):
    l = []
    for e in group['add_to_cart_order']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['add_to_cart_orders'] = grouped.apply(add_to_cart_orders)

print('First five rows of grouped_data:\n', grouped_data.head())

orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')

print('First five rows of orders_prior_merged:\n', orders_prior_merged.head())
## Instruction:
Merge product reordered column with order ids
## Code After:
import pandas as pd
import numpy as np

orders_prior_df = pd.read_csv('Data/orders_prior_sample.csv')
order_products_prior_df = pd.read_csv('Data/order_products_prior_sample.csv')

grouped = order_products_prior_df.groupby('order_id', as_index = False)
grouped_data = pd.DataFrame()
grouped_data['order_id'] = grouped['order_id'].aggregate(np.mean)

def product_ids(group):
    l = []
    for e in group['product_id']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['product_ids'] = grouped.apply(product_ids)

def add_to_cart_orders(group):
    l = []
    for e in group['add_to_cart_order']:
        l.append(str(e))
    return ' '.join(l)

grouped_data['add_to_cart_orders'] = grouped.apply(add_to_cart_orders)

grouped_data['reordered'] = grouped['reordered'].aggregate(np.mean)['reordered'].round()

print('First five rows of grouped_data:\n', grouped_data.head())

orders_prior_merged = pd.merge(orders_prior_df, grouped_data, on='order_id')

print('First five rows of orders_prior_merged:\n', orders_prior_merged.head())
...
grouped_data['add_to_cart_orders'] = grouped.apply(add_to_cart_orders)

grouped_data['reordered'] = grouped['reordered'].aggregate(np.mean)['reordered'].round()

print('First five rows of grouped_data:\n', grouped_data.head())
...
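Both helper functions implement the same pattern: collapse a grouped column into a space-separated string. In pandas this can also be expressed as a per-column aggregation, shown here on toy data (column names follow the Instacart files above; the frame itself is made up):

```python
import pandas as pd

df = pd.DataFrame({'order_id': [1, 1, 2],
                   'product_id': [10, 11, 12],
                   'add_to_cart_order': [1, 2, 1]})

joined = df.groupby('order_id').agg(
    product_ids=('product_id', lambda s: ' '.join(map(str, s))),
    add_to_cart_orders=('add_to_cart_order', lambda s: ' '.join(map(str, s))),
).reset_index()
print(joined)
```

Named aggregation of this form needs pandas 0.25 or later.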
6cb38efab37f8953c8ba56662ba512af0f84432f
tests/semver_test.py
tests/semver_test.py
from unittest import TestCase

from semver import compare
from semver import match
from semver import parse


class TestSemver(TestCase):

    def test_should_parse_version(self):
        self.assertEquals(
            parse("1.2.3-alpha.1.2+build.11.e0f985a"),
            {'major': 1, 'minor': 2, 'patch': 3,
             'prerelease': 'alpha.1.2', 'build': 'build.11.e0f985a'})

    def test_should_get_less(self):
        self.assertEquals(
            compare("1.0.0", "2.0.0"),
            -1)

    def test_should_get_greater(self):
        self.assertEquals(
            compare("2.0.0", "1.0.0"),
            1)

    def test_should_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.6"),
            True)

    def test_should_no_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.8"),
            False)
from unittest import TestCase
from semver import compare
from semver import match
from semver import parse


class TestSemver(TestCase):

    def test_should_parse_version(self):
        self.assertEquals(
            parse("1.2.3-alpha.1.2+build.11.e0f985a"),
            {'major': 1, 'minor': 2, 'patch': 3,
             'prerelease': 'alpha.1.2', 'build': 'build.11.e0f985a'})

    def test_should_get_less(self):
        self.assertEquals(
            compare("1.0.0", "2.0.0"),
            -1)

    def test_should_get_greater(self):
        self.assertEquals(
            compare("2.0.0", "1.0.0"),
            1)

    def test_should_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.6"),
            True)

    def test_should_no_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.8"),
            False)

    def test_should_raise_value_error_for_invalid_value(self):
        self.assertRaises(ValueError, compare, 'foo', 'bar')
        self.assertRaises(ValueError, compare, '1.0', '1.0.0')
        self.assertRaises(ValueError, compare, '1.x', '1.0.0')

    def test_should_raise_value_error_for_invalid_match_expression(self):
        self.assertRaises(ValueError, match, '1.0.0', '')
        self.assertRaises(ValueError, match, '1.0.0', '!')
        self.assertRaises(ValueError, match, '1.0.0', '1.0.0')
Add tests for error cases that prove incompatibility with Python 2.5 and earlier versions.
Add tests for error cases that prove incompatibility with Python 2.5 and earlier versions.
Python
bsd-3-clause
python-semver/python-semver,k-bx/python-semver
from unittest import TestCase
-
from semver import compare
from semver import match
from semver import parse


class TestSemver(TestCase):

    def test_should_parse_version(self):
        self.assertEquals(
            parse("1.2.3-alpha.1.2+build.11.e0f985a"),
            {'major': 1, 'minor': 2, 'patch': 3,
             'prerelease': 'alpha.1.2', 'build': 'build.11.e0f985a'})

    def test_should_get_less(self):
        self.assertEquals(
            compare("1.0.0", "2.0.0"),
            -1)

    def test_should_get_greater(self):
        self.assertEquals(
            compare("2.0.0", "1.0.0"),
            1)

    def test_should_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.6"),
            True)

    def test_should_no_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.8"),
            False)

+     def test_should_raise_value_error_for_invalid_value(self):
+         self.assertRaises(ValueError, compare, 'foo', 'bar')
+         self.assertRaises(ValueError, compare, '1.0', '1.0.0')
+         self.assertRaises(ValueError, compare, '1.x', '1.0.0')
+
+     def test_should_raise_value_error_for_invalid_match_expression(self):
+         self.assertRaises(ValueError, match, '1.0.0', '')
+         self.assertRaises(ValueError, match, '1.0.0', '!')
+         self.assertRaises(ValueError, match, '1.0.0', '1.0.0')
+
Add tests for error cases that prove incompatibility with Python 2.5 and earlier versions.
## Code Before:
from unittest import TestCase

from semver import compare
from semver import match
from semver import parse


class TestSemver(TestCase):

    def test_should_parse_version(self):
        self.assertEquals(
            parse("1.2.3-alpha.1.2+build.11.e0f985a"),
            {'major': 1, 'minor': 2, 'patch': 3,
             'prerelease': 'alpha.1.2', 'build': 'build.11.e0f985a'})

    def test_should_get_less(self):
        self.assertEquals(
            compare("1.0.0", "2.0.0"),
            -1)

    def test_should_get_greater(self):
        self.assertEquals(
            compare("2.0.0", "1.0.0"),
            1)

    def test_should_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.6"),
            True)

    def test_should_no_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.8"),
            False)
## Instruction:
Add tests for error cases that prove incompatibility with Python 2.5 and earlier versions.
## Code After:
from unittest import TestCase
from semver import compare
from semver import match
from semver import parse


class TestSemver(TestCase):

    def test_should_parse_version(self):
        self.assertEquals(
            parse("1.2.3-alpha.1.2+build.11.e0f985a"),
            {'major': 1, 'minor': 2, 'patch': 3,
             'prerelease': 'alpha.1.2', 'build': 'build.11.e0f985a'})

    def test_should_get_less(self):
        self.assertEquals(
            compare("1.0.0", "2.0.0"),
            -1)

    def test_should_get_greater(self):
        self.assertEquals(
            compare("2.0.0", "1.0.0"),
            1)

    def test_should_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.6"),
            True)

    def test_should_no_match_simple(self):
        self.assertEquals(
            match("2.3.7", ">=2.3.8"),
            False)

    def test_should_raise_value_error_for_invalid_value(self):
        self.assertRaises(ValueError, compare, 'foo', 'bar')
        self.assertRaises(ValueError, compare, '1.0', '1.0.0')
        self.assertRaises(ValueError, compare, '1.x', '1.0.0')

    def test_should_raise_value_error_for_invalid_match_expression(self):
        self.assertRaises(ValueError, match, '1.0.0', '')
        self.assertRaises(ValueError, match, '1.0.0', '!')
        self.assertRaises(ValueError, match, '1.0.0', '1.0.0')
# ... existing code ...
from unittest import TestCase
from semver import compare
# ... modified code ...
            False)

    def test_should_raise_value_error_for_invalid_value(self):
        self.assertRaises(ValueError, compare, 'foo', 'bar')
        self.assertRaises(ValueError, compare, '1.0', '1.0.0')
        self.assertRaises(ValueError, compare, '1.x', '1.0.0')

    def test_should_raise_value_error_for_invalid_match_expression(self):
        self.assertRaises(ValueError, match, '1.0.0', '')
        self.assertRaises(ValueError, match, '1.0.0', '!')
        self.assertRaises(ValueError, match, '1.0.0', '1.0.0')
# ... rest of the code ...
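The tests deliberately use the callable form of `assertRaises` (pass the function and its arguments, don't call it), which is what keeps them compatible with old interpreters: the context-manager form only arrived in Python 2.7's unittest. A side-by-side sketch with a toy stand-in for `semver.parse`:

```python
import unittest

def parse(version):
    # Toy stand-in: real semver parsing is much stricter than this.
    if version.count('.') != 2:
        raise ValueError(version)

class Example(unittest.TestCase):
    def test_callable_form(self):  # works on Python 2.5+
        self.assertRaises(ValueError, parse, '1.0')

    def test_context_manager_form(self):  # needs Python 2.7+
        with self.assertRaises(ValueError):
            parse('1.0')
```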
829f71c488f2332d66362d7aea309a8b8958d522
jarviscli/tests/test_voice.py
jarviscli/tests/test_voice.py
import unittest

from tests import PluginTest
from plugins import voice

from CmdInterpreter import JarvisAPI
from Jarvis import Jarvis


# this test class contains test cases for the plugins "gtts" and "disable_gtts"
# which are included in the "voice.py" file in the "plugins" folder
class VoiceTest(PluginTest):

    # test "gtts" plugin
    def setUp(self):
        self.test_gtts = self.load_plugin(voice.gtts)

    def test_gtts(self):
        # run "gtts" plugin code
        self.test_gtts.run(voice.gtts)
        # verify that "gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), True)

    # test "disable_gtts" plugin
    def setUp(self):
        self.test_disable_gtts = self.load_plugin(voice.disable_gtts)

    def test_disable_gtts(self):
        # run "disable_gtts" plugin code
        self.test_disable_gtts.run(voice.disable_gtts)
        # verify that "disable_gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), False)


if __name__ == '__main__':
    unittest.main()
import unittest

from tests import PluginTest
from plugins import voice

from CmdInterpreter import JarvisAPI
from Jarvis import Jarvis


# this test class contains test cases for the plugins "gtts" and "disable_gtts"
# which are included in the "voice.py" file in the "plugins" folder
class VoiceTest(PluginTest):

    # test "gtts" plugin
    def setUp(self):
        self.test_gtts = self.load_plugin(voice.gtts)

    def test_gtts(self):
        # run "gtts" plugin code
        self.test_gtts.gtts(jarvis, self)
        # verify that "gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), True)

    # test "disable_gtts" plugin
    def setUp(self):
        self.test_disable_gtts = self.load_plugin(voice.disable_gtts)

    def test_disable_gtts(self):
        # run "disable_gtts" plugin code
        self.test_disable_gtts.disable_gtts(jarvis, self)
        # verify that "disable_gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), False)


if __name__ == '__main__':
    unittest.main()
Fix unit test of voice function
Fix unit test of voice function
Python
mit
sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis
import unittest

from tests import PluginTest
from plugins import voice

from CmdInterpreter import JarvisAPI
from Jarvis import Jarvis


# this test class contains test cases for the plugins "gtts" and "disable_gtts"
# which are included in the "voice.py" file in the "plugins" folder
class VoiceTest(PluginTest):

    # test "gtts" plugin
    def setUp(self):
        self.test_gtts = self.load_plugin(voice.gtts)

    def test_gtts(self):
        # run "gtts" plugin code
-         self.test_gtts.run(voice.gtts)
+         self.test_gtts.gtts(jarvis, self)
        # verify that "gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), True)

    # test "disable_gtts" plugin
    def setUp(self):
        self.test_disable_gtts = self.load_plugin(voice.disable_gtts)

    def test_disable_gtts(self):
        # run "disable_gtts" plugin code
-         self.test_disable_gtts.run(voice.disable_gtts)
+         self.test_disable_gtts.disable_gtts(jarvis, self)
        # verify that "disable_gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), False)


if __name__ == '__main__':
    unittest.main()
Fix unit test of voice function
## Code Before:
import unittest

from tests import PluginTest
from plugins import voice

from CmdInterpreter import JarvisAPI
from Jarvis import Jarvis


# this test class contains test cases for the plugins "gtts" and "disable_gtts"
# which are included in the "voice.py" file in the "plugins" folder
class VoiceTest(PluginTest):

    # test "gtts" plugin
    def setUp(self):
        self.test_gtts = self.load_plugin(voice.gtts)

    def test_gtts(self):
        # run "gtts" plugin code
        self.test_gtts.run(voice.gtts)
        # verify that "gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), True)

    # test "disable_gtts" plugin
    def setUp(self):
        self.test_disable_gtts = self.load_plugin(voice.disable_gtts)

    def test_disable_gtts(self):
        # run "disable_gtts" plugin code
        self.test_disable_gtts.run(voice.disable_gtts)
        # verify that "disable_gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), False)


if __name__ == '__main__':
    unittest.main()
## Instruction:
Fix unit test of voice function
## Code After:
import unittest

from tests import PluginTest
from plugins import voice

from CmdInterpreter import JarvisAPI
from Jarvis import Jarvis


# this test class contains test cases for the plugins "gtts" and "disable_gtts"
# which are included in the "voice.py" file in the "plugins" folder
class VoiceTest(PluginTest):

    # test "gtts" plugin
    def setUp(self):
        self.test_gtts = self.load_plugin(voice.gtts)

    def test_gtts(self):
        # run "gtts" plugin code
        self.test_gtts.gtts(jarvis, self)
        # verify that "gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), True)

    # test "disable_gtts" plugin
    def setUp(self):
        self.test_disable_gtts = self.load_plugin(voice.disable_gtts)

    def test_disable_gtts(self):
        # run "disable_gtts" plugin code
        self.test_disable_gtts.disable_gtts(jarvis, self)
        # verify that "disable_gtts" plugin code works
        self.assertEqual(self.jarvis_api.get_data('gtts_status'), False)


if __name__ == '__main__':
    unittest.main()
# ... existing code ...
        # run "gtts" plugin code
        self.test_gtts.gtts(jarvis, self)
# ... modified code ...
        # run "disable_gtts" plugin code
        self.test_disable_gtts.disable_gtts(jarvis, self)
# ... rest of the code ...
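Worth flagging: the "fixed" test bodies reference a bare name `jarvis` that is never defined in the module, so this version would raise `NameError` when run. A hedged guess at what was intended, reusing the `jarvis_api` object the assertions already rely on (the plugin signature `gtts(jarvis, s)` is an assumption):

```python
def test_gtts(self):
    # run "gtts" plugin code against the test harness's Jarvis API object
    # (assumption: the plugin is callable as gtts(jarvis, s))
    self.test_gtts.gtts(self.jarvis_api, '')
    # verify that "gtts" plugin code works
    self.assertEqual(self.jarvis_api.get_data('gtts_status'), True)
```

The duplicated `setUp` definitions are a separate wrinkle: in a Python class body the second binding silently replaces the first, so only one plugin is ever loaded.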
4c95c238cd198779b7019a72b412ce20ddf865bd
alg_gcd.py
alg_gcd.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def gcd(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.

    Time complexity: O(m%n).
    Space complexity: O(1).
    """
    while n != 0:
        m, n = n, m % n
    return m


def main():
    print('gcd(4, 2): {}'.format(gcd(4, 2)))
    print('gcd(2, 4): {}'.format(gcd(2, 4)))
    print('gcd(10, 4): {}'.format(gcd(10, 4)))
    print('gcd(4, 10): {}'.format(gcd(4, 10)))
    print('gcd(3, 4): {}'.format(gcd(3, 4)))
    print('gcd(4, 3): {}'.format(gcd(4, 3)))


if __name__ == '__main__':
    main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def gcd_recur(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.

    Time complexity: O(m%n).
    Space complexity: O(m%n).
    """
    if n == 0:
        return m
    return gcd_recur(n, m % n)


def gcd_iter(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.

    Time complexity: O(m%n).
    Space complexity: O(1).
    """
    while n != 0:
        m, n = n, m % n
    return m


def main():
    import time
    start_time = time.time()
    print('gcd_recur(4, 2): {}'.format(gcd_recur(4, 2)))
    print('gcd_recur(2, 4): {}'.format(gcd_recur(2, 4)))
    print('gcd_recur(10, 4): {}'.format(gcd_recur(10, 4)))
    print('gcd_recur(4, 10): {}'.format(gcd_recur(4, 10)))
    print('gcd_recur(3, 4): {}'.format(gcd_recur(3, 4)))
    print('gcd_recur(4, 3): {}'.format(gcd_recur(4, 3)))
    print('Time:', time.time() - start_time)
    start_time = time.time()
    print('gcd_iter(4, 2): {}'.format(gcd_iter(4, 2)))
    print('gcd_iter(2, 4): {}'.format(gcd_iter(2, 4)))
    print('gcd_iter(10, 4): {}'.format(gcd_iter(10, 4)))
    print('gcd_iter(4, 10): {}'.format(gcd_iter(4, 10)))
    print('gcd_iter(3, 4): {}'.format(gcd_iter(3, 4)))
    print('gcd_iter(4, 3): {}'.format(gcd_iter(4, 3)))
    print('Time:', time.time() - start_time)


if __name__ == '__main__':
    main()
Complete gcd recur sol w/ time/space complexity
Complete gcd recur sol w/ time/space complexity
Python
bsd-2-clause
bowen0701/algorithms_data_structures
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


+ def gcd_recur(m, n):
+     """Greatest Common Divisor (GCD) by Euclid's Algorithm.
+
+     Time complexity: O(m%n).
+     Space complexity: O(m%n).
+     """
+     if n == 0:
+         return m
+     return gcd_recur(n, m % n)
+
+
- def gcd(m, n):
+ def gcd_iter(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.

    Time complexity: O(m%n).
    Space complexity: O(1).
    """
    while n != 0:
        m, n = n, m % n
    return m


def main():
+     import time
-     print('gcd(4, 2): {}'.format(gcd(4, 2)))
-     print('gcd(2, 4): {}'.format(gcd(2, 4)))
+     start_time = time.time()
+     print('gcd_recur(4, 2): {}'.format(gcd_recur(4, 2)))
+     print('gcd_recur(2, 4): {}'.format(gcd_recur(2, 4)))
-     print('gcd(10, 4): {}'.format(gcd(10, 4)))
+     print('gcd_recur(10, 4): {}'.format(gcd_recur(10, 4)))
-     print('gcd(4, 10): {}'.format(gcd(4, 10)))
+     print('gcd_recur(4, 10): {}'.format(gcd_recur(4, 10)))
+     print('gcd_recur(3, 4): {}'.format(gcd_recur(3, 4)))
+     print('gcd_recur(4, 3): {}'.format(gcd_recur(4, 3)))
+     print('Time:', time.time() - start_time)
+     start_time = time.time()
+     print('gcd_iter(4, 2): {}'.format(gcd_iter(4, 2)))
+     print('gcd_iter(2, 4): {}'.format(gcd_iter(2, 4)))
+     print('gcd_iter(10, 4): {}'.format(gcd_iter(10, 4)))
+     print('gcd_iter(4, 10): {}'.format(gcd_iter(4, 10)))
-     print('gcd(3, 4): {}'.format(gcd(3, 4)))
+     print('gcd_iter(3, 4): {}'.format(gcd_iter(3, 4)))
-     print('gcd(4, 3): {}'.format(gcd(4, 3)))
+     print('gcd_iter(4, 3): {}'.format(gcd_iter(4, 3)))
+     print('Time:', time.time() - start_time)


if __name__ == '__main__':
    main()
Complete gcd recur sol w/ time/space complexity
## Code Before:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def gcd(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.

    Time complexity: O(m%n).
    Space complexity: O(1).
    """
    while n != 0:
        m, n = n, m % n
    return m


def main():
    print('gcd(4, 2): {}'.format(gcd(4, 2)))
    print('gcd(2, 4): {}'.format(gcd(2, 4)))
    print('gcd(10, 4): {}'.format(gcd(10, 4)))
    print('gcd(4, 10): {}'.format(gcd(4, 10)))
    print('gcd(3, 4): {}'.format(gcd(3, 4)))
    print('gcd(4, 3): {}'.format(gcd(4, 3)))


if __name__ == '__main__':
    main()
## Instruction:
Complete gcd recur sol w/ time/space complexity
## Code After:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


def gcd_recur(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.

    Time complexity: O(m%n).
    Space complexity: O(m%n).
    """
    if n == 0:
        return m
    return gcd_recur(n, m % n)


def gcd_iter(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.

    Time complexity: O(m%n).
    Space complexity: O(1).
    """
    while n != 0:
        m, n = n, m % n
    return m


def main():
    import time
    start_time = time.time()
    print('gcd_recur(4, 2): {}'.format(gcd_recur(4, 2)))
    print('gcd_recur(2, 4): {}'.format(gcd_recur(2, 4)))
    print('gcd_recur(10, 4): {}'.format(gcd_recur(10, 4)))
    print('gcd_recur(4, 10): {}'.format(gcd_recur(4, 10)))
    print('gcd_recur(3, 4): {}'.format(gcd_recur(3, 4)))
    print('gcd_recur(4, 3): {}'.format(gcd_recur(4, 3)))
    print('Time:', time.time() - start_time)
    start_time = time.time()
    print('gcd_iter(4, 2): {}'.format(gcd_iter(4, 2)))
    print('gcd_iter(2, 4): {}'.format(gcd_iter(2, 4)))
    print('gcd_iter(10, 4): {}'.format(gcd_iter(10, 4)))
    print('gcd_iter(4, 10): {}'.format(gcd_iter(4, 10)))
    print('gcd_iter(3, 4): {}'.format(gcd_iter(3, 4)))
    print('gcd_iter(4, 3): {}'.format(gcd_iter(4, 3)))
    print('Time:', time.time() - start_time)


if __name__ == '__main__':
    main()
# ... existing code ...
def gcd_recur(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.

    Time complexity: O(m%n).
    Space complexity: O(m%n).
    """
    if n == 0:
        return m
    return gcd_recur(n, m % n)


def gcd_iter(m, n):
    """Greatest Common Divisor (GCD) by Euclid's Algorithm.
# ... modified code ...
def main():
    import time
    start_time = time.time()
    print('gcd_recur(4, 2): {}'.format(gcd_recur(4, 2)))
    print('gcd_recur(2, 4): {}'.format(gcd_recur(2, 4)))
    print('gcd_recur(10, 4): {}'.format(gcd_recur(10, 4)))
    print('gcd_recur(4, 10): {}'.format(gcd_recur(4, 10)))
    print('gcd_recur(3, 4): {}'.format(gcd_recur(3, 4)))
    print('gcd_recur(4, 3): {}'.format(gcd_recur(4, 3)))
    print('Time:', time.time() - start_time)
    start_time = time.time()
    print('gcd_iter(4, 2): {}'.format(gcd_iter(4, 2)))
    print('gcd_iter(2, 4): {}'.format(gcd_iter(2, 4)))
    print('gcd_iter(10, 4): {}'.format(gcd_iter(10, 4)))
    print('gcd_iter(4, 10): {}'.format(gcd_iter(4, 10)))
    print('gcd_iter(3, 4): {}'.format(gcd_iter(3, 4)))
    print('gcd_iter(4, 3): {}'.format(gcd_iter(4, 3)))
    print('Time:', time.time() - start_time)
# ... rest of the code ...
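A short trace makes both versions concrete: each step maps (m, n) to (n, m % n) until n hits 0, so gcd(10, 4) resolves in two steps:

```python
# (10, 4) -> (4, 10 % 4) = (4, 2)
# ( 4, 2) -> (2,  4 % 2) = (2, 0)
# ( 2, 0) -> n == 0, so the answer is m = 2
import math
assert math.gcd(10, 4) == 2  # stdlib cross-check (Python 3.5+)
```

As an aside, the docstrings state the complexity as O(m%n); the more usual statement for Euclid's algorithm is O(log min(m, n)) iterations.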
bfc7e08ba70ba0e3acb9e4cc69b70c816845b6cb
djofx/views/home.py
djofx/views/home.py
from django.db.models import Sum
from django.views.generic import TemplateView
from djofx.forms import OFXForm
from djofx.views.base import PageTitleMixin, UserRequiredMixin
from djofx import models


class HomePageView(PageTitleMixin, UserRequiredMixin, TemplateView):
    template_name = "djofx/home.html"

    def get_context_data(self, **kwargs):
        context = super(HomePageView, self).get_context_data(**kwargs)

        context['accounts'] = models.Account.objects.filter(
            owner=self.request.user
        )
        context['form'] = OFXForm()

        breakdown = models.Transaction.objects.filter(
            amount__lt=0,
            transaction_category__is_void=False
        ).values(
            'transaction_category__pk',
            'transaction_category__name'
        ).annotate(
            total=Sum('amount')
        ).order_by('-total')

        context['breakdown'] = [
            (
                abs(item['total']),
                item['transaction_category__pk'],
                item['transaction_category__name']
            )
            for item in breakdown
        ]

        return context
from datetime import date, timedelta

from django.db.models import Sum
from django.views.generic import TemplateView
from djofx.forms import OFXForm
from djofx.views.base import PageTitleMixin, UserRequiredMixin
from djofx import models
from operator import itemgetter


class HomePageView(PageTitleMixin, UserRequiredMixin, TemplateView):
    template_name = "djofx/home.html"

    def get_context_data(self, **kwargs):
        context = super(HomePageView, self).get_context_data(**kwargs)

        context['accounts'] = models.Account.objects.filter(
            owner=self.request.user
        )
        context['form'] = OFXForm()

        cutoff = date.today() - timedelta(days=120)

        uncategorised_breakdown = models.Transaction.objects.filter(
            amount__lt=0,
            transaction_category__isnull=True,
            date__gte=cutoff
        ).aggregate(
            total=Sum('amount')
        )

        breakdown = models.Transaction.objects.filter(
            amount__lt=0,
            transaction_category__is_void=False,
            date__gte=cutoff
        ).values(
            'transaction_category__pk',
            'transaction_category__name'
        ).annotate(
            total=Sum('amount')
        ).order_by('-total')

        context['breakdown'] = [
            (
                abs(item['total']),
                item['transaction_category__pk'],
                item['transaction_category__name']
            )
            for item in breakdown
        ]
        context['breakdown'].append(
            (
                uncategorised_breakdown['total'] * -1,
                0,
                'Uncategorised'
            )
        )
        context['breakdown'] = sorted(context['breakdown'],
                                      key=itemgetter(0),
                                      reverse=True)

        return context
Include uncategorised spending in overview pie chart
Include uncategorised spending in overview pie chart

Also, only show last 120 days
Python
mit
dominicrodger/djofx,dominicrodger/djofx,dominicrodger/djofx
+ from datetime import date, timedelta
from django.db.models import Sum
from django.views.generic import TemplateView
from djofx.forms import OFXForm
from djofx.views.base import PageTitleMixin, UserRequiredMixin
from djofx import models
+ from operator import itemgetter


class HomePageView(PageTitleMixin, UserRequiredMixin, TemplateView):
    template_name = "djofx/home.html"

    def get_context_data(self, **kwargs):
        context = super(HomePageView, self).get_context_data(**kwargs)

        context['accounts'] = models.Account.objects.filter(
            owner=self.request.user
        )
        context['form'] = OFXForm()

+         cutoff = date.today() - timedelta(days=120)
+
+         uncategorised_breakdown = models.Transaction.objects.filter(
+             amount__lt=0,
+             transaction_category__isnull=True,
+             date__gte=cutoff
+         ).aggregate(
+             total=Sum('amount')
+         )
+
        breakdown = models.Transaction.objects.filter(
            amount__lt=0,
-             transaction_category__is_void=False
+             transaction_category__is_void=False,
+             date__gte=cutoff
        ).values(
            'transaction_category__pk',
            'transaction_category__name'
        ).annotate(
            total=Sum('amount')
        ).order_by('-total')
+
        context['breakdown'] = [
            (
                abs(item['total']),
                item['transaction_category__pk'],
                item['transaction_category__name']
            )
            for item in breakdown
        ]
+         context['breakdown'].append(
+             (
+                 uncategorised_breakdown['total'] * -1,
+                 0,
+                 'Uncategorised'
+             )
+         )
+         context['breakdown'] = sorted(context['breakdown'],
+                                       key=itemgetter(0),
+                                       reverse=True)

        return context
Include uncategorised spending in overview pie chart
## Code Before:
from django.db.models import Sum
from django.views.generic import TemplateView
from djofx.forms import OFXForm
from djofx.views.base import PageTitleMixin, UserRequiredMixin
from djofx import models


class HomePageView(PageTitleMixin, UserRequiredMixin, TemplateView):
    template_name = "djofx/home.html"

    def get_context_data(self, **kwargs):
        context = super(HomePageView, self).get_context_data(**kwargs)

        context['accounts'] = models.Account.objects.filter(
            owner=self.request.user
        )
        context['form'] = OFXForm()

        breakdown = models.Transaction.objects.filter(
            amount__lt=0,
            transaction_category__is_void=False
        ).values(
            'transaction_category__pk',
            'transaction_category__name'
        ).annotate(
            total=Sum('amount')
        ).order_by('-total')

        context['breakdown'] = [
            (
                abs(item['total']),
                item['transaction_category__pk'],
                item['transaction_category__name']
            )
            for item in breakdown
        ]

        return context
## Instruction:
Include uncategorised spending in overview pie chart
## Code After:
from datetime import date, timedelta

from django.db.models import Sum
from django.views.generic import TemplateView
from djofx.forms import OFXForm
from djofx.views.base import PageTitleMixin, UserRequiredMixin
from djofx import models
from operator import itemgetter


class HomePageView(PageTitleMixin, UserRequiredMixin, TemplateView):
    template_name = "djofx/home.html"

    def get_context_data(self, **kwargs):
        context = super(HomePageView, self).get_context_data(**kwargs)

        context['accounts'] = models.Account.objects.filter(
            owner=self.request.user
        )
        context['form'] = OFXForm()

        cutoff = date.today() - timedelta(days=120)

        uncategorised_breakdown = models.Transaction.objects.filter(
            amount__lt=0,
            transaction_category__isnull=True,
            date__gte=cutoff
        ).aggregate(
            total=Sum('amount')
        )

        breakdown = models.Transaction.objects.filter(
            amount__lt=0,
            transaction_category__is_void=False,
            date__gte=cutoff
        ).values(
            'transaction_category__pk',
            'transaction_category__name'
        ).annotate(
            total=Sum('amount')
        ).order_by('-total')

        context['breakdown'] = [
            (
                abs(item['total']),
                item['transaction_category__pk'],
                item['transaction_category__name']
            )
            for item in breakdown
        ]
        context['breakdown'].append(
            (
                uncategorised_breakdown['total'] * -1,
                0,
                'Uncategorised'
            )
        )
        context['breakdown'] = sorted(context['breakdown'],
                                      key=itemgetter(0),
                                      reverse=True)

        return context
// ... existing code ...
from datetime import date, timedelta

from django.db.models import Sum
// ... modified code ...
from djofx import models
from operator import itemgetter
...
        context['form'] = OFXForm()

        cutoff = date.today() - timedelta(days=120)

        uncategorised_breakdown = models.Transaction.objects.filter(
            amount__lt=0,
            transaction_category__isnull=True,
            date__gte=cutoff
        ).aggregate(
            total=Sum('amount')
        )

        breakdown = models.Transaction.objects.filter(
...
            amount__lt=0,
            transaction_category__is_void=False,
            date__gte=cutoff
        ).values(
...
        ).order_by('-total')

        context['breakdown'] = [
...
        ]
        context['breakdown'].append(
            (
                uncategorised_breakdown['total'] * -1,
                0,
                'Uncategorised'
            )
        )
        context['breakdown'] = sorted(context['breakdown'],
                                      key=itemgetter(0),
                                      reverse=True)
// ... rest of the code ...
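One edge case in the new code: Django's `aggregate(total=Sum('amount'))` returns `{'total': None}` when no rows match, so `uncategorised_breakdown['total'] * -1` raises `TypeError` for a user with no uncategorised spending in the 120-day window. A defensive variant of that append, as a sketch:

```python
# Sum over an empty queryset yields None, so default to 0 first.
uncategorised_total = uncategorised_breakdown['total'] or 0
context['breakdown'].append((uncategorised_total * -1, 0, 'Uncategorised'))
```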
2c02816c05f3863ef76b3a412ac5bad9eecfafdd
testrepository/tests/test_setup.py
testrepository/tests/test_setup.py
"""Tests for setup.py.""" import doctest import os import subprocess import sys from testtools import ( TestCase, ) from testtools.matchers import ( DocTestMatches, ) class TestCanSetup(TestCase): def test_bdist(self): # Single smoke test to make sure we can build a package. path = os.path.join(os.path.dirname(__file__), '..', '..', 'setup.py') proc = subprocess.Popen([sys.executable, path, 'bdist'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) output, _ = proc.communicate() self.assertEqual(0, proc.returncode) self.assertThat(output, DocTestMatches("...running bdist...", doctest.ELLIPSIS))
"""Tests for setup.py.""" import doctest import os import subprocess import sys from testtools import ( TestCase, ) from testtools.matchers import ( DocTestMatches, ) class TestCanSetup(TestCase): def test_bdist(self): # Single smoke test to make sure we can build a package. path = os.path.join(os.path.dirname(__file__), '..', '..', 'setup.py') proc = subprocess.Popen([sys.executable, path, 'bdist'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) output, _ = proc.communicate() self.assertEqual(0, proc.returncode) self.assertThat(output, DocTestMatches("""... running install_scripts ... adding '...testr' ...""", doctest.ELLIPSIS))
Make setup.py smoke test more specific again as requested in review
Make setup.py smoke test more specific again as requested in review
Python
apache-2.0
masayukig/stestr,masayukig/stestr,mtreinish/stestr,mtreinish/stestr
"""Tests for setup.py.""" import doctest import os import subprocess import sys from testtools import ( TestCase, ) from testtools.matchers import ( DocTestMatches, ) class TestCanSetup(TestCase): def test_bdist(self): # Single smoke test to make sure we can build a package. path = os.path.join(os.path.dirname(__file__), '..', '..', 'setup.py') proc = subprocess.Popen([sys.executable, path, 'bdist'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + stderr=subprocess.STDOUT, universal_newlines=True) output, _ = proc.communicate() self.assertEqual(0, proc.returncode) - self.assertThat(output, - DocTestMatches("...running bdist...", doctest.ELLIPSIS)) + self.assertThat(output, DocTestMatches("""... + running install_scripts + ... + adding '...testr' + ...""", doctest.ELLIPSIS))
Make setup.py smoke test more specific again as requested in review
## Code Before:
"""Tests for setup.py."""

import doctest
import os
import subprocess
import sys

from testtools import (
    TestCase,
    )
from testtools.matchers import (
    DocTestMatches,
    )


class TestCanSetup(TestCase):

    def test_bdist(self):
        # Single smoke test to make sure we can build a package.
        path = os.path.join(os.path.dirname(__file__), '..', '..', 'setup.py')
        proc = subprocess.Popen([sys.executable, path, 'bdist'],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT)
        output, _ = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertThat(output,
            DocTestMatches("...running bdist...", doctest.ELLIPSIS))
## Instruction:
Make setup.py smoke test more specific again as requested in review
## Code After:
"""Tests for setup.py."""

import doctest
import os
import subprocess
import sys

from testtools import (
    TestCase,
    )
from testtools.matchers import (
    DocTestMatches,
    )


class TestCanSetup(TestCase):

    def test_bdist(self):
        # Single smoke test to make sure we can build a package.
        path = os.path.join(os.path.dirname(__file__), '..', '..', 'setup.py')
        proc = subprocess.Popen([sys.executable, path, 'bdist'],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT, universal_newlines=True)
        output, _ = proc.communicate()
        self.assertEqual(0, proc.returncode)
        self.assertThat(output, DocTestMatches("""...
running install_scripts
...
adding '...testr'
...""", doctest.ELLIPSIS))
...
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT, universal_newlines=True)
        output, _ = proc.communicate()
...
        self.assertEqual(0, proc.returncode)
        self.assertThat(output, DocTestMatches("""...
running install_scripts
...
adding '...testr'
...""", doctest.ELLIPSIS))
...
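Two mechanisms carry this change. `DocTestMatches` applies doctest-style comparison to arbitrary strings, so with `doctest.ELLIPSIS` each `...` matches any run of output; and `universal_newlines=True` makes `communicate()` return `str` rather than `bytes`, which is what the matcher expects on Python 3. A standalone sketch of the matcher:

```python
import doctest
from testtools.matchers import DocTestMatches

matcher = DocTestMatches("""...
running install_scripts
...""", doctest.ELLIPSIS)
# match() returns None on success and a Mismatch object on failure.
print(matcher.match("running build\nrunning install_scripts\ndone") is None)
```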
c3284516e8dc2c7fccfbf7e4bff46a66b4ad2f15
cref/evaluation/__init__.py
cref/evaluation/__init__.py
import os
import statistics

from cref.structure import rmsd
from cref.app.terminal import download_pdb, download_fasta, predict_fasta

pdbs = ['1zdd', '1gab']
runs = 100
fragment_sizes = range(5, 13, 2)
number_of_clusters = range(4, 20, 1)

for pdb in pdbs:
    output_dir = 'predictions/evaluation/{}/'.format(pdb)
    try:
        os.mkdir(output_dir)
    except FileExistsError as e:
        print(e)
    for fragment_size in fragment_sizes:
        fasta_file = output_dir + pdb + '.fasta'
        download_fasta(pdb, fasta_file)
        for n in number_of_clusters:
            rmsds = []
            for run in range(runs):
                params = {
                    'pdb': pdb,
                    'fragment_size': fragment_size,
                    'number_of_clusters': n
                }
                output_files = predict_fasta(fasta_file, output_dir, params)
                predicted_structure = output_files[0]
                filepath = os.path.join(
                    os.path.dirname(predicted_structure),
                    'experimental_structure.pdb'
                )
                experimental_structure = download_pdb(pdb, filepath)
                rmsds.append(rmsd(predicted_structure, experimental_structure))
            print(pdb, fragment_size, n, statistics.mean(rmsds),
                  statistics.pstdev(rmsds))
import os
import statistics

from cref.structure import rmsd
from cref.app.terminal import download_pdb, download_fasta, predict_fasta

pdbs = ['1zdd', '1gab']
runs = 5
fragment_sizes = range(5, 13, 2)
number_of_clusters = range(4, 20, 1)

for pdb in pdbs:
    output_dir = 'predictions/evaluation/{}/'.format(pdb)
    try:
        os.mkdir(output_dir)
    except FileExistsError as e:
        print(e)
    for fragment_size in fragment_sizes:
        fasta_file = output_dir + pdb + '.fasta'
        download_fasta(pdb, fasta_file)
        for n in number_of_clusters:
            rmsds = []
            for run in range(runs):
                params = {
                    'pdb': pdb,
                    'fragment_size': fragment_size,
                    'number_of_clusters': n
                }
                prediction_output = output_dir + str(run)
                os.mkdir(prediction_output)
                output_files = predict_fasta(fasta_file, prediction_output, params)
                predicted_structure = output_files[0]
                filepath = os.path.join(
                    os.path.dirname(predicted_structure),
                    'experimental_structure.pdb'
                )
                experimental_structure = download_pdb(pdb, filepath)
                rmsds.append(rmsd(predicted_structure, experimental_structure))
            print(pdb, fragment_size, n, statistics.mean(rmsds),
                  statistics.pstdev(rmsds))
Save output for every run
Save output for every run
Python
mit
mchelem/cref2,mchelem/cref2,mchelem/cref2
import os
import statistics

from cref.structure import rmsd
from cref.app.terminal import download_pdb, download_fasta, predict_fasta

pdbs = ['1zdd', '1gab']
- runs = 100
+ runs = 5
fragment_sizes = range(5, 13, 2)
number_of_clusters = range(4, 20, 1)

for pdb in pdbs:
    output_dir = 'predictions/evaluation/{}/'.format(pdb)
    try:
        os.mkdir(output_dir)
    except FileExistsError as e:
        print(e)
    for fragment_size in fragment_sizes:
        fasta_file = output_dir + pdb + '.fasta'
        download_fasta(pdb, fasta_file)
        for n in number_of_clusters:
            rmsds = []
            for run in range(runs):
                params = {
                    'pdb': pdb,
                    'fragment_size': fragment_size,
                    'number_of_clusters': n
                }
+                 prediction_output = output_dir + str(run)
+                 os.mkdir(prediction_output)
-                 output_files = predict_fasta(fasta_file, output_dir, params)
+                 output_files = predict_fasta(fasta_file, prediction_output, params)
                predicted_structure = output_files[0]
                filepath = os.path.join(
                    os.path.dirname(predicted_structure),
                    'experimental_structure.pdb'
                )
                experimental_structure = download_pdb(pdb, filepath)
                rmsds.append(rmsd(predicted_structure, experimental_structure))
            print(pdb, fragment_size, n, statistics.mean(rmsds),
                  statistics.pstdev(rmsds))
Save output for every run
## Code Before:
import os
import statistics

from cref.structure import rmsd
from cref.app.terminal import download_pdb, download_fasta, predict_fasta

pdbs = ['1zdd', '1gab']
runs = 100
fragment_sizes = range(5, 13, 2)
number_of_clusters = range(4, 20, 1)

for pdb in pdbs:
    output_dir = 'predictions/evaluation/{}/'.format(pdb)
    try:
        os.mkdir(output_dir)
    except FileExistsError as e:
        print(e)
    for fragment_size in fragment_sizes:
        fasta_file = output_dir + pdb + '.fasta'
        download_fasta(pdb, fasta_file)
        for n in number_of_clusters:
            rmsds = []
            for run in range(runs):
                params = {
                    'pdb': pdb,
                    'fragment_size': fragment_size,
                    'number_of_clusters': n
                }
                output_files = predict_fasta(fasta_file, output_dir, params)
                predicted_structure = output_files[0]
                filepath = os.path.join(
                    os.path.dirname(predicted_structure),
                    'experimental_structure.pdb'
                )
                experimental_structure = download_pdb(pdb, filepath)
                rmsds.append(rmsd(predicted_structure, experimental_structure))
            print(pdb, fragment_size, n, statistics.mean(rmsds),
                  statistics.pstdev(rmsds))
## Instruction:
Save output for every run
## Code After:
import os
import statistics

from cref.structure import rmsd
from cref.app.terminal import download_pdb, download_fasta, predict_fasta

pdbs = ['1zdd', '1gab']
runs = 5
fragment_sizes = range(5, 13, 2)
number_of_clusters = range(4, 20, 1)

for pdb in pdbs:
    output_dir = 'predictions/evaluation/{}/'.format(pdb)
    try:
        os.mkdir(output_dir)
    except FileExistsError as e:
        print(e)
    for fragment_size in fragment_sizes:
        fasta_file = output_dir + pdb + '.fasta'
        download_fasta(pdb, fasta_file)
        for n in number_of_clusters:
            rmsds = []
            for run in range(runs):
                params = {
                    'pdb': pdb,
                    'fragment_size': fragment_size,
                    'number_of_clusters': n
                }
                prediction_output = output_dir + str(run)
                os.mkdir(prediction_output)
                output_files = predict_fasta(fasta_file, prediction_output, params)
                predicted_structure = output_files[0]
                filepath = os.path.join(
                    os.path.dirname(predicted_structure),
                    'experimental_structure.pdb'
                )
                experimental_structure = download_pdb(pdb, filepath)
                rmsds.append(rmsd(predicted_structure, experimental_structure))
            print(pdb, fragment_size, n, statistics.mean(rmsds),
                  statistics.pstdev(rmsds))
...
pdbs = ['1zdd', '1gab']
runs = 5
fragment_sizes = range(5, 13, 2)
...
                prediction_output = output_dir + str(run)
                os.mkdir(prediction_output)
                output_files = predict_fasta(fasta_file, prediction_output, params)
                predicted_structure = output_files[0]
...
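The per-run `os.mkdir(prediction_output)` will raise `FileExistsError` if the evaluation is ever re-run over an existing tree; the outer loop already anticipates exactly that for `output_dir` with its try/except. On Python 3.2+, a sketch of the more forgiving idiom:

```python
import os

prediction_output = 'predictions/evaluation/1zdd/0'  # example path
os.makedirs(prediction_output, exist_ok=True)  # no error if it already exists
```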
b7bb3b0782fcece12531b90a17eda98c4ae59be0
notes/templatetags/note_tags.py
notes/templatetags/note_tags.py
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile

register = Library()

#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
    if note is None:
        return u'<NULL NOTE>'
    if note.read_date is None:
        return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
    else:
        return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')

#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
    count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
    return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')

#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
    return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile

register = Library()

#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
    if note is None:
        return u'<NULL NOTE>'
    if note.read_date is None:
        return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
    else:
        return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')

#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
    count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
    if count > 0:
        return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
    else:
        return mark_safe(u'<span>' + escape(count) + u'</span>')

#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
    return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
Update to display notes count bubble only when new notes are available
Update to display notes count bubble only when new notes are available
Python
agpl-3.0
ficlatte/main,HSAR/Ficlatte,stitzelj/Ficlatte,ficlatte/main,HSAR/Ficlatte,stitzelj/Ficlatte
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile

register = Library()

#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
-     if note is None:
+     if note is None:
-         return u'<NULL NOTE>'
+         return u'<NULL NOTE>'
-     if note.read_date is None:
+     if note.read_date is None:
-         return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
+         return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
-     else:
+     else:
-         return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
+         return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')

#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
-     count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
+     count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
+     if count > 0:
-     return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
+         return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
+     else:
+         return mark_safe(u'<span>' + escape(count) + u'</span>')

#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
-     return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
+     return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
Update to display notes count bubble only when new notes are available
## Code Before:
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile

register = Library()

#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
    if note is None:
        return u'<NULL NOTE>'
    if note.read_date is None:
        return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
    else:
        return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')

#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
    count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
    return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')

#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
    return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
## Instruction:
Update to display notes count bubble only when new notes are available
## Code After:
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile

register = Library()

#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
    if note is None:
        return u'<NULL NOTE>'
    if note.read_date is None:
        return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
    else:
        return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')

#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
    count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
    if count > 0:
        return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
    else:
        return mark_safe(u'<span>' + escape(count) + u'</span>')

#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
    return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
... def note_link(note): if note is None: return u'<NULL NOTE>' if note.read_date is None: return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>') else: return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>') ... def inbox_count(profile): count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count() if count > 0: return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>') else: return mark_safe(u'<span>' + escape(count) + u'</span>') ... def author_msg(profile): return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>') ...
448f1201a36de8ef41dadbb63cbea874dd7d5878
wechatpy/utils.py
wechatpy/utils.py
from __future__ import absolute_import, unicode_literals import hashlib import six class ObjectDict(dict): def __getattr__(self, key): if key in self: return self[key] return None def __setattr__(self, key, value): self[key] = value def check_signature(token, signature, timestamp, nonce): tmparr = [token, timestamp, nonce] tmparr.sort() tmpstr = ''.join(tmparr) tmpstr = six.binary_type(tmpstr) digest = hashlib.sha1(tmpstr).hexdigest() return digest == signature
from __future__ import absolute_import, unicode_literals import hashlib import six class ObjectDict(dict): def __getattr__(self, key): if key in self: return self[key] return None def __setattr__(self, key, value): self[key] = value def check_signature(token, signature, timestamp, nonce): tmparr = [token, timestamp, nonce] tmparr.sort() tmpstr = ''.join(tmparr) tmpstr = six.text_type(tmpstr).encode('utf-8') digest = hashlib.sha1(tmpstr).hexdigest() return digest == signature
Fix test error on Python 3
Fix test error on Python 3
Python
mit
cloverstd/wechatpy,wechatpy/wechatpy,EaseCloud/wechatpy,mruse/wechatpy,cysnake4713/wechatpy,cysnake4713/wechatpy,zhaoqz/wechatpy,navcat/wechatpy,zaihui/wechatpy,Luckyseal/wechatpy,messense/wechatpy,chenjiancan/wechatpy,chenjiancan/wechatpy,Luckyseal/wechatpy,tdautc19841202/wechatpy,navcat/wechatpy,Dufy/wechatpy,jxtech/wechatpy,Luckyseal/wechatpy,tdautc19841202/wechatpy,hunter007/wechatpy,mruse/wechatpy,EaseCloud/wechatpy,tdautc19841202/wechatpy,hunter007/wechatpy,cloverstd/wechatpy,Dufy/wechatpy,zaihui/wechatpy,zhaoqz/wechatpy,cysnake4713/wechatpy
from __future__ import absolute_import, unicode_literals import hashlib import six class ObjectDict(dict): def __getattr__(self, key): if key in self: return self[key] return None def __setattr__(self, key, value): self[key] = value def check_signature(token, signature, timestamp, nonce): tmparr = [token, timestamp, nonce] tmparr.sort() tmpstr = ''.join(tmparr) - tmpstr = six.binary_type(tmpstr) + tmpstr = six.text_type(tmpstr).encode('utf-8') digest = hashlib.sha1(tmpstr).hexdigest() return digest == signature
Fix test error on Python 3
## Code Before: from __future__ import absolute_import, unicode_literals import hashlib import six class ObjectDict(dict): def __getattr__(self, key): if key in self: return self[key] return None def __setattr__(self, key, value): self[key] = value def check_signature(token, signature, timestamp, nonce): tmparr = [token, timestamp, nonce] tmparr.sort() tmpstr = ''.join(tmparr) tmpstr = six.binary_type(tmpstr) digest = hashlib.sha1(tmpstr).hexdigest() return digest == signature ## Instruction: Fix test error on Python 3 ## Code After: from __future__ import absolute_import, unicode_literals import hashlib import six class ObjectDict(dict): def __getattr__(self, key): if key in self: return self[key] return None def __setattr__(self, key, value): self[key] = value def check_signature(token, signature, timestamp, nonce): tmparr = [token, timestamp, nonce] tmparr.sort() tmpstr = ''.join(tmparr) tmpstr = six.text_type(tmpstr).encode('utf-8') digest = hashlib.sha1(tmpstr).hexdigest() return digest == signature
# ... existing code ... tmpstr = ''.join(tmparr) tmpstr = six.text_type(tmpstr).encode('utf-8') digest = hashlib.sha1(tmpstr).hexdigest() # ... rest of the code ...
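Since the record above only states the symptom ("Fix test error on Python 3"), here is a minimal hedged sketch of the actual failure mode — the token/timestamp/nonce values are invented, and only `six` plus the standard library are assumed:

import hashlib
import six

# Invented sample inputs; real values arrive with the WeChat callback.
tmparr = sorted(['my-token', '1409735669', 'nonce123'])
tmpstr = ''.join(tmparr)

# On Python 2, six.binary_type is str, so six.binary_type(tmpstr) is a no-op.
# On Python 3 it is bytes, and bytes(tmpstr) raises TypeError ("string
# argument without an encoding"), while hashlib.sha1 itself rejects str.
# Encoding explicitly works on both interpreters:
digest = hashlib.sha1(six.text_type(tmpstr).encode('utf-8')).hexdigest()
print(digest)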
8b3ca76b980f126912de1bc8ffa067c199693eb3
cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py
cinder/db/sqlalchemy/migrate_repo/versions/061_add_snapshot_id_timestamp_to_backups.py
from sqlalchemy import Column, DateTime, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) snapshot_id = Column('snapshot_id', String(length=36)) data_timestamp = Column('data_timestamp', DateTime) backups.create_column(snapshot_id) backups.update().values(snapshot_id=None).execute() backups.create_column(data_timestamp) backups.update().values(data_timestamp=None).execute() # Copy existing created_at timestamp to data_timestamp # in the backups table. backups_list = list(backups.select().execute()) for backup in backups_list: backup_id = backup.id backups.update().\ where(backups.c.id == backup_id).\ values(data_timestamp=backup.created_at).execute()
from sqlalchemy import Column, DateTime, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) snapshot_id = Column('snapshot_id', String(length=36)) data_timestamp = Column('data_timestamp', DateTime) backups.create_column(snapshot_id) backups.create_column(data_timestamp) backups.update().values(data_timestamp=backups.c.created_at).execute()
Fix race conditions in migration 061
Fix race conditions in migration 061

Migration 061 is supposed to add a new `data_timestamp` field and
populate it with the value of the `created_at` column. This was done by
selecting all the backups and updating them one by one. As this wasn't
done in a transaction, the solution was prone to a race condition when a
new backup is added while the migration is running, which means the
migration could cause problems when run in a live environment. With
blueprint online-schema-upgrades we want to make Cinder able to perform
migrations live.

The solution is to change this step to a single DB query which updates
all the rows.

This commit also removes the unnecessary update to snapshot_id added
there. As this column is nullable it will be NULL by default, so there's
no need to set it to that value manually.

As the migration does logically the same thing before and after this
commit, this should be safe even if someone is doing inter-release
deployments.

An alternative would be to simply wrap the update step in a transaction,
but that would effectively lock the table for a longer period of time
than an atomic one-query update.

Closes-Bug: 1530358
Change-Id: Ib8733c096a3dbe2bad00beaf5734936ffcddda33
Python
apache-2.0
phenoxim/cinder,cloudbase/cinder,j-griffith/cinder,phenoxim/cinder,Nexenta/cinder,Datera/cinder,mahak/cinder,mahak/cinder,j-griffith/cinder,ge0rgi/cinder,Nexenta/cinder,openstack/cinder,cloudbase/cinder,eharney/cinder,eharney/cinder,Hybrid-Cloud/cinder,bswartz/cinder,NetApp/cinder,Hybrid-Cloud/cinder,Datera/cinder,openstack/cinder,bswartz/cinder,NetApp/cinder,dims/cinder,dims/cinder
from sqlalchemy import Column, DateTime, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) snapshot_id = Column('snapshot_id', String(length=36)) data_timestamp = Column('data_timestamp', DateTime) backups.create_column(snapshot_id) - backups.update().values(snapshot_id=None).execute() backups.create_column(data_timestamp) - backups.update().values(data_timestamp=None).execute() + backups.update().values(data_timestamp=backups.c.created_at).execute() - # Copy existing created_at timestamp to data_timestamp - # in the backups table. - backups_list = list(backups.select().execute()) - for backup in backups_list: - backup_id = backup.id - backups.update().\ - where(backups.c.id == backup_id).\ - values(data_timestamp=backup.created_at).execute() -
Fix race conditions in migration 061
## Code Before: from sqlalchemy import Column, DateTime, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) snapshot_id = Column('snapshot_id', String(length=36)) data_timestamp = Column('data_timestamp', DateTime) backups.create_column(snapshot_id) backups.update().values(snapshot_id=None).execute() backups.create_column(data_timestamp) backups.update().values(data_timestamp=None).execute() # Copy existing created_at timestamp to data_timestamp # in the backups table. backups_list = list(backups.select().execute()) for backup in backups_list: backup_id = backup.id backups.update().\ where(backups.c.id == backup_id).\ values(data_timestamp=backup.created_at).execute() ## Instruction: Fix race conditions in migration 061 ## Code After: from sqlalchemy import Column, DateTime, MetaData, String, Table def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine backups = Table('backups', meta, autoload=True) snapshot_id = Column('snapshot_id', String(length=36)) data_timestamp = Column('data_timestamp', DateTime) backups.create_column(snapshot_id) backups.create_column(data_timestamp) backups.update().values(data_timestamp=backups.c.created_at).execute()
... backups.create_column(snapshot_id) ... backups.create_column(data_timestamp) backups.update().values(data_timestamp=backups.c.created_at).execute() ...
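To make the race described in the commit message concrete, here is a small runnable sketch of the one-statement backfill — SQLite in memory, legacy SQLAlchemy 1.x bound-metadata style chosen to match the migration's own API (it would need porting for SQLAlchemy 2.0), with invented sample rows:

import datetime

from sqlalchemy import (Column, DateTime, Integer, MetaData, Table,
                        create_engine)

engine = create_engine('sqlite://')
meta = MetaData(bind=engine)  # 1.x bound metadata, as in the migration
backups = Table('backups', meta,
                Column('id', Integer, primary_key=True),
                Column('created_at', DateTime),
                Column('data_timestamp', DateTime))
meta.create_all()
engine.execute(backups.insert(),
               [{'created_at': datetime.datetime(2016, 1, d)} for d in (1, 2)])

# One atomic UPDATE touches every row, so there is no window between a
# SELECT and per-row UPDATEs in which a concurrently inserted backup
# could be missed:
backups.update().values(data_timestamp=backups.c.created_at).execute()
print(engine.execute(backups.select()).fetchall())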
fc3589210e7244239acbc053d7788dc0cd264b88
app/models.py
app/models.py
from app import db class Sprinkler(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.Text(25)) status = db.Column(db.Text(25)) flow = db.Column(db.Integer) moisture = db.Column(db.Integer) def __init__(self, name, status, flow, moisture): self.name = name self.status = status self.flow = flow self.moisture = moisture def __repr__(self): return '<Sprinkler#%r %r, Status=%r>' % (self.id, self.name, self.status) def turn_on(self): self.status = 'ON' db.session.commit() def turn_off(self): self.status = 'OFF' db.session.commit()
from app import db class Sprinkler(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(25)) status = db.Column(db.String(25)) flow = db.Column(db.Integer) moisture = db.Column(db.Integer) def __init__(self, name, status, flow, moisture): self.name = name self.status = status self.flow = flow self.moisture = moisture def __repr__(self): return '<Sprinkler#%r %r, Status=%r>' % (self.id, self.name, self.status) def turn_on(self): self.status = 'ON' db.session.commit() def turn_off(self): self.status = 'OFF' db.session.commit()
Fix bug with SQLAlchemy, change TEXT to STRING
Fix bug with SQLAlchemy, change TEXT to STRING
Python
mit
jaredculp/sprinkler-flask-server,jaredculp/sprinkler-flask-server
from app import db class Sprinkler(db.Model): id = db.Column(db.Integer, primary_key=True) - name = db.Column(db.Text(25)) + name = db.Column(db.String(25)) - status = db.Column(db.Text(25)) + status = db.Column(db.String(25)) flow = db.Column(db.Integer) moisture = db.Column(db.Integer) def __init__(self, name, status, flow, moisture): self.name = name self.status = status self.flow = flow self.moisture = moisture def __repr__(self): return '<Sprinkler#%r %r, Status=%r>' % (self.id, self.name, self.status) def turn_on(self): self.status = 'ON' db.session.commit() def turn_off(self): self.status = 'OFF' db.session.commit()
Fix bug with SQLAlchemy, change TEXT to STRING
## Code Before: from app import db class Sprinkler(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.Text(25)) status = db.Column(db.Text(25)) flow = db.Column(db.Integer) moisture = db.Column(db.Integer) def __init__(self, name, status, flow, moisture): self.name = name self.status = status self.flow = flow self.moisture = moisture def __repr__(self): return '<Sprinkler#%r %r, Status=%r>' % (self.id, self.name, self.status) def turn_on(self): self.status = 'ON' db.session.commit() def turn_off(self): self.status = 'OFF' db.session.commit() ## Instruction: Fix bug with SQLAlchemy, change TEXT to STRING ## Code After: from app import db class Sprinkler(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(25)) status = db.Column(db.String(25)) flow = db.Column(db.Integer) moisture = db.Column(db.Integer) def __init__(self, name, status, flow, moisture): self.name = name self.status = status self.flow = flow self.moisture = moisture def __repr__(self): return '<Sprinkler#%r %r, Status=%r>' % (self.id, self.name, self.status) def turn_on(self): self.status = 'ON' db.session.commit() def turn_off(self): self.status = 'OFF' db.session.commit()
# ... existing code ... id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(25)) status = db.Column(db.String(25)) flow = db.Column(db.Integer) # ... rest of the code ...
02ef868100ab190b5fa3bff5bad4891f21101ee2
getkey/__init__.py
getkey/__init__.py
from __future__ import absolute_import from .platforms import platform __platform = platform() getkey = __platform.getkey keys = __platform.keys key = keys # alias bang = __platform.bang # __all__ = [getkey, key, bang, platform] __version__ = '0.6'
from __future__ import absolute_import, print_function import sys from .platforms import platform, PlatformError try: __platform = platform() except PlatformError as err: print('Error initializing standard platform: {}'.format(err.args[0]), file=sys.stderr) else: getkey = __platform.getkey keys = __platform.keys key = keys # alias bang = __platform.bang # __all__ = [getkey, key, bang, platform] __version__ = '0.6'
Handle test environment with no real stdin
Handle test environment with no real stdin
Python
mit
kcsaff/getkey
- from __future__ import absolute_import + from __future__ import absolute_import, print_function + import sys - from .platforms import platform + from .platforms import platform, PlatformError + try: - __platform = platform() + __platform = platform() + except PlatformError as err: + print('Error initializing standard platform: {}'.format(err.args[0]), + file=sys.stderr) + else: - getkey = __platform.getkey + getkey = __platform.getkey - keys = __platform.keys + keys = __platform.keys - key = keys # alias + key = keys # alias - bang = __platform.bang + bang = __platform.bang # __all__ = [getkey, key, bang, platform] __version__ = '0.6'
Handle test environment with no real stdin
## Code Before: from __future__ import absolute_import from .platforms import platform __platform = platform() getkey = __platform.getkey keys = __platform.keys key = keys # alias bang = __platform.bang # __all__ = [getkey, key, bang, platform] __version__ = '0.6' ## Instruction: Handle test environment with no real stdin ## Code After: from __future__ import absolute_import, print_function import sys from .platforms import platform, PlatformError try: __platform = platform() except PlatformError as err: print('Error initializing standard platform: {}'.format(err.args[0]), file=sys.stderr) else: getkey = __platform.getkey keys = __platform.keys key = keys # alias bang = __platform.bang # __all__ = [getkey, key, bang, platform] __version__ = '0.6'
... from __future__ import absolute_import, print_function import sys from .platforms import platform, PlatformError try: __platform = platform() except PlatformError as err: print('Error initializing standard platform: {}'.format(err.args[0]), file=sys.stderr) else: getkey = __platform.getkey keys = __platform.keys key = keys # alias bang = __platform.bang ...
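The guard pattern this record adds generalizes to any package whose import-time initializer probes the terminal. A hedged stand-alone sketch — `PlatformError` and `platform()` below are local stand-ins, not getkey's real implementations:

from __future__ import print_function
import sys

class PlatformError(Exception):
    """Stand-in for the exception getkey raises at init time."""

def platform():
    # Assumed behaviour: fail when stdin is not a real terminal, as under
    # a test runner that captures stdin.
    if not sys.stdin.isatty():
        raise PlatformError('stdin is not a tty')
    return object()

try:
    _platform = platform()
except PlatformError as err:
    # Report to stderr instead of crashing the importing process.
    print('Error initializing standard platform: {}'.format(err.args[0]),
          file=sys.stderr)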
d63480d00206a08a3e41c6af7512181198aced05
object_join.py
object_join.py
__author__ = 'stuart' class JoinedObject(object): def __init__(self, left, right): self.left = left self.right = right def __getattr__(self, attr): if attr == 'left': return self.left elif attr == 'right': return self.right else: return self.get_from_sources(attr) def __repr__(self): return '<{} object at {}>'.format( self.left.__class__.__name__ + self.right.__class__.__name__, id(self)) def get_from_sources(self, attr): if hasattr(self.left, attr): return getattr(self.left, attr) elif hasattr(self.right, attr): return getattr(self.right, attr) else: raise AttributeError( "Neither of joined object's parents ({}, {}), have attribute " "'{}'".format(self.left.__class__.__name__, self.left.__class__.__name__, attr))
__author__ = 'stuart' class JoinedObject(object): def __init__(self, left, right): self.left = left self.right = right def __getattr__(self, attr): if attr == 'left': return self.left elif attr == 'right': return self.right else: return self.get_from_sources(attr) def __repr__(self): return '<{} object at {}>'.format( self.left.__class__.__name__ + self.right.__class__.__name__, id(self)) def __dir__(self): attrs = list(set(dir(self.left) + dir(self.right) + ['left', 'right'])) return sorted(attrs) def get_from_sources(self, attr): if hasattr(self.left, attr): return getattr(self.left, attr) elif hasattr(self.right, attr): return getattr(self.right, attr) else: raise AttributeError( "Neither of joined object's parents ({}, {}), have attribute " "'{}'".format(self.left.__class__.__name__, self.right.__class__.__name__, attr))
Add proper `__dir__` reporting, fix bug in AttributeError
Add proper `__dir__` reporting, fix bug in AttributeError
Python
mit
StuartAxelOwen/datastreams
__author__ = 'stuart' class JoinedObject(object): def __init__(self, left, right): self.left = left self.right = right def __getattr__(self, attr): if attr == 'left': return self.left elif attr == 'right': return self.right else: return self.get_from_sources(attr) def __repr__(self): return '<{} object at {}>'.format( self.left.__class__.__name__ + self.right.__class__.__name__, id(self)) + def __dir__(self): + attrs = list(set(dir(self.left) + dir(self.right) + ['left', 'right'])) + return sorted(attrs) + def get_from_sources(self, attr): if hasattr(self.left, attr): return getattr(self.left, attr) elif hasattr(self.right, attr): return getattr(self.right, attr) else: raise AttributeError( "Neither of joined object's parents ({}, {}), have attribute " "'{}'".format(self.left.__class__.__name__, - self.left.__class__.__name__, attr)) + self.right.__class__.__name__, attr))
Add proper `__dir__` reporting, fix bug in AttributeError
## Code Before: __author__ = 'stuart' class JoinedObject(object): def __init__(self, left, right): self.left = left self.right = right def __getattr__(self, attr): if attr == 'left': return self.left elif attr == 'right': return self.right else: return self.get_from_sources(attr) def __repr__(self): return '<{} object at {}>'.format( self.left.__class__.__name__ + self.right.__class__.__name__, id(self)) def get_from_sources(self, attr): if hasattr(self.left, attr): return getattr(self.left, attr) elif hasattr(self.right, attr): return getattr(self.right, attr) else: raise AttributeError( "Neither of joined object's parents ({}, {}), have attribute " "'{}'".format(self.left.__class__.__name__, self.left.__class__.__name__, attr)) ## Instruction: Add proper `__dir__` reporting, fix bug in AttributeError ## Code After: __author__ = 'stuart' class JoinedObject(object): def __init__(self, left, right): self.left = left self.right = right def __getattr__(self, attr): if attr == 'left': return self.left elif attr == 'right': return self.right else: return self.get_from_sources(attr) def __repr__(self): return '<{} object at {}>'.format( self.left.__class__.__name__ + self.right.__class__.__name__, id(self)) def __dir__(self): attrs = list(set(dir(self.left) + dir(self.right) + ['left', 'right'])) return sorted(attrs) def get_from_sources(self, attr): if hasattr(self.left, attr): return getattr(self.left, attr) elif hasattr(self.right, attr): return getattr(self.right, attr) else: raise AttributeError( "Neither of joined object's parents ({}, {}), have attribute " "'{}'".format(self.left.__class__.__name__, self.right.__class__.__name__, attr))
// ... existing code ... def __dir__(self): attrs = list(set(dir(self.left) + dir(self.right) + ['left', 'right'])) return sorted(attrs) def get_from_sources(self, attr): // ... modified code ... "'{}'".format(self.left.__class__.__name__, self.right.__class__.__name__, attr)) // ... rest of the code ...
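A short usage sketch of the class above — `A` and `B` are invented throwaway classes — showing what the new `__dir__` and the corrected `AttributeError` message provide:

class A(object):
    x = 1

class B(object):
    y = 2

j = JoinedObject(A(), B())              # JoinedObject as defined above
print(j.x, j.y)                         # -> 1 2, via get_from_sources
print('x' in dir(j), 'left' in dir(j))  # -> True True, thanks to __dir__
try:
    j.z
except AttributeError as err:
    print(err)  # now names both parents (A, B), not A twice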
a48e2a2f9f367994ab131201db2f07eee5788642
interleaving/interleaving_method.py
interleaving/interleaving_method.py
class InterleavingMethod(object): ''' Interleaving ''' def interleave(self, a, b): ''' a: a list of document IDs b: a list of document IDs Return an instance of Ranking ''' raise NotImplementedError() def multileave(self, *lists): ''' *lists: lists of document IDs Return an instance of Ranking ''' raise NotImplementedError() def evaluate(self, ranking, clicks): ''' ranking: an instance of Ranking generated by Balanced.interleave clicks: a list of indices clicked by a user Return one of the following tuples: - (1, 0): Ranking 'a' won - (0, 1): Ranking 'b' won - (0, 0): Tie ''' raise NotImplementedError()
class InterleavingMethod(object): ''' Interleaving ''' def interleave(self, k, a, b): ''' k: the maximum length of resultant interleaving a: a list of document IDs b: a list of document IDs Return an instance of Ranking ''' raise NotImplementedError() def multileave(self, k, *lists): ''' k: the maximum length of resultant multileaving *lists: lists of document IDs Return an instance of Ranking ''' raise NotImplementedError() def evaluate(self, ranking, clicks): ''' ranking: an instance of Ranking generated by Balanced.interleave clicks: a list of indices clicked by a user Return one of the following tuples: - (1, 0): Ranking 'a' won - (0, 1): Ranking 'b' won - (0, 0): Tie ''' raise NotImplementedError()
Add a length argument for InterleavingMethod.interleave and multileave
Add a length argument for InterleavingMethod.interleave and multileave
Python
mit
mpkato/interleaving
class InterleavingMethod(object): ''' Interleaving ''' - def interleave(self, a, b): + def interleave(self, k, a, b): ''' + k: the maximum length of resultant interleaving a: a list of document IDs b: a list of document IDs Return an instance of Ranking ''' raise NotImplementedError() - def multileave(self, *lists): + def multileave(self, k, *lists): ''' + k: the maximum length of resultant multileaving *lists: lists of document IDs Return an instance of Ranking ''' raise NotImplementedError() def evaluate(self, ranking, clicks): ''' ranking: an instance of Ranking generated by Balanced.interleave clicks: a list of indices clicked by a user Return one of the following tuples: - (1, 0): Ranking 'a' won - (0, 1): Ranking 'b' won - (0, 0): Tie ''' raise NotImplementedError()
Add a length argument for InterleavingMethod.interleave and multileave
## Code Before: class InterleavingMethod(object): ''' Interleaving ''' def interleave(self, a, b): ''' a: a list of document IDs b: a list of document IDs Return an instance of Ranking ''' raise NotImplementedError() def multileave(self, *lists): ''' *lists: lists of document IDs Return an instance of Ranking ''' raise NotImplementedError() def evaluate(self, ranking, clicks): ''' ranking: an instance of Ranking generated by Balanced.interleave clicks: a list of indices clicked by a user Return one of the following tuples: - (1, 0): Ranking 'a' won - (0, 1): Ranking 'b' won - (0, 0): Tie ''' raise NotImplementedError() ## Instruction: Add a length argument for InterleavingMethod.interleave and multileave ## Code After: class InterleavingMethod(object): ''' Interleaving ''' def interleave(self, k, a, b): ''' k: the maximum length of resultant interleaving a: a list of document IDs b: a list of document IDs Return an instance of Ranking ''' raise NotImplementedError() def multileave(self, k, *lists): ''' k: the maximum length of resultant multileaving *lists: lists of document IDs Return an instance of Ranking ''' raise NotImplementedError() def evaluate(self, ranking, clicks): ''' ranking: an instance of Ranking generated by Balanced.interleave clicks: a list of indices clicked by a user Return one of the following tuples: - (1, 0): Ranking 'a' won - (0, 1): Ranking 'b' won - (0, 0): Tie ''' raise NotImplementedError()
// ... existing code ... ''' def interleave(self, k, a, b): ''' k: the maximum length of resultant interleaving a: a list of document IDs // ... modified code ... def multileave(self, k, *lists): ''' k: the maximum length of resultant multileaving *lists: lists of document IDs // ... rest of the code ...
f1bc5d1b491926ccbe098a28a5b08a60741e5bc5
this_app/models.py
this_app/models.py
from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash class User(UserMixin): """Represents a user who can Create, Read, Update & Delete his own bucketlists""" counter = 0 users = {} def __init__(self, email, username, password): """Constructor class to initialize class""" self.email = email self.username = username self.password = password User.counter += 1 def create_user(self): """ Class to create and store a user object """ self.users.update({ self.counter: { 'email': self.email, 'username': self.username, 'password': self.password } }) return self.users def is_active(self): """True, as all users are active.""" return True def get_id(self): """Return the email address to satisfy Flask-Login's requirements.""" return self.email def is_authenticated(self): """Return True if the user is authenticated.""" return True def is_anonymous(self): """False, as anonymous users aren't supported.""" return False
from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash class User(UserMixin): """Represents a user who can Create, Read, Update & Delete his own bucketlists""" counter = 0 users = {} def __init__(self, email, username, password): """Constructor class to initialize class""" self.email = email self.username = username self.password = password User.counter += 1 def create_user(self): """ Class to create and store a user object """ self.users.update({ self.counter: { 'email': self.email, 'username': self.username, 'password': self.password } }) return self.users @property def is_active(self): """True, as all users are active.""" return True def get_id(self): """Return the id """ users_dict = self.users.items() user = {key:value for key, value in users_dict} return str(user.keys()) @property def is_authenticated(self): """Return True if the user is authenticated.""" return True def is_anonymous(self): """False, as anonymous users aren't supported.""" return False
Use autoincrementing ID as primary key
Use autoincrementing ID as primary key
Python
mit
borenho/flask-bucketlist,borenho/flask-bucketlist
from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash class User(UserMixin): """Represents a user who can Create, Read, Update & Delete his own bucketlists""" counter = 0 users = {} def __init__(self, email, username, password): """Constructor class to initialize class""" self.email = email self.username = username self.password = password User.counter += 1 def create_user(self): """ Class to create and store a user object """ self.users.update({ self.counter: { 'email': self.email, 'username': self.username, 'password': self.password } }) return self.users + @property def is_active(self): """True, as all users are active.""" return True def get_id(self): - """Return the email address to satisfy Flask-Login's requirements.""" - return self.email + """Return the id """ + users_dict = self.users.items() + user = {key:value for key, value in users_dict} + return str(user.keys()) + @property def is_authenticated(self): """Return True if the user is authenticated.""" return True def is_anonymous(self): """False, as anonymous users aren't supported.""" return False
Use autoincrementing ID as primary key
## Code Before: from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash class User(UserMixin): """Represents a user who can Create, Read, Update & Delete his own bucketlists""" counter = 0 users = {} def __init__(self, email, username, password): """Constructor class to initialize class""" self.email = email self.username = username self.password = password User.counter += 1 def create_user(self): """ Class to create and store a user object """ self.users.update({ self.counter: { 'email': self.email, 'username': self.username, 'password': self.password } }) return self.users def is_active(self): """True, as all users are active.""" return True def get_id(self): """Return the email address to satisfy Flask-Login's requirements.""" return self.email def is_authenticated(self): """Return True if the user is authenticated.""" return True def is_anonymous(self): """False, as anonymous users aren't supported.""" return False ## Instruction: Use autoincrementing ID as primary key ## Code After: from flask_login import UserMixin from werkzeug.security import generate_password_hash, check_password_hash class User(UserMixin): """Represents a user who can Create, Read, Update & Delete his own bucketlists""" counter = 0 users = {} def __init__(self, email, username, password): """Constructor class to initialize class""" self.email = email self.username = username self.password = password User.counter += 1 def create_user(self): """ Class to create and store a user object """ self.users.update({ self.counter: { 'email': self.email, 'username': self.username, 'password': self.password } }) return self.users @property def is_active(self): """True, as all users are active.""" return True def get_id(self): """Return the id """ users_dict = self.users.items() user = {key:value for key, value in users_dict} return str(user.keys()) @property def is_authenticated(self): """Return True if the user is authenticated.""" return True def is_anonymous(self): """False, as anonymous users aren't supported.""" return False
# ... existing code ... @property def is_active(self): # ... modified code ... def get_id(self): """Return the id """ users_dict = self.users.items() user = {key:value for key, value in users_dict} return str(user.keys()) @property def is_authenticated(self): # ... rest of the code ...
0241e253c68ca6862a3da26d29a649f65c27ae36
demos/chatroom/experiment.py
demos/chatroom/experiment.py
"""Coordination chatroom game.""" import dallinger as dlgr from dallinger.config import get_config try: unicode = unicode except NameError: # Python 3 unicode = str config = get_config() def extra_settings(): config.register('network', unicode) config.register('n', int) class CoordinationChatroom(dlgr.experiments.Experiment): """Define the structure of the experiment.""" def __init__(self, session): """Initialize the experiment.""" super(CoordinationChatroom, self).__init__(session) self.experiment_repeats = 1 self.num_participants = config.get('n') self.initial_recruitment_size = self.num_participants self.quorum = self.num_participants self.config = config if not self.config.ready: self.config.load_config() self.setup() def create_network(self): """Create a new network by reading the configuration file.""" class_ = getattr( dlgr.networks, self.config.get('network') ) return class_(max_size=self.num_participants) def info_post_request(self, node, info): """Run when a request to create an info is complete.""" for agent in node.neighbors(): node.transmit(what=info, to_whom=agent) def create_node(self, participant, network): """Create a node for a participant.""" return dlgr.nodes.Agent(network=network, participant=participant)
"""Coordination chatroom game.""" import dallinger as dlgr from dallinger.compat import unicode from dallinger.config import get_config config = get_config() def extra_settings(): config.register('network', unicode) config.register('n', int) class CoordinationChatroom(dlgr.experiments.Experiment): """Define the structure of the experiment.""" def __init__(self, session): """Initialize the experiment.""" super(CoordinationChatroom, self).__init__(session) self.experiment_repeats = 1 self.num_participants = config.get('n') self.initial_recruitment_size = self.num_participants self.quorum = self.num_participants self.config = config if not self.config.ready: self.config.load_config() self.setup() def create_network(self): """Create a new network by reading the configuration file.""" class_ = getattr( dlgr.networks, self.config.get('network') ) return class_(max_size=self.num_participants) def info_post_request(self, node, info): """Run when a request to create an info is complete.""" for agent in node.neighbors(): node.transmit(what=info, to_whom=agent) def create_node(self, participant, network): """Create a node for a participant.""" return dlgr.nodes.Agent(network=network, participant=participant)
Use compat for unicode import
Use compat for unicode import
Python
mit
Dallinger/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger
"""Coordination chatroom game.""" import dallinger as dlgr + from dallinger.compat import unicode from dallinger.config import get_config - try: - unicode = unicode - except NameError: # Python 3 - unicode = str config = get_config() def extra_settings(): config.register('network', unicode) config.register('n', int) class CoordinationChatroom(dlgr.experiments.Experiment): """Define the structure of the experiment.""" def __init__(self, session): """Initialize the experiment.""" super(CoordinationChatroom, self).__init__(session) self.experiment_repeats = 1 self.num_participants = config.get('n') self.initial_recruitment_size = self.num_participants self.quorum = self.num_participants self.config = config if not self.config.ready: self.config.load_config() self.setup() def create_network(self): """Create a new network by reading the configuration file.""" class_ = getattr( dlgr.networks, self.config.get('network') ) return class_(max_size=self.num_participants) def info_post_request(self, node, info): """Run when a request to create an info is complete.""" for agent in node.neighbors(): node.transmit(what=info, to_whom=agent) def create_node(self, participant, network): """Create a node for a participant.""" return dlgr.nodes.Agent(network=network, participant=participant)
Use compat for unicode import
## Code Before: """Coordination chatroom game.""" import dallinger as dlgr from dallinger.config import get_config try: unicode = unicode except NameError: # Python 3 unicode = str config = get_config() def extra_settings(): config.register('network', unicode) config.register('n', int) class CoordinationChatroom(dlgr.experiments.Experiment): """Define the structure of the experiment.""" def __init__(self, session): """Initialize the experiment.""" super(CoordinationChatroom, self).__init__(session) self.experiment_repeats = 1 self.num_participants = config.get('n') self.initial_recruitment_size = self.num_participants self.quorum = self.num_participants self.config = config if not self.config.ready: self.config.load_config() self.setup() def create_network(self): """Create a new network by reading the configuration file.""" class_ = getattr( dlgr.networks, self.config.get('network') ) return class_(max_size=self.num_participants) def info_post_request(self, node, info): """Run when a request to create an info is complete.""" for agent in node.neighbors(): node.transmit(what=info, to_whom=agent) def create_node(self, participant, network): """Create a node for a participant.""" return dlgr.nodes.Agent(network=network, participant=participant) ## Instruction: Use compat for unicode import ## Code After: """Coordination chatroom game.""" import dallinger as dlgr from dallinger.compat import unicode from dallinger.config import get_config config = get_config() def extra_settings(): config.register('network', unicode) config.register('n', int) class CoordinationChatroom(dlgr.experiments.Experiment): """Define the structure of the experiment.""" def __init__(self, session): """Initialize the experiment.""" super(CoordinationChatroom, self).__init__(session) self.experiment_repeats = 1 self.num_participants = config.get('n') self.initial_recruitment_size = self.num_participants self.quorum = self.num_participants self.config = config if not self.config.ready: self.config.load_config() self.setup() def create_network(self): """Create a new network by reading the configuration file.""" class_ = getattr( dlgr.networks, self.config.get('network') ) return class_(max_size=self.num_participants) def info_post_request(self, node, info): """Run when a request to create an info is complete.""" for agent in node.neighbors(): node.transmit(what=info, to_whom=agent) def create_node(self, participant, network): """Create a node for a participant.""" return dlgr.nodes.Agent(network=network, participant=participant)
# ... existing code ... import dallinger as dlgr from dallinger.compat import unicode from dallinger.config import get_config # ... rest of the code ...
2267f31ba91ea649c54a51ab3e8f3babbe72f44e
openliveq/collection.py
openliveq/collection.py
from collections import defaultdict class Collection(object): DOC_FROM = ["question_body", "best_answer_body"] def __init__(self): ''' Compute the following statistics df: document frequency cf: collection frequency dn: total number of documents cn: total number of words ''' self.df = defaultdict(int) self.cf = defaultdict(int) self.dn = 0 self.cn = 0 def add(self, wordsets): ''' Add a question ''' for label in self.DOC_FROM: for w in set(wordsets[label].keys()): self.df[w] += 1 self.cf[w] += wordsets[label][w] self.cn += wordsets[label][w] self.dn += 1
from collections import defaultdict class Collection(object): DOC_FROM = ["question_body", "best_answer_body"] def __init__(self): ''' Compute the following statistics df: document frequency cf: collection frequency dn: total number of documents cn: total number of words ''' self.df = defaultdict(int) self.cf = defaultdict(int) self.dn = 0 self.cn = 0 def add(self, wordsets): ''' Add a question ''' for label in self.DOC_FROM: for w in set(wordsets[label].keys()): self.df[w] += 1 self.cf[w] += wordsets[label][w] self.cn += wordsets[label][w] self.dn += 1 @property def avgdlen(self): return float(self.cn) / self.dn
Add avgdlen property to Collection
Add avgdlen property to Collection
Python
mit
mpkato/openliveq
from collections import defaultdict class Collection(object): DOC_FROM = ["question_body", "best_answer_body"] def __init__(self): ''' Compute the following statistics df: document frequency cf: collection frequency dn: total number of documents cn: total number of words ''' self.df = defaultdict(int) self.cf = defaultdict(int) self.dn = 0 self.cn = 0 def add(self, wordsets): ''' Add a question ''' for label in self.DOC_FROM: for w in set(wordsets[label].keys()): self.df[w] += 1 self.cf[w] += wordsets[label][w] self.cn += wordsets[label][w] self.dn += 1 + @property + def avgdlen(self): + return float(self.cn) / self.dn +
Add avgdlen property to Collection
## Code Before:
from collections import defaultdict

class Collection(object):
    DOC_FROM = ["question_body", "best_answer_body"]

    def __init__(self):
        '''
        Compute the following statistics
            df: document frequency
            cf: collection frequency
            dn: total number of documents
            cn: total number of words
        '''
        self.df = defaultdict(int)
        self.cf = defaultdict(int)
        self.dn = 0
        self.cn = 0

    def add(self, wordsets):
        '''
        Add a question
        '''
        for label in self.DOC_FROM:
            for w in set(wordsets[label].keys()):
                self.df[w] += 1
                self.cf[w] += wordsets[label][w]
                self.cn += wordsets[label][w]
        self.dn += 1

## Instruction:
Add avgdlen property to Collection

## Code After:
from collections import defaultdict

class Collection(object):
    DOC_FROM = ["question_body", "best_answer_body"]

    def __init__(self):
        '''
        Compute the following statistics
            df: document frequency
            cf: collection frequency
            dn: total number of documents
            cn: total number of words
        '''
        self.df = defaultdict(int)
        self.cf = defaultdict(int)
        self.dn = 0
        self.cn = 0

    def add(self, wordsets):
        '''
        Add a question
        '''
        for label in self.DOC_FROM:
            for w in set(wordsets[label].keys()):
                self.df[w] += 1
                self.cf[w] += wordsets[label][w]
                self.cn += wordsets[label][w]
        self.dn += 1

    @property
    def avgdlen(self):
        return float(self.cn) / self.dn
... self.dn += 1 @property def avgdlen(self): return float(self.cn) / self.dn ...
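A quick sanity check of the new property against the class above — the wordsets are invented; each `add()` call counts one document spanning both `DOC_FROM` fields:

coll = Collection()  # the Collection class defined in this record
coll.add({'question_body': {'python': 2, 'loop': 1},
          'best_answer_body': {'python': 1}})
coll.add({'question_body': {'regex': 1},
          'best_answer_body': {'regex': 2, 'group': 1}})
print(coll.cn, coll.dn)  # -> 8 2
print(coll.avgdlen)      # -> 4.0 words per document on average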
41fe44e99361d9006a8b196e9b886ffdb3e8e460
functional_tests/test_evexml.py
functional_tests/test_evexml.py
from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.test import tag from django.shortcuts import reverse from selenium import webdriver from selenium.webdriver.common.keys import Keys MAX_WAIT = 10 @tag('functional') class SubmissionTest(StaticLiveServerTestCase): """Tests for users who are submitting xml api key. """ @classmethod def setUpClass(cls): super(SubmissionTest, cls).setUpClass() cls.browser = webdriver.Chrome() cls.browser.maximize_window() cls.browser.implicitly_wait(MAX_WAIT) super(SubmissionTest, cls).setUpClass() @classmethod def tearDownClass(cls): cls.browser.refresh() cls.browser.quit() super(SubmissionTest, cls).tearDownClass() def tearDown(self): self.browser.refresh() def test_user_can_see_apikey_form(self): """A user should be able to see the form for submitting api keys. """ # They browse to the eve api keys page. url = self.live_server_url + reverse('eveapi_submit') self.browser.get(self.live_server_url) # They see input boxes for keyID and vCode. keyid_input = self.browser.find_element_by_name('keyID') vcode_input = self.browser.find_element_by_name('vCode')
from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.test import tag from django.shortcuts import reverse from selenium import webdriver from selenium.webdriver.common.keys import Keys MAX_WAIT = 10 @tag('functional') class SubmissionTest(StaticLiveServerTestCase): """Tests for users who are submitting xml api key. """ @classmethod def setUpClass(cls): super(SubmissionTest, cls).setUpClass() cls.browser = webdriver.Chrome() cls.browser.maximize_window() cls.browser.implicitly_wait(MAX_WAIT) super(SubmissionTest, cls).setUpClass() @classmethod def tearDownClass(cls): cls.browser.refresh() cls.browser.quit() super(SubmissionTest, cls).tearDownClass() def tearDown(self): self.browser.refresh() def test_user_can_see_apikey_form(self): """A user should be able to see the form for submitting api keys. """ # They browse to the eve api keys page. url = self.live_server_url + reverse('eveapi_add') self.browser.get(url) # They see input boxes for keyID and vCode. keyid_input = self.browser.find_element_by_name('keyID') vcode_input = self.browser.find_element_by_name('vCode')
Make test get correct url
Make test get correct url
Python
mit
randomic/aniauth-tdd,randomic/aniauth-tdd
from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.test import tag from django.shortcuts import reverse from selenium import webdriver from selenium.webdriver.common.keys import Keys MAX_WAIT = 10 @tag('functional') class SubmissionTest(StaticLiveServerTestCase): """Tests for users who are submitting xml api key. """ @classmethod def setUpClass(cls): super(SubmissionTest, cls).setUpClass() cls.browser = webdriver.Chrome() cls.browser.maximize_window() cls.browser.implicitly_wait(MAX_WAIT) super(SubmissionTest, cls).setUpClass() @classmethod def tearDownClass(cls): cls.browser.refresh() cls.browser.quit() super(SubmissionTest, cls).tearDownClass() def tearDown(self): self.browser.refresh() def test_user_can_see_apikey_form(self): """A user should be able to see the form for submitting api keys. """ # They browse to the eve api keys page. - url = self.live_server_url + reverse('eveapi_submit') + url = self.live_server_url + reverse('eveapi_add') - self.browser.get(self.live_server_url) + self.browser.get(url) # They see input boxes for keyID and vCode. keyid_input = self.browser.find_element_by_name('keyID') vcode_input = self.browser.find_element_by_name('vCode')
Make test get correct url
## Code Before: from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.test import tag from django.shortcuts import reverse from selenium import webdriver from selenium.webdriver.common.keys import Keys MAX_WAIT = 10 @tag('functional') class SubmissionTest(StaticLiveServerTestCase): """Tests for users who are submitting xml api key. """ @classmethod def setUpClass(cls): super(SubmissionTest, cls).setUpClass() cls.browser = webdriver.Chrome() cls.browser.maximize_window() cls.browser.implicitly_wait(MAX_WAIT) super(SubmissionTest, cls).setUpClass() @classmethod def tearDownClass(cls): cls.browser.refresh() cls.browser.quit() super(SubmissionTest, cls).tearDownClass() def tearDown(self): self.browser.refresh() def test_user_can_see_apikey_form(self): """A user should be able to see the form for submitting api keys. """ # They browse to the eve api keys page. url = self.live_server_url + reverse('eveapi_submit') self.browser.get(self.live_server_url) # They see input boxes for keyID and vCode. keyid_input = self.browser.find_element_by_name('keyID') vcode_input = self.browser.find_element_by_name('vCode') ## Instruction: Make test get correct url ## Code After: from django.contrib.staticfiles.testing import StaticLiveServerTestCase from django.test import tag from django.shortcuts import reverse from selenium import webdriver from selenium.webdriver.common.keys import Keys MAX_WAIT = 10 @tag('functional') class SubmissionTest(StaticLiveServerTestCase): """Tests for users who are submitting xml api key. """ @classmethod def setUpClass(cls): super(SubmissionTest, cls).setUpClass() cls.browser = webdriver.Chrome() cls.browser.maximize_window() cls.browser.implicitly_wait(MAX_WAIT) super(SubmissionTest, cls).setUpClass() @classmethod def tearDownClass(cls): cls.browser.refresh() cls.browser.quit() super(SubmissionTest, cls).tearDownClass() def tearDown(self): self.browser.refresh() def test_user_can_see_apikey_form(self): """A user should be able to see the form for submitting api keys. """ # They browse to the eve api keys page. url = self.live_server_url + reverse('eveapi_add') self.browser.get(url) # They see input boxes for keyID and vCode. keyid_input = self.browser.find_element_by_name('keyID') vcode_input = self.browser.find_element_by_name('vCode')
... # They browse to the eve api keys page. url = self.live_server_url + reverse('eveapi_add') self.browser.get(url) # They see input boxes for keyID and vCode. ...
596613c964311104098e64eeb349216bc7cd0023
saleor/demo/views.py
saleor/demo/views.py
from django.conf import settings from django.shortcuts import render from ..graphql.views import API_PATH, GraphQLView EXAMPLE_QUERY = """# Welcome to Saleor GraphQL API! # # Type queries into this side of the screen, and you will see # intelligent typeaheads aware of the current GraphQL type schema # and live syntax and validation errors highlighted within the text. # # Here is an example query to fetch a list of products: # { products(first: 5, channel: "%(channel_slug)s") { edges { node { id name description } } } } """ % { "channel_slug": settings.DEFAULT_CHANNEL_SLUG } class DemoGraphQLView(GraphQLView): def render_playground(self, request): ctx = { "query": EXAMPLE_QUERY, "api_url": request.build_absolute_uri(str(API_PATH)), } return render(request, "graphql/playground.html", ctx)
from django.conf import settings from django.shortcuts import render from ..graphql.views import GraphQLView EXAMPLE_QUERY = """# Welcome to Saleor GraphQL API! # # Type queries into this side of the screen, and you will see # intelligent typeaheads aware of the current GraphQL type schema # and live syntax and validation errors highlighted within the text. # # Here is an example query to fetch a list of products: # { products(first: 5, channel: "%(channel_slug)s") { edges { node { id name description } } } } """ % { "channel_slug": settings.DEFAULT_CHANNEL_SLUG } class DemoGraphQLView(GraphQLView): def render_playground(self, request): pwa_origin = settings.PWA_ORIGINS[0] ctx = { "query": EXAMPLE_QUERY, "api_url": f"https://{pwa_origin}/graphql/", } return render(request, "graphql/playground.html", ctx)
Fix playground CSP for demo if deployed under proxied domain
Fix playground CSP for demo if deployed under proxied domain
Python
bsd-3-clause
mociepka/saleor,mociepka/saleor,mociepka/saleor
from django.conf import settings from django.shortcuts import render - from ..graphql.views import API_PATH, GraphQLView + from ..graphql.views import GraphQLView EXAMPLE_QUERY = """# Welcome to Saleor GraphQL API! # # Type queries into this side of the screen, and you will see # intelligent typeaheads aware of the current GraphQL type schema # and live syntax and validation errors highlighted within the text. # # Here is an example query to fetch a list of products: # { products(first: 5, channel: "%(channel_slug)s") { edges { node { id name description } } } } """ % { "channel_slug": settings.DEFAULT_CHANNEL_SLUG } class DemoGraphQLView(GraphQLView): def render_playground(self, request): + pwa_origin = settings.PWA_ORIGINS[0] ctx = { "query": EXAMPLE_QUERY, - "api_url": request.build_absolute_uri(str(API_PATH)), + "api_url": f"https://{pwa_origin}/graphql/", } return render(request, "graphql/playground.html", ctx)
Fix playground CSP for demo if deployed under proxied domain
## Code Before: from django.conf import settings from django.shortcuts import render from ..graphql.views import API_PATH, GraphQLView EXAMPLE_QUERY = """# Welcome to Saleor GraphQL API! # # Type queries into this side of the screen, and you will see # intelligent typeaheads aware of the current GraphQL type schema # and live syntax and validation errors highlighted within the text. # # Here is an example query to fetch a list of products: # { products(first: 5, channel: "%(channel_slug)s") { edges { node { id name description } } } } """ % { "channel_slug": settings.DEFAULT_CHANNEL_SLUG } class DemoGraphQLView(GraphQLView): def render_playground(self, request): ctx = { "query": EXAMPLE_QUERY, "api_url": request.build_absolute_uri(str(API_PATH)), } return render(request, "graphql/playground.html", ctx) ## Instruction: Fix playground CSP for demo if deployed under proxied domain ## Code After: from django.conf import settings from django.shortcuts import render from ..graphql.views import GraphQLView EXAMPLE_QUERY = """# Welcome to Saleor GraphQL API! # # Type queries into this side of the screen, and you will see # intelligent typeaheads aware of the current GraphQL type schema # and live syntax and validation errors highlighted within the text. # # Here is an example query to fetch a list of products: # { products(first: 5, channel: "%(channel_slug)s") { edges { node { id name description } } } } """ % { "channel_slug": settings.DEFAULT_CHANNEL_SLUG } class DemoGraphQLView(GraphQLView): def render_playground(self, request): pwa_origin = settings.PWA_ORIGINS[0] ctx = { "query": EXAMPLE_QUERY, "api_url": f"https://{pwa_origin}/graphql/", } return render(request, "graphql/playground.html", ctx)
# ... existing code ... from ..graphql.views import GraphQLView # ... modified code ... def render_playground(self, request): pwa_origin = settings.PWA_ORIGINS[0] ctx = { ... "query": EXAMPLE_QUERY, "api_url": f"https://{pwa_origin}/graphql/", } # ... rest of the code ...
4706d6feaff7057d04def0544e291900a754558e
nbgrader/apps/solutionapp.py
nbgrader/apps/solutionapp.py
from IPython.config.loader import Config from IPython.config.application import catch_config_error from IPython.utils.traitlets import Unicode from nbgrader.apps.customnbconvertapp import CustomNbConvertApp class SolutionApp(CustomNbConvertApp): name = Unicode(u'nbgrader-solution') description = Unicode(u'Prepare a solution version of an assignment') def _export_format_default(self): return 'notebook' def build_extra_config(self): self.extra_config = Config() self.extra_config.Exporter.preprocessors = [ 'nbgrader.preprocessors.IncludeHeaderFooter', 'nbgrader.preprocessors.TableOfContents', 'nbgrader.preprocessors.RenderSolutions', 'nbgrader.preprocessors.ExtractTests', 'IPython.nbconvert.preprocessors.ExecutePreprocessor' ] self.extra_config.RenderSolutions.solution = True self.config.merge(self.extra_config)
from IPython.config.loader import Config from IPython.config.application import catch_config_error from IPython.utils.traitlets import Unicode from nbgrader.apps.customnbconvertapp import CustomNbConvertApp class SolutionApp(CustomNbConvertApp): name = Unicode(u'nbgrader-solution') description = Unicode(u'Prepare a solution version of an assignment') def _export_format_default(self): return 'notebook' def build_extra_config(self): self.extra_config = Config() self.extra_config.Exporter.preprocessors = [ 'nbgrader.preprocessors.IncludeHeaderFooter', 'nbgrader.preprocessors.TableOfContents', 'nbgrader.preprocessors.RenderSolutions', 'nbgrader.preprocessors.ExtractTests', 'IPython.nbconvert.preprocessors.ExecutePreprocessor' ] self.extra_config.RenderSolutions.solution = True self.extra_config.NbGraderApp.writer_class = 'IPython.nbconvert.writers.FilesWriter' self.config.merge(self.extra_config)
Add files writer to solution app
Add files writer to solution app
Python
bsd-3-clause
ellisonbg/nbgrader,jupyter/nbgrader,modulexcite/nbgrader,ellisonbg/nbgrader,modulexcite/nbgrader,jupyter/nbgrader,ellisonbg/nbgrader,dementrock/nbgrader,jdfreder/nbgrader,jupyter/nbgrader,EdwardJKim/nbgrader,jhamrick/nbgrader,MatKallada/nbgrader,dementrock/nbgrader,jhamrick/nbgrader,jupyter/nbgrader,alope107/nbgrader,alope107/nbgrader,EdwardJKim/nbgrader,ellisonbg/nbgrader,jhamrick/nbgrader,jupyter/nbgrader,jdfreder/nbgrader,jhamrick/nbgrader,EdwardJKim/nbgrader,EdwardJKim/nbgrader,MatKallada/nbgrader
from IPython.config.loader import Config from IPython.config.application import catch_config_error from IPython.utils.traitlets import Unicode from nbgrader.apps.customnbconvertapp import CustomNbConvertApp class SolutionApp(CustomNbConvertApp): name = Unicode(u'nbgrader-solution') description = Unicode(u'Prepare a solution version of an assignment') def _export_format_default(self): return 'notebook' def build_extra_config(self): self.extra_config = Config() self.extra_config.Exporter.preprocessors = [ 'nbgrader.preprocessors.IncludeHeaderFooter', 'nbgrader.preprocessors.TableOfContents', 'nbgrader.preprocessors.RenderSolutions', 'nbgrader.preprocessors.ExtractTests', 'IPython.nbconvert.preprocessors.ExecutePreprocessor' ] self.extra_config.RenderSolutions.solution = True + self.extra_config.NbGraderApp.writer_class = 'IPython.nbconvert.writers.FilesWriter' self.config.merge(self.extra_config)
Add files writer to solution app
## Code Before: from IPython.config.loader import Config from IPython.config.application import catch_config_error from IPython.utils.traitlets import Unicode from nbgrader.apps.customnbconvertapp import CustomNbConvertApp class SolutionApp(CustomNbConvertApp): name = Unicode(u'nbgrader-solution') description = Unicode(u'Prepare a solution version of an assignment') def _export_format_default(self): return 'notebook' def build_extra_config(self): self.extra_config = Config() self.extra_config.Exporter.preprocessors = [ 'nbgrader.preprocessors.IncludeHeaderFooter', 'nbgrader.preprocessors.TableOfContents', 'nbgrader.preprocessors.RenderSolutions', 'nbgrader.preprocessors.ExtractTests', 'IPython.nbconvert.preprocessors.ExecutePreprocessor' ] self.extra_config.RenderSolutions.solution = True self.config.merge(self.extra_config) ## Instruction: Add files writer to solution app ## Code After: from IPython.config.loader import Config from IPython.config.application import catch_config_error from IPython.utils.traitlets import Unicode from nbgrader.apps.customnbconvertapp import CustomNbConvertApp class SolutionApp(CustomNbConvertApp): name = Unicode(u'nbgrader-solution') description = Unicode(u'Prepare a solution version of an assignment') def _export_format_default(self): return 'notebook' def build_extra_config(self): self.extra_config = Config() self.extra_config.Exporter.preprocessors = [ 'nbgrader.preprocessors.IncludeHeaderFooter', 'nbgrader.preprocessors.TableOfContents', 'nbgrader.preprocessors.RenderSolutions', 'nbgrader.preprocessors.ExtractTests', 'IPython.nbconvert.preprocessors.ExecutePreprocessor' ] self.extra_config.RenderSolutions.solution = True self.extra_config.NbGraderApp.writer_class = 'IPython.nbconvert.writers.FilesWriter' self.config.merge(self.extra_config)
// ... existing code ... self.extra_config.RenderSolutions.solution = True self.extra_config.NbGraderApp.writer_class = 'IPython.nbconvert.writers.FilesWriter' self.config.merge(self.extra_config) // ... rest of the code ...
85769162560d83a58ccc92f818559ddd3dce2a09
pages/index.py
pages/index.py
import web
from modules.base import renderer
from modules.login import loginInstance
from modules.courses import Course

#Index page
class IndexPage:
    #Simply display the page
    def GET(self):
        if loginInstance.isLoggedIn():
            userInput = web.input();
            if "logoff" in userInput:
                loginInstance.disconnect();
                return renderer.index(False)
            else:
                courses = Course.GetAllCoursesIds()
                return renderer.main(courses)
        else:
            return renderer.index(False)
    #Try to log in
    def POST(self):
        userInput = web.input();
        if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
            return renderer.main()
        else:
            return renderer.index(True)
import web
from modules.base import renderer
from modules.login import loginInstance
from modules.courses import Course

#Index page
class IndexPage:
    #Simply display the page
    def GET(self):
        if loginInstance.isLoggedIn():
            userInput = web.input();
            if "logoff" in userInput:
                loginInstance.disconnect();
                return renderer.index(False)
            else:
                return renderer.main(Course.GetAllCoursesIds())
        else:
            return renderer.index(False)
    #Try to log in
    def POST(self):
        userInput = web.input();
        if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
            return renderer.main(Course.GetAllCoursesIds())
        else:
            return renderer.index(True)
Fix another bug in the authentication
Fix another bug in the authentication
Python
agpl-3.0
layus/INGInious,GuillaumeDerval/INGInious,GuillaumeDerval/INGInious,layus/INGInious,layus/INGInious,GuillaumeDerval/INGInious,GuillaumeDerval/INGInious,layus/INGInious
import web
from modules.base import renderer
from modules.login import loginInstance
from modules.courses import Course

#Index page
class IndexPage:
    #Simply display the page
    def GET(self):
        if loginInstance.isLoggedIn():
            userInput = web.input();
            if "logoff" in userInput:
                loginInstance.disconnect();
                return renderer.index(False)
            else:
-                 courses = Course.GetAllCoursesIds()
-                 return renderer.main(courses)
+                 return renderer.main(Course.GetAllCoursesIds())
        else:
            return renderer.index(False)
    #Try to log in
    def POST(self):
        userInput = web.input();
        if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
-             return renderer.main()
+             return renderer.main(Course.GetAllCoursesIds())
        else:
            return renderer.index(True)
Fix another bug in the authentication
## Code Before: import web
from modules.base import renderer
from modules.login import loginInstance
from modules.courses import Course

#Index page
class IndexPage:
    #Simply display the page
    def GET(self):
        if loginInstance.isLoggedIn():
            userInput = web.input();
            if "logoff" in userInput:
                loginInstance.disconnect();
                return renderer.index(False)
            else:
                courses = Course.GetAllCoursesIds()
                return renderer.main(courses)
        else:
            return renderer.index(False)
    #Try to log in
    def POST(self):
        userInput = web.input();
        if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
            return renderer.main()
        else:
            return renderer.index(True)
## Instruction: Fix another bug in the authentication
## Code After: import web
from modules.base import renderer
from modules.login import loginInstance
from modules.courses import Course

#Index page
class IndexPage:
    #Simply display the page
    def GET(self):
        if loginInstance.isLoggedIn():
            userInput = web.input();
            if "logoff" in userInput:
                loginInstance.disconnect();
                return renderer.index(False)
            else:
                return renderer.main(Course.GetAllCoursesIds())
        else:
            return renderer.index(False)
    #Try to log in
    def POST(self):
        userInput = web.input();
        if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
            return renderer.main(Course.GetAllCoursesIds())
        else:
            return renderer.index(True)
// ... existing code ...
            else:
                return renderer.main(Course.GetAllCoursesIds())
        else:
// ... modified code ...
        if "login" in userInput and "password" in userInput and loginInstance.connect(userInput.login,userInput.password):
            return renderer.main(Course.GetAllCoursesIds())
        else:
// ... rest of the code ...
f0c7e1b8a2de6f7e9445e2158cf679f399df6545
jupyternotify/jupyternotify.py
jupyternotify/jupyternotify.py
import uuid

from IPython.core.getipython import get_ipython
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.display import display, Javascript
from pkg_resources import resource_filename


@magics_class
class JupyterNotifyMagics(Magics):
    def __init__(self, shell):
        super().__init__(shell)
        with open(resource_filename("jupyternotify", "js/init.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString))

    @cell_magic
    def notify(self, line, cell):
        # generate a uuid so that we only deliver this notification once, not again
        # when the browser reloads (we append a div to check that)
        notification_uuid = uuid.uuid4()

        output = get_ipython().run_cell(cell)

        # display our browser notification using javascript
        with open(resource_filename("jupyternotify", "js/notify.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString % {"notification_uuid": notification_uuid}))

        # finally, if we generated an exception, print the traceback
        if output.error_in_exec is not None:
            output.raise_error()
import uuid

from IPython.core.getipython import get_ipython
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.display import display, Javascript
from pkg_resources import resource_filename


@magics_class
class JupyterNotifyMagics(Magics):
    def __init__(self, shell):
        super(JupyterNotifyMagics, self).__init__(shell)
        with open(resource_filename("jupyternotify", "js/init.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString))

    @cell_magic
    def notify(self, line, cell):
        # generate a uuid so that we only deliver this notification once, not again
        # when the browser reloads (we append a div to check that)
        notification_uuid = uuid.uuid4()

        output = get_ipython().run_cell(cell)

        # display our browser notification using javascript
        with open(resource_filename("jupyternotify", "js/notify.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString % {"notification_uuid": notification_uuid}))

        # finally, if we generated an exception, print the traceback
        if output.error_in_exec is not None:
            output.raise_error()
Make this work with python2 too.
Make this work with python2 too.
Python
bsd-3-clause
ShopRunner/jupyter-notify,ShopRunner/jupyter-notify
import uuid

from IPython.core.getipython import get_ipython
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.display import display, Javascript
from pkg_resources import resource_filename


@magics_class
class JupyterNotifyMagics(Magics):
    def __init__(self, shell):
-         super().__init__(shell)
+         super(JupyterNotifyMagics, self).__init__(shell)
        with open(resource_filename("jupyternotify", "js/init.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString))

    @cell_magic
    def notify(self, line, cell):
        # generate a uuid so that we only deliver this notification once, not again
        # when the browser reloads (we append a div to check that)
        notification_uuid = uuid.uuid4()

        output = get_ipython().run_cell(cell)

        # display our browser notification using javascript
        with open(resource_filename("jupyternotify", "js/notify.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString % {"notification_uuid": notification_uuid}))

        # finally, if we generated an exception, print the traceback
        if output.error_in_exec is not None:
            output.raise_error()
Make this work with python2 too.
## Code Before: import uuid

from IPython.core.getipython import get_ipython
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.display import display, Javascript
from pkg_resources import resource_filename


@magics_class
class JupyterNotifyMagics(Magics):
    def __init__(self, shell):
        super().__init__(shell)
        with open(resource_filename("jupyternotify", "js/init.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString))

    @cell_magic
    def notify(self, line, cell):
        # generate a uuid so that we only deliver this notification once, not again
        # when the browser reloads (we append a div to check that)
        notification_uuid = uuid.uuid4()

        output = get_ipython().run_cell(cell)

        # display our browser notification using javascript
        with open(resource_filename("jupyternotify", "js/notify.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString % {"notification_uuid": notification_uuid}))

        # finally, if we generated an exception, print the traceback
        if output.error_in_exec is not None:
            output.raise_error()
## Instruction: Make this work with python2 too.
## Code After: import uuid

from IPython.core.getipython import get_ipython
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.display import display, Javascript
from pkg_resources import resource_filename


@magics_class
class JupyterNotifyMagics(Magics):
    def __init__(self, shell):
        super(JupyterNotifyMagics, self).__init__(shell)
        with open(resource_filename("jupyternotify", "js/init.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString))

    @cell_magic
    def notify(self, line, cell):
        # generate a uuid so that we only deliver this notification once, not again
        # when the browser reloads (we append a div to check that)
        notification_uuid = uuid.uuid4()

        output = get_ipython().run_cell(cell)

        # display our browser notification using javascript
        with open(resource_filename("jupyternotify", "js/notify.js")) as jsFile:
            jsString = jsFile.read()
        display(Javascript(jsString % {"notification_uuid": notification_uuid}))

        # finally, if we generated an exception, print the traceback
        if output.error_in_exec is not None:
            output.raise_error()
...
    def __init__(self, shell):
        super(JupyterNotifyMagics, self).__init__(shell)
        with open(resource_filename("jupyternotify", "js/init.js")) as jsFile:
...
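This one-line fix works because the zero-argument form of super() was introduced in Python 3, while the explicit two-argument form is accepted by both major versions (on new-style classes). A self-contained illustration:

class Base(object):
    def __init__(self, x):
        self.x = x

class Child(Base):
    def __init__(self, x):
        # super().__init__(x) would be Python-3-only;
        # the explicit form below also runs on Python 2.
        super(Child, self).__init__(x)

print(Child(3).x)  # 3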
74dfabb565dbd6581a300091c045067d0398e899
source/jormungandr/jormungandr/interfaces/v1/Coverage.py
source/jormungandr/jormungandr/interfaces/v1/Coverage.py
from flask.ext.restful import Resource, fields, marshal_with
from jormungandr import i_manager
from make_links import add_coverage_link, add_collection_links, clean_links
from converters_collection_type import collections_to_resource_type
from collections import OrderedDict

region_fields = {
    "id": fields.String(attribute="region_id"),
    "start_production_date": fields.String,
    "end_production_date": fields.String,
    "status": fields.String,
    "shape": fields.String,
}
regions_fields = OrderedDict([
    ("regions", fields.List(fields.Nested(region_fields)))
])

collections = collections_to_resource_type.keys()


class Coverage(Resource):

    @clean_links()
    @add_coverage_link()
    @add_collection_links(collections)
    @marshal_with(regions_fields)
    def get(self, region=None, lon=None, lat=None):
        return i_manager.regions(region, lon, lat), 200
from flask.ext.restful import Resource, fields, marshal_with
from jormungandr import i_manager
from make_links import add_coverage_link, add_coverage_link, add_collection_links, clean_links
from converters_collection_type import collections_to_resource_type
from collections import OrderedDict
from fields import NonNullNested

region_fields = {
    "id": fields.String(attribute="region_id"),
    "start_production_date": fields.String,
    "end_production_date": fields.String,
    "status": fields.String,
    "shape": fields.String,
    "error": NonNullNested({
        "code": fields.String,
        "value": fields.String
    })
}
regions_fields = OrderedDict([
    ("regions", fields.List(fields.Nested(region_fields)))
])

collections = collections_to_resource_type.keys()


class Coverage(Resource):

    @clean_links()
    @add_coverage_link()
    @add_collection_links(collections)
    @marshal_with(regions_fields)
    def get(self, region=None, lon=None, lat=None):
        return i_manager.regions(region, lon, lat), 200
Add error field to region
Jormungandr: Add error field to region
Python
agpl-3.0
VincentCATILLON/navitia,prhod/navitia,xlqian/navitia,prhod/navitia,prhod/navitia,xlqian/navitia,ballouche/navitia,is06/navitia,pbougue/navitia,ballouche/navitia,kadhikari/navitia,CanalTP/navitia,VincentCATILLON/navitia,frodrigo/navitia,CanalTP/navitia,pbougue/navitia,francois-vincent/navitia,TeXitoi/navitia,kinnou02/navitia,stifoon/navitia,frodrigo/navitia,Tisseo/navitia,kadhikari/navitia,stifoon/navitia,kinnou02/navitia,kinnou02/navitia,djludo/navitia,kadhikari/navitia,patochectp/navitia,xlqian/navitia,thiphariel/navitia,patochectp/navitia,xlqian/navitia,fueghan/navitia,fueghan/navitia,fueghan/navitia,CanalTP/navitia,lrocheWB/navitia,djludo/navitia,frodrigo/navitia,kadhikari/navitia,francois-vincent/navitia,VincentCATILLON/navitia,datanel/navitia,lrocheWB/navitia,TeXitoi/navitia,francois-vincent/navitia,antoine-de/navitia,ballouche/navitia,is06/navitia,CanalTP/navitia,francois-vincent/navitia,thiphariel/navitia,Tisseo/navitia,lrocheWB/navitia,kinnou02/navitia,VincentCATILLON/navitia,Tisseo/navitia,frodrigo/navitia,antoine-de/navitia,TeXitoi/navitia,patochectp/navitia,TeXitoi/navitia,stifoon/navitia,CanalTP/navitia,pbougue/navitia,antoine-de/navitia,djludo/navitia,is06/navitia,patochectp/navitia,xlqian/navitia,Tisseo/navitia,prhod/navitia,fueghan/navitia,datanel/navitia,djludo/navitia,thiphariel/navitia,lrocheWB/navitia,datanel/navitia,Tisseo/navitia,datanel/navitia,ballouche/navitia,thiphariel/navitia,pbougue/navitia,is06/navitia,antoine-de/navitia,stifoon/navitia
from flask.ext.restful import Resource, fields, marshal_with
from jormungandr import i_manager
- from make_links import add_coverage_link, add_collection_links, clean_links
+ from make_links import add_coverage_link, add_coverage_link, add_collection_links, clean_links
from converters_collection_type import collections_to_resource_type
from collections import OrderedDict
+ from fields import NonNullNested

region_fields = {
    "id": fields.String(attribute="region_id"),
    "start_production_date": fields.String,
    "end_production_date": fields.String,
    "status": fields.String,
    "shape": fields.String,
+     "error": NonNullNested({
+         "code": fields.String,
+         "value": fields.String
+     })
}
regions_fields = OrderedDict([
    ("regions", fields.List(fields.Nested(region_fields)))
])

collections = collections_to_resource_type.keys()


class Coverage(Resource):

    @clean_links()
    @add_coverage_link()
    @add_collection_links(collections)
    @marshal_with(regions_fields)
    def get(self, region=None, lon=None, lat=None):
        return i_manager.regions(region, lon, lat), 200
Add error field to region
## Code Before: from flask.ext.restful import Resource, fields, marshal_with
from jormungandr import i_manager
from make_links import add_coverage_link, add_collection_links, clean_links
from converters_collection_type import collections_to_resource_type
from collections import OrderedDict

region_fields = {
    "id": fields.String(attribute="region_id"),
    "start_production_date": fields.String,
    "end_production_date": fields.String,
    "status": fields.String,
    "shape": fields.String,
}
regions_fields = OrderedDict([
    ("regions", fields.List(fields.Nested(region_fields)))
])

collections = collections_to_resource_type.keys()


class Coverage(Resource):

    @clean_links()
    @add_coverage_link()
    @add_collection_links(collections)
    @marshal_with(regions_fields)
    def get(self, region=None, lon=None, lat=None):
        return i_manager.regions(region, lon, lat), 200
## Instruction: Add error field to region
## Code After: from flask.ext.restful import Resource, fields, marshal_with
from jormungandr import i_manager
from make_links import add_coverage_link, add_coverage_link, add_collection_links, clean_links
from converters_collection_type import collections_to_resource_type
from collections import OrderedDict
from fields import NonNullNested

region_fields = {
    "id": fields.String(attribute="region_id"),
    "start_production_date": fields.String,
    "end_production_date": fields.String,
    "status": fields.String,
    "shape": fields.String,
    "error": NonNullNested({
        "code": fields.String,
        "value": fields.String
    })
}
regions_fields = OrderedDict([
    ("regions", fields.List(fields.Nested(region_fields)))
])

collections = collections_to_resource_type.keys()


class Coverage(Resource):

    @clean_links()
    @add_coverage_link()
    @add_collection_links(collections)
    @marshal_with(regions_fields)
    def get(self, region=None, lon=None, lat=None):
        return i_manager.regions(region, lon, lat), 200
...
from jormungandr import i_manager
from make_links import add_coverage_link, add_coverage_link, add_collection_links, clean_links
from converters_collection_type import collections_to_resource_type
...
from collections import OrderedDict
from fields import NonNullNested
...
    "shape": fields.String,
    "error": NonNullNested({
        "code": fields.String,
        "value": fields.String
    })
}
...
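Two things worth noting in this record. First, the committed import line repeats add_coverage_link twice, which appears to be an accidental duplication (harmless in Python, since the second binding simply wins). Second, NonNullNested is a project-specific helper; the stock flask-restful way to marshal a nested structure is fields.Nested, sketched below with the modern import path (the record uses the deprecated flask.ext.restful spelling, and the sample data is illustrative):

from flask_restful import fields, marshal

region_fields = {
    'id': fields.String,
    'error': fields.Nested({
        'code': fields.String,
        'value': fields.String,
    }),
}

data = {'id': 'fr-idf', 'error': {'code': 'dead', 'value': 'region down'}}
print(marshal(data, region_fields))  # OrderedDict with id plus the nested error dict

The point of a NonNull variant is presumably to drop the key entirely when the value is absent, instead of emitting an all-None nested dict as plain fields.Nested does.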
14bd2c0732b5871ac43991a237a8f12a334e982d
sirius/LI_V00/__init__.py
sirius/LI_V00/__init__.py
from . import lattice as _lattice
from . import accelerator as _accelerator
from . import record_names

create_accelerator = accelerator.create_accelerator

# -- default accelerator values for LI_V00 --
energy = _lattice._energy
single_bunch_charge = _lattice._single_bunch_charge
multi_bunch_charge = _lattice._multi_bunch_charge
pulse_duration_interval = _lattice._pulse_duration_interval
default_optics_mode = _lattice._default_optics_mode.label
lattice_version = 'LI_V00'
family_data = _lattice._family_data
emittance = _lattice._emittance
from . import lattice as _lattice
from . import accelerator as _accelerator
from . import record_names

create_accelerator = accelerator.create_accelerator

# -- default accelerator values for LI_V00 --
energy = _lattice._energy
single_bunch_charge = _lattice._single_bunch_charge
multi_bunch_charge = _lattice._multi_bunch_charge
pulse_duration_interval = _lattice._pulse_duration_interval
default_optics_mode = _lattice._default_optics_mode.label
lattice_version = 'LI_V00'
family_data = _lattice._family_data
emittance = _lattice._emittance
global_coupling = 1.0 # "round" beam
Add parameters of initial beam distribution at LI
Add parameters of initial beam distribution at LI
Python
mit
lnls-fac/sirius
from . import lattice as _lattice
from . import accelerator as _accelerator
from . import record_names

create_accelerator = accelerator.create_accelerator

# -- default accelerator values for LI_V00 --
energy = _lattice._energy
single_bunch_charge = _lattice._single_bunch_charge
multi_bunch_charge = _lattice._multi_bunch_charge
pulse_duration_interval = _lattice._pulse_duration_interval
default_optics_mode = _lattice._default_optics_mode.label
lattice_version = 'LI_V00'
family_data = _lattice._family_data
- emittance = _lattice._emittance
+ emittance = _lattice._emittance
+ global_coupling = 1.0 # "round" beam
Add parameters of initial beam distribution at LI
## Code Before: from . import lattice as _lattice
from . import accelerator as _accelerator
from . import record_names

create_accelerator = accelerator.create_accelerator

# -- default accelerator values for LI_V00 --
energy = _lattice._energy
single_bunch_charge = _lattice._single_bunch_charge
multi_bunch_charge = _lattice._multi_bunch_charge
pulse_duration_interval = _lattice._pulse_duration_interval
default_optics_mode = _lattice._default_optics_mode.label
lattice_version = 'LI_V00'
family_data = _lattice._family_data
emittance = _lattice._emittance
## Instruction: Add parameters of initial beam distribution at LI
## Code After: from . import lattice as _lattice
from . import accelerator as _accelerator
from . import record_names

create_accelerator = accelerator.create_accelerator

# -- default accelerator values for LI_V00 --
energy = _lattice._energy
single_bunch_charge = _lattice._single_bunch_charge
multi_bunch_charge = _lattice._multi_bunch_charge
pulse_duration_interval = _lattice._pulse_duration_interval
default_optics_mode = _lattice._default_optics_mode.label
lattice_version = 'LI_V00'
family_data = _lattice._family_data
emittance = _lattice._emittance
global_coupling = 1.0 # "round" beam
...
family_data = _lattice._family_data
emittance = _lattice._emittance
global_coupling = 1.0 # "round" beam
...
09f429e76a7b2cd49ea66b70d314bb4510971a5f
gui.py
gui.py
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk


class MainWindow(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self, title="")

win = MainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk


class MainWindow(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self, title="Text Playing Game")
        self.set_border_width(10)
        self.set_size_request(500, 400)

win = MainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
Set GUI title and size
Set GUI title and size
Python
mit
Giovanni21M/Text-Playing-Game
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk


class MainWindow(Gtk.Window):
    def __init__(self):
-         Gtk.Window.__init__(self, title="")
+         Gtk.Window.__init__(self, title="Text Playing Game")
+         self.set_border_width(10)
+         self.set_size_request(500, 400)

win = MainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
Set GUI title and size
## Code Before: import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk


class MainWindow(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self, title="")

win = MainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
## Instruction: Set GUI title and size
## Code After: import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk


class MainWindow(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self, title="Text Playing Game")
        self.set_border_width(10)
        self.set_size_request(500, 400)

win = MainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
# ... existing code ...
    def __init__(self):
        Gtk.Window.__init__(self, title="Text Playing Game")
        self.set_border_width(10)
        self.set_size_request(500, 400)
# ... rest of the code ...
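A small GTK footnote on this record: set_size_request() sets a hard minimum for the widget, so the window above can no longer be shrunk below 500x400. If the goal is only a sensible starting size that the user may still shrink, set_default_size() is the usual call, as in this hedged variant of the same window:

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

win = Gtk.Window(title="Text Playing Game")
win.set_border_width(10)
win.set_default_size(500, 400)  # initial size only; the user can still resize both ways
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()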
3a7428723c66010dec1d246beb63be371428d3fe
qipipe/staging/staging_helpers.py
qipipe/staging/staging_helpers.py
"""Pipeline utility functions."""

import re

_SSS_REGEX = '(\w+\d{2})/(session\d{2})/(series\d{3})'
"""The subject/session/series regexp pattern."""

def match_session_hierarchy(path):
    """
    Matches the subject, session and series names from the given input path.

    @param path: the path to match
    @return: the matching (subject, session, series) tuple, or None if no match
    """
    match = re.search(_SSS_REGEX, path)
    if match:
        return match.groups()
"""Pipeline utility functions."""

import re
from .staging_error import StagingError

_SSS_REGEX = '(\w+\d{2})/(session\d{2})/(series\d{3})'
"""The subject/session/series regexp pattern."""

def match_series_hierarchy(path):
    """
    Matches the subject, session and series names from the given input path.

    @param path: the path to match
    @return: the matching (subject, session, series) tuple, or None if no match
    """
    match = re.search(_SSS_REGEX, path)
    if match:
        return match.groups()
    else:
        raise StagingError("The path %s does match the subject/session/series pattern" % path)
Raise error if no match.
Raise error if no match.
Python
bsd-2-clause
ohsu-qin/qipipe
"""Pipeline utility functions."""

import re
+ from .staging_error import StagingError

_SSS_REGEX = '(\w+\d{2})/(session\d{2})/(series\d{3})'
"""The subject/session/series regexp pattern."""

- def match_session_hierarchy(path):
+ def match_series_hierarchy(path):
    """
    Matches the subject, session and series names from the given input path.

    @param path: the path to match
    @return: the matching (subject, session, series) tuple, or None if no match
    """
    match = re.search(_SSS_REGEX, path)
    if match:
        return match.groups()
+     else:
+         raise StagingError("The path %s does match the subject/session/series pattern" % path)
Raise error if no match.
## Code Before: """Pipeline utility functions."""

import re

_SSS_REGEX = '(\w+\d{2})/(session\d{2})/(series\d{3})'
"""The subject/session/series regexp pattern."""

def match_session_hierarchy(path):
    """
    Matches the subject, session and series names from the given input path.

    @param path: the path to match
    @return: the matching (subject, session, series) tuple, or None if no match
    """
    match = re.search(_SSS_REGEX, path)
    if match:
        return match.groups()
## Instruction: Raise error if no match.
## Code After: """Pipeline utility functions."""

import re
from .staging_error import StagingError

_SSS_REGEX = '(\w+\d{2})/(session\d{2})/(series\d{3})'
"""The subject/session/series regexp pattern."""

def match_series_hierarchy(path):
    """
    Matches the subject, session and series names from the given input path.

    @param path: the path to match
    @return: the matching (subject, session, series) tuple, or None if no match
    """
    match = re.search(_SSS_REGEX, path)
    if match:
        return match.groups()
    else:
        raise StagingError("The path %s does match the subject/session/series pattern" % path)
// ... existing code ...
import re
from .staging_error import StagingError

// ... modified code ...
def match_series_hierarchy(path):
    """
...
        return match.groups()
    else:
        raise StagingError("The path %s does match the subject/session/series pattern" % path)
// ... rest of the code ...
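Note the wording of the raised message in this record: it reads "does match" where "does not match" is evidently intended, since the exception fires only on the no-match branch. The raise-instead-of-return-None pattern itself, with the message corrected, looks like this as a generic, self-contained sketch:

import re

PATTERN = r'(\w+\d{2})/(session\d{2})/(series\d{3})'

def match_or_raise(path):
    match = re.search(PATTERN, path)
    if match:
        return match.groups()
    # failing loudly beats silently returning None, which callers forget to check
    raise ValueError(
        "The path %s does not match the subject/session/series pattern" % path)

print(match_or_raise('sub01/session01/series001'))  # ('sub01', 'session01', 'series001')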
49f332149ae8a9a3b5faf82bc20b46dfaeb0a3ad
indra/sources/ctd/api.py
indra/sources/ctd/api.py
import pandas
from .processor import CTDChemicalDiseaseProcessor, \
    CTDGeneDiseaseProcessor, CTDChemicalGeneProcessor

base_url = 'http://ctdbase.org/reports/'

urls = {
    'chemical_gene': base_url + 'CTD_chem_gene_ixns.tsv.gz',
    'chemical_disease': base_url + 'CTD_chemicals_diseases.tsv.gz',
    'gene_disease': base_url + 'CTD_genes_diseases.tsv.gz',
}

processors = {
    'chemical_gene': CTDChemicalGeneProcessor,
    'chemical_disease': CTDChemicalDiseaseProcessor,
    'gene_disease': CTDGeneDiseaseProcessor,
}


def process_from_web(subset):
    if subset not in urls:
        raise ValueError('%s is not a valid CTD subset.')
    df = pandas.read_csv(urls[subset], sep='\t', comment='#',
                         header=None)
    return process_dataframe(df)


def process_tsv(fname, subset):
    df = pandas.read_csv(fname, sep='\t', comment='#', header=None)
    return process_dataframe(df, subset)


def process_dataframe(df, subset):
    if subset not in processors:
        raise ValueError('%s is not a valid CTD subset.')
    cp = processors[subset](df)
    cp.extract_statements()
    return cp
import pandas
from .processor import CTDChemicalDiseaseProcessor, \
    CTDGeneDiseaseProcessor, CTDChemicalGeneProcessor

base_url = 'http://ctdbase.org/reports/'

urls = {
    'chemical_gene': base_url + 'CTD_chem_gene_ixns.tsv.gz',
    'chemical_disease': base_url + 'CTD_chemicals_diseases.tsv.gz',
    'gene_disease': base_url + 'CTD_genes_diseases.tsv.gz',
}

processors = {
    'chemical_gene': CTDChemicalGeneProcessor,
    'chemical_disease': CTDChemicalDiseaseProcessor,
    'gene_disease': CTDGeneDiseaseProcessor,
}


def process_from_web(subset, url=None):
    if subset not in urls:
        raise ValueError('%s is not a valid CTD subset.' % subset)
    url = url if url else urls[subset]
    return _process_url_or_file(url, subset)


def process_tsv(fname, subset):
    return _process_url_or_file(fname, subset)


def _process_url_or_file(path, subset):
    df = pandas.read_csv(path, sep='\t', comment='#',
                         header=None, dtype=str, keep_default_na=False)
    return process_dataframe(df, subset)


def process_dataframe(df, subset):
    if subset not in processors:
        raise ValueError('%s is not a valid CTD subset.' % subset)
    cp = processors[subset](df)
    cp.extract_statements()
    return cp
Refactor API to have single pandas load
Refactor API to have single pandas load
Python
bsd-2-clause
sorgerlab/indra,bgyori/indra,johnbachman/indra,bgyori/indra,sorgerlab/belpy,sorgerlab/belpy,johnbachman/belpy,bgyori/indra,johnbachman/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/indra,johnbachman/belpy,sorgerlab/indra,sorgerlab/indra
import pandas
from .processor import CTDChemicalDiseaseProcessor, \
    CTDGeneDiseaseProcessor, CTDChemicalGeneProcessor

base_url = 'http://ctdbase.org/reports/'

urls = {
    'chemical_gene': base_url + 'CTD_chem_gene_ixns.tsv.gz',
    'chemical_disease': base_url + 'CTD_chemicals_diseases.tsv.gz',
    'gene_disease': base_url + 'CTD_genes_diseases.tsv.gz',
}

processors = {
    'chemical_gene': CTDChemicalGeneProcessor,
    'chemical_disease': CTDChemicalDiseaseProcessor,
    'gene_disease': CTDGeneDiseaseProcessor,
}


- def process_from_web(subset):
+ def process_from_web(subset, url=None):
    if subset not in urls:
-         raise ValueError('%s is not a valid CTD subset.')
+         raise ValueError('%s is not a valid CTD subset.' % subset)
+     url = url if url else urls[subset]
+     return _process_url_or_file(url, subset)
-     df = pandas.read_csv(urls[subset], sep='\t', comment='#',
-                          header=None)
-     return process_dataframe(df)


def process_tsv(fname, subset):
+     return _process_url_or_file(fname, subset)
+
+
+ def _process_url_or_file(path, subset):
-     df = pandas.read_csv(fname, sep='\t', comment='#', header=None)
+     df = pandas.read_csv(path, sep='\t', comment='#',
+                          header=None, dtype=str, keep_default_na=False)
    return process_dataframe(df, subset)


def process_dataframe(df, subset):
    if subset not in processors:
-         raise ValueError('%s is not a valid CTD subset.')
+         raise ValueError('%s is not a valid CTD subset.' % subset)
    cp = processors[subset](df)
    cp.extract_statements()
    return cp
Refactor API to have single pandas load
## Code Before: import pandas
from .processor import CTDChemicalDiseaseProcessor, \
    CTDGeneDiseaseProcessor, CTDChemicalGeneProcessor

base_url = 'http://ctdbase.org/reports/'

urls = {
    'chemical_gene': base_url + 'CTD_chem_gene_ixns.tsv.gz',
    'chemical_disease': base_url + 'CTD_chemicals_diseases.tsv.gz',
    'gene_disease': base_url + 'CTD_genes_diseases.tsv.gz',
}

processors = {
    'chemical_gene': CTDChemicalGeneProcessor,
    'chemical_disease': CTDChemicalDiseaseProcessor,
    'gene_disease': CTDGeneDiseaseProcessor,
}


def process_from_web(subset):
    if subset not in urls:
        raise ValueError('%s is not a valid CTD subset.')
    df = pandas.read_csv(urls[subset], sep='\t', comment='#',
                         header=None)
    return process_dataframe(df)


def process_tsv(fname, subset):
    df = pandas.read_csv(fname, sep='\t', comment='#', header=None)
    return process_dataframe(df, subset)


def process_dataframe(df, subset):
    if subset not in processors:
        raise ValueError('%s is not a valid CTD subset.')
    cp = processors[subset](df)
    cp.extract_statements()
    return cp
## Instruction: Refactor API to have single pandas load
## Code After: import pandas
from .processor import CTDChemicalDiseaseProcessor, \
    CTDGeneDiseaseProcessor, CTDChemicalGeneProcessor

base_url = 'http://ctdbase.org/reports/'

urls = {
    'chemical_gene': base_url + 'CTD_chem_gene_ixns.tsv.gz',
    'chemical_disease': base_url + 'CTD_chemicals_diseases.tsv.gz',
    'gene_disease': base_url + 'CTD_genes_diseases.tsv.gz',
}

processors = {
    'chemical_gene': CTDChemicalGeneProcessor,
    'chemical_disease': CTDChemicalDiseaseProcessor,
    'gene_disease': CTDGeneDiseaseProcessor,
}


def process_from_web(subset, url=None):
    if subset not in urls:
        raise ValueError('%s is not a valid CTD subset.' % subset)
    url = url if url else urls[subset]
    return _process_url_or_file(url, subset)


def process_tsv(fname, subset):
    return _process_url_or_file(fname, subset)


def _process_url_or_file(path, subset):
    df = pandas.read_csv(path, sep='\t', comment='#',
                         header=None, dtype=str, keep_default_na=False)
    return process_dataframe(df, subset)


def process_dataframe(df, subset):
    if subset not in processors:
        raise ValueError('%s is not a valid CTD subset.' % subset)
    cp = processors[subset](df)
    cp.extract_statements()
    return cp
...
def process_from_web(subset, url=None):
    if subset not in urls:
        raise ValueError('%s is not a valid CTD subset.' % subset)
    url = url if url else urls[subset]
    return _process_url_or_file(url, subset)
...
def process_tsv(fname, subset):
    return _process_url_or_file(fname, subset)


def _process_url_or_file(path, subset):
    df = pandas.read_csv(path, sep='\t', comment='#',
                         header=None, dtype=str, keep_default_na=False)
    return process_dataframe(df, subset)
...
    if subset not in processors:
        raise ValueError('%s is not a valid CTD subset.' % subset)
    cp = processors[subset](df)
...
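The dtype=str, keep_default_na=False pair added in _process_url_or_file matters more than it looks: by default pandas converts strings such as "NA", "NULL" or "nan" into float NaN while parsing, which silently mangles gene symbols and identifiers in exactly the kind of TSVs CTD ships. A self-contained demonstration:

import io
import pandas

tsv = "GENE\tDISEASE\nNA\tasthma\n"   # 'NA' is meant literally here

default = pandas.read_csv(io.StringIO(tsv), sep='\t')
print(default['GENE'][0])             # nan  (the symbol was eaten)

safe = pandas.read_csv(io.StringIO(tsv), sep='\t',
                       dtype=str, keep_default_na=False)
print(safe['GENE'][0])                # NA   (preserved as text)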
2607d142a32ad31fd4c432c0830c3173daee79fb
src/util/results.py
src/util/results.py
import datetime

from context_manager.db_context_manager import DBContextManager
from util.constants import QUERIES


def export_results(data, controller_name, trajectory_name, database_path):
    creation_datetime = datetime.datetime.now()

    with DBContextManager(database_path) as cursor:
        table_name = ('_'.join([controller_name, trajectory_name, creation_datetime.strftime('%Y_%m_%d_%H_%M_%S')]))

        cursor.execute(QUERIES['create_sims'])

        cursor.execute(QUERIES['insert_sim'], (table_name, creation_datetime))

        cursor.execute(QUERIES['create_sim'].format(table_name))

        for i in range(len(data['t'])):
            cursor.execute(
                QUERIES['insert_data'].format(table_name),
                (data['t'][i],
                 data['x'][i], data['x_ref'][i],
                 data['y'][i], data['y_ref'][i],
                 data['theta'][i], data['theta_ref'][i],
                 data['v_c'][i], data['w_c'][i])
            )
import datetime

from context_manager.db_context_manager import DBContextManager
from util.constants import QUERIES


def export_results(data, controller_name, trajectory_name, database_path):
    def get_table_name(controller, trajectory, date_time):
        return '_'.join([controller,
                         trajectory,
                         date_time.strftime('%Y_%m_%d_%H_%M_%S')])

    date_time = datetime.datetime.now()

    with DBContextManager(database_path) as cursor:
        table_name = get_table_name(
            controller_name,
            trajectory_name,
            date_time
        )

        cursor.execute(QUERIES['create_sims'])
        cursor.execute(QUERIES['insert_sim'], (table_name, date_time))
        cursor.execute(QUERIES['create_sim'].format(table_name))

        for i in range(len(data['t'])):
            cursor.execute(
                QUERIES['insert_data'].format(table_name),
                (data['t'][i],
                 data['x'][i], data['x_ref'][i],
                 data['y'][i], data['y_ref'][i],
                 data['theta'][i], data['theta_ref'][i],
                 data['v_c'][i], data['w_c'][i])
            )
Create inner function and rename variables
refactor: Create inner function and rename variables
Python
mit
bit0001/trajectory_tracking,bit0001/trajectory_tracking
import datetime

from context_manager.db_context_manager import DBContextManager
from util.constants import QUERIES


def export_results(data, controller_name, trajectory_name, database_path):
+     def get_table_name(controller, trajectory, date_time):
+         return '_'.join([controller,
+                          trajectory,
+                          date_time.strftime('%Y_%m_%d_%H_%M_%S')])
+
-     creation_datetime = datetime.datetime.now()
+     date_time = datetime.datetime.now()

    with DBContextManager(database_path) as cursor:
-         table_name = ('_'.join([controller_name, trajectory_name, creation_datetime.strftime('%Y_%m_%d_%H_%M_%S')]))
+         table_name = get_table_name(
+             controller_name,
+             trajectory_name,
+             date_time
+         )
+
        cursor.execute(QUERIES['create_sims'])
-
-         cursor.execute(QUERIES['insert_sim'], (table_name, creation_datetime))
+         cursor.execute(QUERIES['insert_sim'], (table_name, date_time))
-
        cursor.execute(QUERIES['create_sim'].format(table_name))

        for i in range(len(data['t'])):
            cursor.execute(
                QUERIES['insert_data'].format(table_name),
                (data['t'][i],
                 data['x'][i], data['x_ref'][i],
                 data['y'][i], data['y_ref'][i],
                 data['theta'][i], data['theta_ref'][i],
                 data['v_c'][i], data['w_c'][i])
            )
Create inner function and rename variables
## Code Before: import datetime

from context_manager.db_context_manager import DBContextManager
from util.constants import QUERIES


def export_results(data, controller_name, trajectory_name, database_path):
    creation_datetime = datetime.datetime.now()

    with DBContextManager(database_path) as cursor:
        table_name = ('_'.join([controller_name, trajectory_name, creation_datetime.strftime('%Y_%m_%d_%H_%M_%S')]))

        cursor.execute(QUERIES['create_sims'])

        cursor.execute(QUERIES['insert_sim'], (table_name, creation_datetime))

        cursor.execute(QUERIES['create_sim'].format(table_name))

        for i in range(len(data['t'])):
            cursor.execute(
                QUERIES['insert_data'].format(table_name),
                (data['t'][i],
                 data['x'][i], data['x_ref'][i],
                 data['y'][i], data['y_ref'][i],
                 data['theta'][i], data['theta_ref'][i],
                 data['v_c'][i], data['w_c'][i])
            )
## Instruction: Create inner function and rename variables
## Code After: import datetime

from context_manager.db_context_manager import DBContextManager
from util.constants import QUERIES


def export_results(data, controller_name, trajectory_name, database_path):
    def get_table_name(controller, trajectory, date_time):
        return '_'.join([controller,
                         trajectory,
                         date_time.strftime('%Y_%m_%d_%H_%M_%S')])

    date_time = datetime.datetime.now()

    with DBContextManager(database_path) as cursor:
        table_name = get_table_name(
            controller_name,
            trajectory_name,
            date_time
        )

        cursor.execute(QUERIES['create_sims'])
        cursor.execute(QUERIES['insert_sim'], (table_name, date_time))
        cursor.execute(QUERIES['create_sim'].format(table_name))

        for i in range(len(data['t'])):
            cursor.execute(
                QUERIES['insert_data'].format(table_name),
                (data['t'][i],
                 data['x'][i], data['x_ref'][i],
                 data['y'][i], data['y_ref'][i],
                 data['theta'][i], data['theta_ref'][i],
                 data['v_c'][i], data['w_c'][i])
            )
// ... existing code ...
def export_results(data, controller_name, trajectory_name, database_path):
    def get_table_name(controller, trajectory, date_time):
        return '_'.join([controller,
                         trajectory,
                         date_time.strftime('%Y_%m_%d_%H_%M_%S')])

    date_time = datetime.datetime.now()
// ... modified code ...
    with DBContextManager(database_path) as cursor:
        table_name = get_table_name(
            controller_name,
            trajectory_name,
            date_time
        )

        cursor.execute(QUERIES['create_sims'])
        cursor.execute(QUERIES['insert_sim'], (table_name, date_time))
        cursor.execute(QUERIES['create_sim'].format(table_name))
// ... rest of the code ...
95186f684328d5b84611f405d47d474c53cad619
cat.py
cat.py
import io

import aiohttp
from discord.ext import commands
import yaml


class Cat:
    def __init__(self, bot):
        self.bot = bot
        with open('config.yaml') as file:
            data = yaml.load(file)
        self.key = data.get('cat_key', '')
        self.url = 'http://thecatapi.com/api/images/get'
        self.params = {'api_key': self.key,
                       'type': 'png,jpg'}

    @commands.command()
    async def cat(self, ctx):
        s = self.bot.session
        async with ctx.typing(), s.get(self.url, params=self.params) as resp:
            image = io.BytesIO(await resp.content.read())
        ext = resp.headers['Content-Type'].partition('/')[2]
        await ctx.send(file=image, filename=f'{ctx.message.id}.{ext}')


def setup(bot):
    bot.add_cog(Cat(bot))
import io

import aiohttp
import discord
from discord.ext import commands
from lxml import etree
import yaml


class Cat:
    def __init__(self, bot):
        self.bot = bot
        with open('config.yaml') as file:
            data = yaml.load(file)
        self.key = data.get('cat_key', '')
        self.url = 'http://thecatapi.com/api/images/get'
        self.params = {'api_key': self.key,
                       'type': 'png,jpg',
                       'format': 'xml',
                       }

    @commands.command()
    async def cat(self, ctx):
        session = self.bot.session
        async with ctx.typing():
            async with session.get(self.url, params=self.params) as resp:
                root = etree.fromstring(await resp.text())
            url = root.find('.//url').text
            embed = discord.Embed()
            embed.set_image(url=url)
            await ctx.send(embed=embed)


def setup(bot):
    bot.add_cog(Cat(bot))
Send image in embed because aiohttp doesn't know how to parse links
Send image in embed because aiohttp doesn't know how to parse links
Python
mit
BeatButton/beattie,BeatButton/beattie-bot
import io

import aiohttp
+ import discord
from discord.ext import commands
+ from lxml import etree
import yaml


class Cat:
    def __init__(self, bot):
        self.bot = bot
        with open('config.yaml') as file:
            data = yaml.load(file)
        self.key = data.get('cat_key', '')
        self.url = 'http://thecatapi.com/api/images/get'
        self.params = {'api_key': self.key,
-                        'type': 'png,jpg'}
+                        'type': 'png,jpg',
+                        'format': 'xml',
+                        }

    @commands.command()
    async def cat(self, ctx):
-         s = self.bot.session
+         session = self.bot.session
+         async with ctx.typing():
-         async with ctx.typing(), s.get(self.url, params=self.params) as resp:
+             async with session.get(self.url, params=self.params) as resp:
-             image = io.BytesIO(await resp.content.read())
-         ext = resp.headers['Content-Type'].partition('/')[2]
-         await ctx.send(file=image, filename=f'{ctx.message.id}.{ext}')
+                 root = etree.fromstring(await resp.text())
+             url = root.find('.//url').text
+             embed = discord.Embed()
+             embed.set_image(url=url)
+             await ctx.send(embed=embed)
+

def setup(bot):
    bot.add_cog(Cat(bot))
Send image in embed because aiohttp doesn't know how to parse links
## Code Before: import io

import aiohttp
from discord.ext import commands
import yaml


class Cat:
    def __init__(self, bot):
        self.bot = bot
        with open('config.yaml') as file:
            data = yaml.load(file)
        self.key = data.get('cat_key', '')
        self.url = 'http://thecatapi.com/api/images/get'
        self.params = {'api_key': self.key,
                       'type': 'png,jpg'}

    @commands.command()
    async def cat(self, ctx):
        s = self.bot.session
        async with ctx.typing(), s.get(self.url, params=self.params) as resp:
            image = io.BytesIO(await resp.content.read())
        ext = resp.headers['Content-Type'].partition('/')[2]
        await ctx.send(file=image, filename=f'{ctx.message.id}.{ext}')


def setup(bot):
    bot.add_cog(Cat(bot))
## Instruction: Send image in embed because aiohttp doesn't know how to parse links
## Code After: import io

import aiohttp
import discord
from discord.ext import commands
from lxml import etree
import yaml


class Cat:
    def __init__(self, bot):
        self.bot = bot
        with open('config.yaml') as file:
            data = yaml.load(file)
        self.key = data.get('cat_key', '')
        self.url = 'http://thecatapi.com/api/images/get'
        self.params = {'api_key': self.key,
                       'type': 'png,jpg',
                       'format': 'xml',
                       }

    @commands.command()
    async def cat(self, ctx):
        session = self.bot.session
        async with ctx.typing():
            async with session.get(self.url, params=self.params) as resp:
                root = etree.fromstring(await resp.text())
            url = root.find('.//url').text
            embed = discord.Embed()
            embed.set_image(url=url)
            await ctx.send(embed=embed)


def setup(bot):
    bot.add_cog(Cat(bot))
...
import aiohttp
import discord
from discord.ext import commands
from lxml import etree
import yaml
...
        self.params = {'api_key': self.key,
                       'type': 'png,jpg',
                       'format': 'xml',
                       }
...
    async def cat(self, ctx):
        session = self.bot.session
        async with ctx.typing():
            async with session.get(self.url, params=self.params) as resp:
                root = etree.fromstring(await resp.text())
            url = root.find('.//url').text
            embed = discord.Embed()
            embed.set_image(url=url)
            await ctx.send(embed=embed)
...
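The rewrite in this record sidesteps byte handling entirely: instead of downloading the image and re-uploading it, the bot hands Discord a URL inside an embed and lets the client fetch and render it. Reduced to its core, as a sketch against discord.py of roughly this record's vintage (bot setup and the image URL are placeholders; newer discord.py versions also require intents):

import discord
from discord.ext import commands

bot = commands.Bot(command_prefix='!')

@bot.command()
async def pic(ctx):
    embed = discord.Embed()
    embed.set_image(url='https://example.com/cat.png')  # Discord fetches this itself
    await ctx.send(embed=embed)

No bytes pass through the bot, so the old Content-Type parsing disappears, along with the io import's reason to exist.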
59789bae7df5de6d7568a1b372b95a891fd5c3a2
confluent_server/confluent/userutil.py
confluent_server/confluent/userutil.py
from ctypes import *
from ctypes.util import find_library
import grp
import pwd
import os

libc = cdll.LoadLibrary(find_library('libc'))
_getgrouplist = libc.getgrouplist
_getgrouplist.restype = c_int32


class TooSmallException(Exception):
    def __init__(self, count):
        self.count = count
        super(TooSmallException, self).__init__()


def getgrouplist(name, gid, ng=32):
    _getgrouplist.argtypes = [c_char_p, c_uint, POINTER(c_uint * ng), POINTER(c_int)]
    glist = (c_uint * ng)()
    nglist = c_int(ng)
    count = _getgrouplist(name, gid, byref(glist), byref(nglist))
    if count < 0:
        raise TooSmallException(nglist.value)
    for gidx in range(count):
        gent = glist[gidx]
        yield grp.getgrgid(gent).gr_name


def grouplist(username):
    pent = pwd.getpwnam(username)
    try:
        groups = getgrouplist(pent.pw_name, pent.pw_gid)
    except TooSmallException as e:
        groups = getgrouplist(pent.pw_name, pent.pw_gid, e.count)
    return list(groups)


if __name__ == '__main__':
    import sys
    print(repr(grouplist(sys.argv[1])))
from ctypes import *
from ctypes.util import find_library
import grp
import pwd
import os

libc = cdll.LoadLibrary(find_library('libc'))
_getgrouplist = libc.getgrouplist
_getgrouplist.restype = c_int32


class TooSmallException(Exception):
    def __init__(self, count):
        self.count = count
        super(TooSmallException, self).__init__()


def getgrouplist(name, gid, ng=32):
    _getgrouplist.argtypes = [c_char_p, c_uint, POINTER(c_uint * ng), POINTER(c_int)]
    glist = (c_uint * ng)()
    nglist = c_int(ng)
    if not isinstance(name, bytes):
        name = name.encode('utf-8')
    count = _getgrouplist(name, gid, byref(glist), byref(nglist))
    if count < 0:
        raise TooSmallException(nglist.value)
    for gidx in range(count):
        gent = glist[gidx]
        yield grp.getgrgid(gent).gr_name


def grouplist(username):
    pent = pwd.getpwnam(username)
    try:
        groups = getgrouplist(pent.pw_name, pent.pw_gid)
    except TooSmallException as e:
        groups = getgrouplist(pent.pw_name, pent.pw_gid, e.count)
    return list(groups)


if __name__ == '__main__':
    import sys
    print(repr(grouplist(sys.argv[1])))
Fix python3 ctypes str usage
Fix python3 ctypes str usage In python3, the string is likely to be unicode and incompatible with the libc function. If it isn't bytes, force it to be bytes.
Python
apache-2.0
xcat2/confluent,xcat2/confluent,jjohnson42/confluent,xcat2/confluent,jjohnson42/confluent,jjohnson42/confluent,xcat2/confluent,xcat2/confluent,jjohnson42/confluent,jjohnson42/confluent
from ctypes import *
from ctypes.util import find_library
import grp
import pwd
import os

libc = cdll.LoadLibrary(find_library('libc'))
_getgrouplist = libc.getgrouplist
_getgrouplist.restype = c_int32


class TooSmallException(Exception):
    def __init__(self, count):
        self.count = count
        super(TooSmallException, self).__init__()


def getgrouplist(name, gid, ng=32):
    _getgrouplist.argtypes = [c_char_p, c_uint, POINTER(c_uint * ng), POINTER(c_int)]
    glist = (c_uint * ng)()
    nglist = c_int(ng)
+     if not isinstance(name, bytes):
+         name = name.encode('utf-8')
    count = _getgrouplist(name, gid, byref(glist), byref(nglist))
    if count < 0:
        raise TooSmallException(nglist.value)
    for gidx in range(count):
        gent = glist[gidx]
        yield grp.getgrgid(gent).gr_name


def grouplist(username):
    pent = pwd.getpwnam(username)
    try:
        groups = getgrouplist(pent.pw_name, pent.pw_gid)
    except TooSmallException as e:
        groups = getgrouplist(pent.pw_name, pent.pw_gid, e.count)
    return list(groups)


if __name__ == '__main__':
    import sys
    print(repr(grouplist(sys.argv[1])))
Fix python3 ctypes str usage
## Code Before: from ctypes import *
from ctypes.util import find_library
import grp
import pwd
import os

libc = cdll.LoadLibrary(find_library('libc'))
_getgrouplist = libc.getgrouplist
_getgrouplist.restype = c_int32


class TooSmallException(Exception):
    def __init__(self, count):
        self.count = count
        super(TooSmallException, self).__init__()


def getgrouplist(name, gid, ng=32):
    _getgrouplist.argtypes = [c_char_p, c_uint, POINTER(c_uint * ng), POINTER(c_int)]
    glist = (c_uint * ng)()
    nglist = c_int(ng)
    count = _getgrouplist(name, gid, byref(glist), byref(nglist))
    if count < 0:
        raise TooSmallException(nglist.value)
    for gidx in range(count):
        gent = glist[gidx]
        yield grp.getgrgid(gent).gr_name


def grouplist(username):
    pent = pwd.getpwnam(username)
    try:
        groups = getgrouplist(pent.pw_name, pent.pw_gid)
    except TooSmallException as e:
        groups = getgrouplist(pent.pw_name, pent.pw_gid, e.count)
    return list(groups)


if __name__ == '__main__':
    import sys
    print(repr(grouplist(sys.argv[1])))
## Instruction: Fix python3 ctypes str usage
## Code After: from ctypes import *
from ctypes.util import find_library
import grp
import pwd
import os

libc = cdll.LoadLibrary(find_library('libc'))
_getgrouplist = libc.getgrouplist
_getgrouplist.restype = c_int32


class TooSmallException(Exception):
    def __init__(self, count):
        self.count = count
        super(TooSmallException, self).__init__()


def getgrouplist(name, gid, ng=32):
    _getgrouplist.argtypes = [c_char_p, c_uint, POINTER(c_uint * ng), POINTER(c_int)]
    glist = (c_uint * ng)()
    nglist = c_int(ng)
    if not isinstance(name, bytes):
        name = name.encode('utf-8')
    count = _getgrouplist(name, gid, byref(glist), byref(nglist))
    if count < 0:
        raise TooSmallException(nglist.value)
    for gidx in range(count):
        gent = glist[gidx]
        yield grp.getgrgid(gent).gr_name


def grouplist(username):
    pent = pwd.getpwnam(username)
    try:
        groups = getgrouplist(pent.pw_name, pent.pw_gid)
    except TooSmallException as e:
        groups = getgrouplist(pent.pw_name, pent.pw_gid, e.count)
    return list(groups)


if __name__ == '__main__':
    import sys
    print(repr(grouplist(sys.argv[1])))
# ... existing code ...
    nglist = c_int(ng)
    if not isinstance(name, bytes):
        name = name.encode('utf-8')
    count = _getgrouplist(name, gid, byref(glist), byref(nglist))
# ... rest of the code ...
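The added isinstance guard exists because ctypes' c_char_p only accepts bytes on Python 3, while callers naturally pass str. The same encode-before-the-FFI-call pattern, shown against a libc function every POSIX system has (note, as an aside, that find_library wants the library name without the lib prefix, i.e. 'c'):

from ctypes import CDLL, c_char_p, c_size_t
from ctypes.util import find_library

libc = CDLL(find_library('c'))
libc.strlen.argtypes = [c_char_p]
libc.strlen.restype = c_size_t

def strlen(s):
    if not isinstance(s, bytes):      # mirror the guard from the commit
        s = s.encode('utf-8')
    return libc.strlen(s)

print(strlen('hello'))  # 5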
4065f8edc61ae9078238219dad674ae114c78003
moocng/wsgi.py
moocng/wsgi.py
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moocng.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moocng.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
#from django.core.wsgi import get_wsgi_application
#application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)


def application(environ, start_response):
    virtualenv = environ.get('VIRTUALENV', '/var/www')
    activate_this = os.path.join(virtualenv, 'bin', 'activate_this.py')
    execfile(activate_this, dict(__file__=activate_this))

    from django.core.wsgi import get_wsgi_application
    django_app = get_wsgi_application()
    return django_app(environ, start_response)
Allow configuring the virtualenv path from the Apache configuration
Allow configuring the virtualenv path from the Apache configuration
Python
apache-2.0
OpenMOOC/moocng,GeographicaGS/moocng,GeographicaGS/moocng,GeographicaGS/moocng,OpenMOOC/moocng,GeographicaGS/moocng
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moocng.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
- from django.core.wsgi import get_wsgi_application
+ #from django.core.wsgi import get_wsgi_application
- application = get_wsgi_application()
+ #application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
+
+ def application(environ, start_response):
+     virtualenv = environ.get('VIRTUALENV', '/var/www')
+     activate_this = os.path.join(virtualenv, 'bin', 'activate_this.py')
+     execfile(activate_this, dict(__file__=activate_this))
+
+     from django.core.wsgi import get_wsgi_application
+     django_app = get_wsgi_application()
+     return django_app(environ, start_response)
+
Allow configuring the virtualenv path from the Apache configuration
## Code Before: import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moocng.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
## Instruction: Allow configuring the virtualenv path from the Apache configuration
## Code After: import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "moocng.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
#from django.core.wsgi import get_wsgi_application
#application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)


def application(environ, start_response):
    virtualenv = environ.get('VIRTUALENV', '/var/www')
    activate_this = os.path.join(virtualenv, 'bin', 'activate_this.py')
    execfile(activate_this, dict(__file__=activate_this))

    from django.core.wsgi import get_wsgi_application
    django_app = get_wsgi_application()
    return django_app(environ, start_response)
// ... existing code ...
# setting points here.
#from django.core.wsgi import get_wsgi_application
#application = get_wsgi_application()
// ... modified code ...
# application = HelloWorldApplication(application)


def application(environ, start_response):
    virtualenv = environ.get('VIRTUALENV', '/var/www')
    activate_this = os.path.join(virtualenv, 'bin', 'activate_this.py')
    execfile(activate_this, dict(__file__=activate_this))

    from django.core.wsgi import get_wsgi_application
    django_app = get_wsgi_application()
    return django_app(environ, start_response)
// ... rest of the code ...
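This replacement wraps application creation in a callable so that the virtualenv can be chosen per-vhost (for example SetEnv VIRTUALENV /srv/venv in Apache, which mod_wsgi exposes through environ) and activated before Django is imported. Two caveats: execfile() is Python 2 only, and activate_this.py ships with the virtualenv tool, not the stdlib venv. A Python 3 rendering of the same idea, as a sketch:

import os

def application(environ, start_response):
    virtualenv = environ.get('VIRTUALENV', '/var/www')
    activate_this = os.path.join(virtualenv, 'bin', 'activate_this.py')
    with open(activate_this) as f:                      # py3 stand-in for execfile
        exec(f.read(), {'__file__': activate_this})

    from django.core.wsgi import get_wsgi_application  # import only after activation
    return get_wsgi_application()(environ, start_response)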
f0cb99f5e986c11164c98eeea38ce54e91748833
tests/grammar_unified_tests.py
tests/grammar_unified_tests.py
from unittest import TestCase

from regparser.grammar.unified import *


class GrammarCommonTests(TestCase):

    def test_depth1_p(self):
        text = '(c)(2)(ii)(A)(<E T="03">2</E>)'
        result = depth1_p.parseString(text)
        self.assertEqual('c', result.p1)
        self.assertEqual('2', result.p2)
        self.assertEqual('ii', result.p3)
        self.assertEqual('A', result.p4)
        self.assertEqual('2', result.p5)

    def test_marker_subpart_title(self):
        # Typical case:
        text = u'Subpart K\u2014Exportation'
        result = marker_subpart_title.parseString(text)
        self.assertEqual(u'Exportation', result.subpart_title)
        self.assertEqual(u'K', result.subpart)

        # Reserved subpart:
        text = u'Subpart J [Reserved]'
        result = marker_subpart_title.parseString(text)
        self.assertEqual(u'[Reserved]', result.subpart_title)
        self.assertEqual(u'J', result.subpart)
from unittest import TestCase

from regparser.grammar.unified import *


class GrammarCommonTests(TestCase):

    def test_depth1_p(self):
        text = '(c)(2)(ii)(A)(<E T="03">2</E>)'
        result = depth1_p.parseString(text)
        self.assertEqual('c', result.p1)
        self.assertEqual('2', result.p2)
        self.assertEqual('ii', result.p3)
        self.assertEqual('A', result.p4)
        self.assertEqual('2', result.p5)

    def test_marker_subpart_title(self):
        # Typical case:
        text = u'Subpart K\u2014Exportation'
        result = marker_subpart_title.parseString(text)
        self.assertEqual(u'Exportation', result.subpart_title)
        self.assertEqual(u'K', result.subpart)

        # Reserved subpart:
        text = u'Subpart J [Reserved]'
        result = marker_subpart_title.parseString(text)
        self.assertEqual(u'[Reserved]', result.subpart_title)
        self.assertEqual(u'J', result.subpart)

    def test_marker_comment(self):
        texts = [u'comment § 1004.3-4-i',
                 u'comment 1004.3-4-i',
                 u'comment 3-4-i',]
        for t in texts:
            result = marker_comment.parseString(t)
            self.assertEqual("3", result.section)
            self.assertEqual("4", result.c1)
Add tests for marker_comment from ascott1/appendix-ref
Add tests for marker_comment from ascott1/appendix-ref Conflicts: tests/grammar_unified_tests.py
Python
cc0-1.0
tadhg-ohiggins/regulations-parser,eregs/regulations-parser,tadhg-ohiggins/regulations-parser,eregs/regulations-parser,cmc333333/regulations-parser,cmc333333/regulations-parser
from unittest import TestCase

from regparser.grammar.unified import *


class GrammarCommonTests(TestCase):

    def test_depth1_p(self):
        text = '(c)(2)(ii)(A)(<E T="03">2</E>)'
        result = depth1_p.parseString(text)
        self.assertEqual('c', result.p1)
        self.assertEqual('2', result.p2)
        self.assertEqual('ii', result.p3)
        self.assertEqual('A', result.p4)
        self.assertEqual('2', result.p5)
-
    def test_marker_subpart_title(self):
        # Typical case:
        text = u'Subpart K\u2014Exportation'
        result = marker_subpart_title.parseString(text)
        self.assertEqual(u'Exportation', result.subpart_title)
        self.assertEqual(u'K', result.subpart)

        # Reserved subpart:
        text = u'Subpart J [Reserved]'
        result = marker_subpart_title.parseString(text)
        self.assertEqual(u'[Reserved]', result.subpart_title)
        self.assertEqual(u'J', result.subpart)
+
+     def test_marker_comment(self):
+         texts = [u'comment § 1004.3-4-i',
+                  u'comment 1004.3-4-i',
+                  u'comment 3-4-i',]
+         for t in texts:
+             result = marker_comment.parseString(t)
+             self.assertEqual("3", result.section)
+             self.assertEqual("4", result.c1)
Add tests for marker_comment from ascott1/appendix-ref
## Code Before: from unittest import TestCase from regparser.grammar.unified import * class GrammarCommonTests(TestCase): def test_depth1_p(self): text = '(c)(2)(ii)(A)(<E T="03">2</E>)' result = depth1_p.parseString(text) self.assertEqual('c', result.p1) self.assertEqual('2', result.p2) self.assertEqual('ii', result.p3) self.assertEqual('A', result.p4) self.assertEqual('2', result.p5) def test_marker_subpart_title(self): # Typical case: text = u'Subpart K\u2014Exportation' result = marker_subpart_title.parseString(text) self.assertEqual(u'Exportation', result.subpart_title) self.assertEqual(u'K', result.subpart) # Reserved subpart: text = u'Subpart J [Reserved]' result = marker_subpart_title.parseString(text) self.assertEqual(u'[Reserved]', result.subpart_title) self.assertEqual(u'J', result.subpart) ## Instruction: Add tests for marker_comment from ascott1/appendix-ref ## Code After: from unittest import TestCase from regparser.grammar.unified import * class GrammarCommonTests(TestCase): def test_depth1_p(self): text = '(c)(2)(ii)(A)(<E T="03">2</E>)' result = depth1_p.parseString(text) self.assertEqual('c', result.p1) self.assertEqual('2', result.p2) self.assertEqual('ii', result.p3) self.assertEqual('A', result.p4) self.assertEqual('2', result.p5) def test_marker_subpart_title(self): # Typical case: text = u'Subpart K\u2014Exportation' result = marker_subpart_title.parseString(text) self.assertEqual(u'Exportation', result.subpart_title) self.assertEqual(u'K', result.subpart) # Reserved subpart: text = u'Subpart J [Reserved]' result = marker_subpart_title.parseString(text) self.assertEqual(u'[Reserved]', result.subpart_title) self.assertEqual(u'J', result.subpart) def test_marker_comment(self): texts = [u'comment § 1004.3-4-i', u'comment 1004.3-4-i', u'comment 3-4-i',] for t in texts: result = marker_comment.parseString(t) self.assertEqual("3", result.section) self.assertEqual("4", result.c1)
# ... existing code ...
    def test_marker_subpart_title(self):
# ... modified code ...
    def test_marker_comment(self):
        texts = [u'comment § 1004.3-4-i',
                 u'comment 1004.3-4-i',
                 u'comment 3-4-i',]
        for t in texts:
            result = marker_comment.parseString(t)
            self.assertEqual("3", result.section)
            self.assertEqual("4", result.c1)
# ... rest of the code ...
0bd1865730106d2573acb04d95b23290e935f4c4
util.py
util.py
from math import sin, cos, asin, sqrt


def hav(lona, lonb, lata, latb):
    # ported from latlontools
    # assume latitude and longitudes are in radians
    diff_lat = lata - latb
    diff_lon = lona - lonb
    a = sin(diff_lat/2)**2 + cos(lona) * cos(latb) * sin(diff_lon/2)**2
    c = 2 * asin(sqrt(a))
    r = 6371 # radius of earth in km
    return c * r
from math import sin, cos, asin, sqrt


def hav(lonlata, lonlatb):
    # ported from latlontools
    # assume latitude and longitudes are in radians
    lona = lonlata[0]
    lata = lonlata[1]

    lonb = lonlatb[0]
    latb = lonlatb[1]

    diff_lat = lata - latb
    diff_lon = lona - lonb
    a = sin(diff_lat/2)**2 + cos(lona) * cos(latb) * sin(diff_lon/2)**2
    c = 2 * asin(sqrt(a))
    r = 6371 # radius of earth in km
    return c * r
Change call signature of hav to 2 pairs
Change call signature of hav to 2 pairs
Python
bsd-3-clause
LemonPi/Pathtreker,LemonPi/Pathtreker,LemonPi/Pathtreker
 from math import sin, cos, asin, sqrt


-def hav(lona, lonb, lata, latb):
+def hav(lonlata, lonlatb):
     # ported from latlontools
     # assume latitude and longitudes are in radians
+    lona = lonlata[0]
+    lata = lonlata[1]
+
+    lonb = lonlatb[0]
+    latb = lonlatb[1]
+
     diff_lat = lata - latb
     diff_lon = lona - lonb
     a = sin(diff_lat/2)**2 + cos(lona) * cos(latb) * sin(diff_lon/2)**2
     c = 2 * asin(sqrt(a))
     r = 6371 # radius of earth in km
     return c * r
Change call signature of hav to 2 pairs
## Code Before:
from math import sin, cos, asin, sqrt


def hav(lona, lonb, lata, latb):
    # ported from latlontools
    # assume latitude and longitudes are in radians
    diff_lat = lata - latb
    diff_lon = lona - lonb
    a = sin(diff_lat/2)**2 + cos(lona) * cos(latb) * sin(diff_lon/2)**2
    c = 2 * asin(sqrt(a))
    r = 6371 # radius of earth in km
    return c * r

## Instruction:
Change call signature of hav to 2 pairs

## Code After:
from math import sin, cos, asin, sqrt


def hav(lonlata, lonlatb):
    # ported from latlontools
    # assume latitude and longitudes are in radians
    lona = lonlata[0]
    lata = lonlata[1]

    lonb = lonlatb[0]
    latb = lonlatb[1]

    diff_lat = lata - latb
    diff_lon = lona - lonb
    a = sin(diff_lat/2)**2 + cos(lona) * cos(latb) * sin(diff_lon/2)**2
    c = 2 * asin(sqrt(a))
    r = 6371 # radius of earth in km
    return c * r
...
def hav(lonlata, lonlatb):
    # ported from latlontools
...
    # assume latitude and longitudes are in radians
    lona = lonlata[0]
    lata = lonlata[1]

    lonb = lonlatb[0]
    latb = lonlatb[1]

    diff_lat = lata - latb
...
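A quick illustration of the record above: the old call passed four scalars, the new one takes two (lon, lat) pairs. This is an editorial sketch only — the coordinates are made up, the `from util import hav` path is an assumption based on the record's file name, and values are converted to radians as the function's own comment requires:

```
from math import radians

from util import hav  # assumed import path, per the file edited in this record

# Hypothetical coordinates, given as (longitude, latitude) pairs in radians.
paris = (radians(2.3522), radians(48.8566))
london = (radians(-0.1276), radians(51.5072))

# Old style was hav(lona, lonb, lata, latb); the new signature takes two pairs.
distance_km = hav(paris, london)
print(distance_km)  # great-circle distance in km
```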
e32d470ba47973f3827a5f85e701826bbb08b621
setup.py
setup.py
from distutils.core import setup

setup(
    name='lcinvestor',
    version=open('lcinvestor/VERSION').read(),
    author='Jeremy Gillick',
    author_email='[email protected]',
    packages=['lcinvestor', 'lcinvestor.tests', 'lcinvestor.settings'],
    package_data={
        'lcinvestor': ['VERSION'],
        'lcinvestor.settings': ['settings.yaml']
    },
    scripts=['bin/lcinvestor'],
    url='https://github.com/jgillick/LendingClubAutoInvestor',
    license=open('LICENSE.txt').read(),
    description='A simple tool that will watch your LendingClub account and automatically invest cash as it becomes available.',
    long_description=open('README.rst').read(),
    install_requires=[
        "lendingclub >= 0.1.2",
        # "python-daemon >= 1.6",
        "argparse >= 1.2.1",
        "pybars >= 0.0.4",
        "pyyaml >= 3.09",
        "pause >= 0.1.2"
    ],
    platforms='osx, posix, linux, windows',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Environment :: Console',
        'Environment :: No Input/Output (Daemon)',
        'Topic :: Office/Business :: Financial'
    ],
    keywords='lendingclub investing daemon'
)
from distutils.core import setup

setup(
    name='lcinvestor',
    version=open('lcinvestor/VERSION').read(),
    author='Jeremy Gillick',
    author_email='[email protected]',
    packages=['lcinvestor', 'lcinvestor.tests', 'lcinvestor.settings'],
    package_data={
        'lcinvestor': ['VERSION'],
        'lcinvestor.settings': ['settings.yaml']
    },
    scripts=['bin/lcinvestor', 'bin/lcinvestor.bat'],
    url='https://github.com/jgillick/LendingClubAutoInvestor',
    license=open('LICENSE.txt').read(),
    description='A simple tool that will watch your LendingClub account and automatically invest cash as it becomes available.',
    long_description=open('README.rst').read(),
    install_requires=[
        "lendingclub >= 0.1.3",
        # "python-daemon >= 1.6",
        "argparse >= 1.2.1",
        "pyyaml >= 3.09",
        "pause >= 0.1.2"
    ],
    platforms='osx, posix, linux, windows',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Environment :: Console',
        'Environment :: No Input/Output (Daemon)',
        'Topic :: Office/Business :: Financial'
    ],
    keywords='lendingclub investing daemon'
)
Update requirements. Add bat file
Update requirements. Add bat file
Python
mit
jgillick/LendingClubAutoInvestor,ilyakatz/LendingClubAutoInvestor,ilyakatz/LendingClubAutoInvestor
 from distutils.core import setup

 setup(
     name='lcinvestor',
     version=open('lcinvestor/VERSION').read(),
     author='Jeremy Gillick',
     author_email='[email protected]',
     packages=['lcinvestor', 'lcinvestor.tests', 'lcinvestor.settings'],
     package_data={
         'lcinvestor': ['VERSION'],
         'lcinvestor.settings': ['settings.yaml']
     },
-    scripts=['bin/lcinvestor'],
+    scripts=['bin/lcinvestor', 'bin/lcinvestor.bat'],
     url='https://github.com/jgillick/LendingClubAutoInvestor',
     license=open('LICENSE.txt').read(),
     description='A simple tool that will watch your LendingClub account and automatically invest cash as it becomes available.',
     long_description=open('README.rst').read(),
     install_requires=[
-        "lendingclub >= 0.1.2",
+        "lendingclub >= 0.1.3",
         # "python-daemon >= 1.6",
         "argparse >= 1.2.1",
-        "pybars >= 0.0.4",
         "pyyaml >= 3.09",
         "pause >= 0.1.2"
     ],
     platforms='osx, posix, linux, windows',
     classifiers=[
         'Development Status :: 5 - Production/Stable',
         'License :: OSI Approved :: MIT License',
         'Natural Language :: English',
         'Programming Language :: Python :: 2.6',
         'Programming Language :: Python :: 2.7',
         'Operating System :: MacOS :: MacOS X',
         'Operating System :: POSIX',
         'Environment :: Console',
         'Environment :: No Input/Output (Daemon)',
         'Topic :: Office/Business :: Financial'
     ],
     keywords='lendingclub investing daemon'
 )
Update requirements. Add bat file
## Code Before:
from distutils.core import setup

setup(
    name='lcinvestor',
    version=open('lcinvestor/VERSION').read(),
    author='Jeremy Gillick',
    author_email='[email protected]',
    packages=['lcinvestor', 'lcinvestor.tests', 'lcinvestor.settings'],
    package_data={
        'lcinvestor': ['VERSION'],
        'lcinvestor.settings': ['settings.yaml']
    },
    scripts=['bin/lcinvestor'],
    url='https://github.com/jgillick/LendingClubAutoInvestor',
    license=open('LICENSE.txt').read(),
    description='A simple tool that will watch your LendingClub account and automatically invest cash as it becomes available.',
    long_description=open('README.rst').read(),
    install_requires=[
        "lendingclub >= 0.1.2",
        # "python-daemon >= 1.6",
        "argparse >= 1.2.1",
        "pybars >= 0.0.4",
        "pyyaml >= 3.09",
        "pause >= 0.1.2"
    ],
    platforms='osx, posix, linux, windows',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Environment :: Console',
        'Environment :: No Input/Output (Daemon)',
        'Topic :: Office/Business :: Financial'
    ],
    keywords='lendingclub investing daemon'
)

## Instruction:
Update requirements. Add bat file

## Code After:
from distutils.core import setup

setup(
    name='lcinvestor',
    version=open('lcinvestor/VERSION').read(),
    author='Jeremy Gillick',
    author_email='[email protected]',
    packages=['lcinvestor', 'lcinvestor.tests', 'lcinvestor.settings'],
    package_data={
        'lcinvestor': ['VERSION'],
        'lcinvestor.settings': ['settings.yaml']
    },
    scripts=['bin/lcinvestor', 'bin/lcinvestor.bat'],
    url='https://github.com/jgillick/LendingClubAutoInvestor',
    license=open('LICENSE.txt').read(),
    description='A simple tool that will watch your LendingClub account and automatically invest cash as it becomes available.',
    long_description=open('README.rst').read(),
    install_requires=[
        "lendingclub >= 0.1.3",
        # "python-daemon >= 1.6",
        "argparse >= 1.2.1",
        "pyyaml >= 3.09",
        "pause >= 0.1.2"
    ],
    platforms='osx, posix, linux, windows',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Environment :: Console',
        'Environment :: No Input/Output (Daemon)',
        'Topic :: Office/Business :: Financial'
    ],
    keywords='lendingclub investing daemon'
)
# ... existing code ...
    },
    scripts=['bin/lcinvestor', 'bin/lcinvestor.bat'],
    url='https://github.com/jgillick/LendingClubAutoInvestor',
# ... modified code ...
    install_requires=[
        "lendingclub >= 0.1.3",
        # "python-daemon >= 1.6",
...
        "argparse >= 1.2.1",
        "pyyaml >= 3.09",
# ... rest of the code ...
0e835c6381374c5b00b7387057d056d679f635c4
zproject/legacy_urls.py
zproject/legacy_urls.py
from django.conf.urls import url

import zerver.views
import zerver.views.streams
import zerver.views.auth
import zerver.views.tutorial
import zerver.views.report

# Future endpoints should add to urls.py, which includes these legacy urls

legacy_urls = [
    # These are json format views used by the web client. They require a logged in browser.

    # We should remove this endpoint and all code related to it.
    # It returns a 404 if the stream doesn't exist, which is confusing
    # for devs, and I don't think we need to go to the server
    # any more to find out about subscriptions, since they are already
    # pushed to us via the event system.
    url(r'^json/subscriptions/exists$', zerver.views.streams.json_stream_exists),
]
from django.urls import path

import zerver.views
import zerver.views.streams
import zerver.views.auth
import zerver.views.tutorial
import zerver.views.report

# Future endpoints should add to urls.py, which includes these legacy urls

legacy_urls = [
    # These are json format views used by the web client. They require a logged in browser.

    # We should remove this endpoint and all code related to it.
    # It returns a 404 if the stream doesn't exist, which is confusing
    # for devs, and I don't think we need to go to the server
    # any more to find out about subscriptions, since they are already
    # pushed to us via the event system.
    path('json/subscriptions/exists', zerver.views.streams.json_stream_exists),
]
Migrate legacy urls to use modern django pattern.
urls: Migrate legacy urls to use modern django pattern.
Python
apache-2.0
shubhamdhama/zulip,punchagan/zulip,kou/zulip,showell/zulip,hackerkid/zulip,timabbott/zulip,eeshangarg/zulip,kou/zulip,andersk/zulip,zulip/zulip,synicalsyntax/zulip,zulip/zulip,andersk/zulip,shubhamdhama/zulip,showell/zulip,kou/zulip,hackerkid/zulip,shubhamdhama/zulip,andersk/zulip,eeshangarg/zulip,brainwane/zulip,shubhamdhama/zulip,hackerkid/zulip,punchagan/zulip,showell/zulip,brainwane/zulip,rht/zulip,rht/zulip,shubhamdhama/zulip,hackerkid/zulip,eeshangarg/zulip,hackerkid/zulip,brainwane/zulip,brainwane/zulip,punchagan/zulip,punchagan/zulip,punchagan/zulip,synicalsyntax/zulip,timabbott/zulip,zulip/zulip,rht/zulip,eeshangarg/zulip,brainwane/zulip,punchagan/zulip,timabbott/zulip,timabbott/zulip,synicalsyntax/zulip,synicalsyntax/zulip,hackerkid/zulip,synicalsyntax/zulip,showell/zulip,timabbott/zulip,rht/zulip,kou/zulip,rht/zulip,synicalsyntax/zulip,timabbott/zulip,rht/zulip,eeshangarg/zulip,zulip/zulip,zulip/zulip,eeshangarg/zulip,zulip/zulip,punchagan/zulip,hackerkid/zulip,kou/zulip,andersk/zulip,rht/zulip,andersk/zulip,kou/zulip,shubhamdhama/zulip,andersk/zulip,showell/zulip,timabbott/zulip,synicalsyntax/zulip,eeshangarg/zulip,showell/zulip,kou/zulip,brainwane/zulip,brainwane/zulip,andersk/zulip,zulip/zulip,shubhamdhama/zulip,showell/zulip
-from django.conf.urls import url
+from django.urls import path

 import zerver.views
 import zerver.views.streams
 import zerver.views.auth
 import zerver.views.tutorial
 import zerver.views.report

 # Future endpoints should add to urls.py, which includes these legacy urls

 legacy_urls = [
     # These are json format views used by the web client. They require a logged in browser.

     # We should remove this endpoint and all code related to it.
     # It returns a 404 if the stream doesn't exist, which is confusing
     # for devs, and I don't think we need to go to the server
     # any more to find out about subscriptions, since they are already
     # pushed to us via the event system.
-    url(r'^json/subscriptions/exists$', zerver.views.streams.json_stream_exists),
+    path('json/subscriptions/exists', zerver.views.streams.json_stream_exists),
 ]
Migrate legacy urls to use modern django pattern.
## Code Before:
from django.conf.urls import url

import zerver.views
import zerver.views.streams
import zerver.views.auth
import zerver.views.tutorial
import zerver.views.report

# Future endpoints should add to urls.py, which includes these legacy urls

legacy_urls = [
    # These are json format views used by the web client. They require a logged in browser.

    # We should remove this endpoint and all code related to it.
    # It returns a 404 if the stream doesn't exist, which is confusing
    # for devs, and I don't think we need to go to the server
    # any more to find out about subscriptions, since they are already
    # pushed to us via the event system.
    url(r'^json/subscriptions/exists$', zerver.views.streams.json_stream_exists),
]

## Instruction:
Migrate legacy urls to use modern django pattern.

## Code After:
from django.urls import path

import zerver.views
import zerver.views.streams
import zerver.views.auth
import zerver.views.tutorial
import zerver.views.report

# Future endpoints should add to urls.py, which includes these legacy urls

legacy_urls = [
    # These are json format views used by the web client. They require a logged in browser.

    # We should remove this endpoint and all code related to it.
    # It returns a 404 if the stream doesn't exist, which is confusing
    # for devs, and I don't think we need to go to the server
    # any more to find out about subscriptions, since they are already
    # pushed to us via the event system.
    path('json/subscriptions/exists', zerver.views.streams.json_stream_exists),
]
# ... existing code ...
from django.urls import path

import zerver.views
# ... modified code ...
    # pushed to us via the event system.
    path('json/subscriptions/exists', zerver.views.streams.json_stream_exists),
]
# ... rest of the code ...
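The pattern in this record generalizes: Django's `path()` takes a plain route (with optional converters) instead of a regex, so anchors like `^`/`$` and named regex groups disappear. A hedged sketch of the conversion with a captured id — the view and route names here are illustrative, not taken from the Zulip codebase, and `re_path` stands in for the legacy `url()`:

```
from django.http import HttpResponse
from django.urls import path, re_path


def some_view(request, user_id):  # stand-in view, purely for the example
    return HttpResponse(str(user_id))


urlpatterns = [
    re_path(r'^json/users/(?P<user_id>[0-9]+)$', some_view),  # regex style
    path('json/users/<int:user_id>', some_view),              # route style
]
```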
0dc84650b2929d31c054882ad67570fda6f1ffb9
incuna_test_utils/testcases/urls.py
incuna_test_utils/testcases/urls.py
from django.core.urlresolvers import resolve, reverse
from django.test import TestCase


class URLsTestCase(TestCase):
    """A TestCase with a check_url helper method for testing urls"""

    def check_url(self, view_class, url, url_name, url_args=None, url_kwargs=None):
        """
        Assert a view's url is correctly configured

        Check the url_name reverses to give a correctly formated url.

        Check the url resolves to the correct view.
        """
        reversed_url = reverse(url_name, args=url_args, kwargs=url_kwargs)
        self.assertEqual(reversed_url, url)

        resolved_view_class = resolve(url).func.cls
        self.assertEqual(resolved_view_class, view_class)
from django.core.urlresolvers import resolve, reverse
from django.test import TestCase


class URLsMixin(object):
    """A TestCase Mixin with a check_url helper method for testing urls"""

    def check_url(self, view_class, expected_url, url_name,
                  url_args=None, url_kwargs=None):
        """
        Assert a view's url is correctly configured

        Check the url_name reverses to give a correctly formated expected_url.

        Check the expected_url resolves to the correct view.
        """
        reversed_url = reverse(url_name, args=url_args, kwargs=url_kwargs)
        self.assertEqual(reversed_url, expected_url)

        resolved_view_class = resolve(expected_url).func.cls
        self.assertEqual(resolved_view_class, view_class)


class URLsTestCase(URLsMixin, TestCase):
    pass
Rename url -> expected_url; Add URLsMixin
Rename url -> expected_url; Add URLsMixin
Python
bsd-2-clause
incuna/incuna-test-utils,incuna/incuna-test-utils
 from django.core.urlresolvers import resolve, reverse
 from django.test import TestCase


-class URLsTestCase(TestCase):
+class URLsMixin(object):
-    """A TestCase with a check_url helper method for testing urls"""
+    """A TestCase Mixin with a check_url helper method for testing urls"""

-    def check_url(self, view_class, url, url_name, url_args=None, url_kwargs=None):
+    def check_url(self, view_class, expected_url, url_name,
+                  url_args=None, url_kwargs=None):
         """
         Assert a view's url is correctly configured

-        Check the url_name reverses to give a correctly formated url.
+        Check the url_name reverses to give a correctly formated expected_url.

-        Check the url resolves to the correct view.
+        Check the expected_url resolves to the correct view.
         """
         reversed_url = reverse(url_name, args=url_args, kwargs=url_kwargs)
-        self.assertEqual(reversed_url, url)
+        self.assertEqual(reversed_url, expected_url)

-        resolved_view_class = resolve(url).func.cls
+        resolved_view_class = resolve(expected_url).func.cls
         self.assertEqual(resolved_view_class, view_class)
+
+
+class URLsTestCase(URLsMixin, TestCase):
+    pass
Rename url -> expected_url; Add URLsMixin
## Code Before:
from django.core.urlresolvers import resolve, reverse
from django.test import TestCase


class URLsTestCase(TestCase):
    """A TestCase with a check_url helper method for testing urls"""

    def check_url(self, view_class, url, url_name, url_args=None, url_kwargs=None):
        """
        Assert a view's url is correctly configured

        Check the url_name reverses to give a correctly formated url.

        Check the url resolves to the correct view.
        """
        reversed_url = reverse(url_name, args=url_args, kwargs=url_kwargs)
        self.assertEqual(reversed_url, url)

        resolved_view_class = resolve(url).func.cls
        self.assertEqual(resolved_view_class, view_class)

## Instruction:
Rename url -> expected_url; Add URLsMixin

## Code After:
from django.core.urlresolvers import resolve, reverse
from django.test import TestCase


class URLsMixin(object):
    """A TestCase Mixin with a check_url helper method for testing urls"""

    def check_url(self, view_class, expected_url, url_name,
                  url_args=None, url_kwargs=None):
        """
        Assert a view's url is correctly configured

        Check the url_name reverses to give a correctly formated expected_url.

        Check the expected_url resolves to the correct view.
        """
        reversed_url = reverse(url_name, args=url_args, kwargs=url_kwargs)
        self.assertEqual(reversed_url, expected_url)

        resolved_view_class = resolve(expected_url).func.cls
        self.assertEqual(resolved_view_class, view_class)


class URLsTestCase(URLsMixin, TestCase):
    pass
// ... existing code ...
class URLsMixin(object):
    """A TestCase Mixin with a check_url helper method for testing urls"""

    def check_url(self, view_class, expected_url, url_name,
                  url_args=None, url_kwargs=None):
        """
// ... modified code ...
        Check the url_name reverses to give a correctly formated expected_url.

        Check the expected_url resolves to the correct view.
        """
...
        reversed_url = reverse(url_name, args=url_args, kwargs=url_kwargs)
        self.assertEqual(reversed_url, expected_url)

        resolved_view_class = resolve(expected_url).func.cls
        self.assertEqual(resolved_view_class, view_class)


class URLsTestCase(URLsMixin, TestCase):
    pass
// ... rest of the code ...
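For context on how the new mixin would be consumed, a sketch of a concrete test. The view class, URL, and names below are hypothetical, and note that the mixin's `resolve(...).func.cls` lookup assumes class-based views that expose a `cls` attribute on the resolved function (as some view libraries set), which plain Django `as_view()` does not:

```
from django.test import TestCase

from myapp.views import ArticleDetail  # hypothetical view class


class ArticleURLTest(URLsMixin, TestCase):
    def test_detail_url(self):
        # Asserts both that 'article-detail' reverses to the URL and
        # that the URL resolves back to ArticleDetail.
        self.check_url(
            ArticleDetail,
            expected_url='/articles/42/',
            url_name='article-detail',
            url_kwargs={'pk': 42},
        )
```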
92adf36a7aaf6d4741944b6c606f0cf4902f232d
letters/admin.py
letters/admin.py
from dal import autocomplete
from django import forms
from django.contrib import admin

from .models import Letter, Topic
from prosopography.models import Person


class PersonInlineForm(forms.ModelForm):

    class Meta:
        model = Person.letters_to.through
        fields = ('__all__')
        widgets = {
            'person': autocomplete.ModelSelect2(
                url='people:dal-autocomplete',
                attrs={
                    'data-placeholder': 'Type to search...',
                    'data-minimum-input-length': 2,
                }
            ),
        }


class PersonInline(admin.TabularInline):
    model = Person.letters_to.through
    form = PersonInlineForm


class LetterAdmin(admin.ModelAdmin):
    model = Letter
    inlines = [PersonInline]
    fields = ('book', 'letter', 'topics', 'date', 'citations')
    search_fields = ('book', 'letter', 'letters_to__nomina')
    list_filter = ('book',)
    filter_horizontal = ('citations',)


admin.site.register(Letter, LetterAdmin)
admin.site.register(Topic)
from dal import autocomplete
from django import forms
from django.contrib import admin

from letters.models import Letter, Topic
from prosopography.models import Person


class PersonInlineForm(forms.ModelForm):
    """Configure inline admin form for :class:`prosopography.models.Person` """

    class Meta:
        model = Person.letters_to.through
        fields = ('__all__')
        widgets = {
            'person': autocomplete.ModelSelect2(
                url='people:dal-autocomplete',
                attrs={
                    'data-placeholder': 'Type to search...',
                    'data-minimum-input-length': 2,
                }
            ),
        }


class PersonInline(admin.TabularInline):
    """:class:`prosopography.models.Person` admin inline for M2M."""
    model = Person.letters_to.through
    form = PersonInlineForm


class LetterAdmin(admin.ModelAdmin):
    """ModelAdmin for :class:`letters.models.Letter`"""
    model = Letter
    inlines = [PersonInline]
    fields = ('book', 'letter', 'topics', 'date', 'citations')
    search_fields = ('book', 'letter', 'letters_to__nomina')
    list_filter = ('book',)
    filter_horizontal = ('citations',)


admin.site.register(Letter, LetterAdmin)
admin.site.register(Topic)
Add some documentation to letters
Add some documentation to letters
Python
mit
bwhicks/PlinyProject,bwhicks/PlinyProject,bwhicks/PlinyProject,bwhicks/PlinyProject
 from dal import autocomplete
 from django import forms
 from django.contrib import admin

-from .models import Letter, Topic
+from letters.models import Letter, Topic
 from prosopography.models import Person


 class PersonInlineForm(forms.ModelForm):
+    """Configure inline admin form for :class:`prosopography.models.Person` """

     class Meta:
         model = Person.letters_to.through
         fields = ('__all__')
         widgets = {
             'person': autocomplete.ModelSelect2(
                 url='people:dal-autocomplete',
                 attrs={
                     'data-placeholder': 'Type to search...',
                     'data-minimum-input-length': 2,
                 }
             ),
         }


 class PersonInline(admin.TabularInline):
+    """:class:`prosopography.models.Person` admin inline for M2M."""
     model = Person.letters_to.through
     form = PersonInlineForm


 class LetterAdmin(admin.ModelAdmin):
+    """ModelAdmin for :class:`letters.models.Letter`"""
     model = Letter
     inlines = [PersonInline]
     fields = ('book', 'letter', 'topics', 'date', 'citations')
     search_fields = ('book', 'letter', 'letters_to__nomina')
     list_filter = ('book',)
     filter_horizontal = ('citations',)


 admin.site.register(Letter, LetterAdmin)
 admin.site.register(Topic)
Add some documentation to letters
## Code Before:
from dal import autocomplete
from django import forms
from django.contrib import admin

from .models import Letter, Topic
from prosopography.models import Person


class PersonInlineForm(forms.ModelForm):

    class Meta:
        model = Person.letters_to.through
        fields = ('__all__')
        widgets = {
            'person': autocomplete.ModelSelect2(
                url='people:dal-autocomplete',
                attrs={
                    'data-placeholder': 'Type to search...',
                    'data-minimum-input-length': 2,
                }
            ),
        }


class PersonInline(admin.TabularInline):
    model = Person.letters_to.through
    form = PersonInlineForm


class LetterAdmin(admin.ModelAdmin):
    model = Letter
    inlines = [PersonInline]
    fields = ('book', 'letter', 'topics', 'date', 'citations')
    search_fields = ('book', 'letter', 'letters_to__nomina')
    list_filter = ('book',)
    filter_horizontal = ('citations',)


admin.site.register(Letter, LetterAdmin)
admin.site.register(Topic)

## Instruction:
Add some documentation to letters

## Code After:
from dal import autocomplete
from django import forms
from django.contrib import admin

from letters.models import Letter, Topic
from prosopography.models import Person


class PersonInlineForm(forms.ModelForm):
    """Configure inline admin form for :class:`prosopography.models.Person` """

    class Meta:
        model = Person.letters_to.through
        fields = ('__all__')
        widgets = {
            'person': autocomplete.ModelSelect2(
                url='people:dal-autocomplete',
                attrs={
                    'data-placeholder': 'Type to search...',
                    'data-minimum-input-length': 2,
                }
            ),
        }


class PersonInline(admin.TabularInline):
    """:class:`prosopography.models.Person` admin inline for M2M."""
    model = Person.letters_to.through
    form = PersonInlineForm


class LetterAdmin(admin.ModelAdmin):
    """ModelAdmin for :class:`letters.models.Letter`"""
    model = Letter
    inlines = [PersonInline]
    fields = ('book', 'letter', 'topics', 'date', 'citations')
    search_fields = ('book', 'letter', 'letters_to__nomina')
    list_filter = ('book',)
    filter_horizontal = ('citations',)


admin.site.register(Letter, LetterAdmin)
admin.site.register(Topic)
// ... existing code ...
from django.contrib import admin

from letters.models import Letter, Topic
from prosopography.models import Person
// ... modified code ...
class PersonInlineForm(forms.ModelForm):
    """Configure inline admin form for :class:`prosopography.models.Person` """

    class Meta:
...
class PersonInline(admin.TabularInline):
    """:class:`prosopography.models.Person` admin inline for M2M."""
    model = Person.letters_to.through
...
class LetterAdmin(admin.ModelAdmin):
    """ModelAdmin for :class:`letters.models.Letter`"""
    model = Letter
// ... rest of the code ...
f1cc57acd774eace69da7ec0ae9a516207d8ff5c
pyrfc3339/__init__.py
pyrfc3339/__init__.py
from generator import generate
from parser import parse

__all__ = ['generate', 'parse']
from pyrfc3339.generator import generate
from pyrfc3339.parser import parse

__all__ = ['generate', 'parse']
Fix imports for Python 3
Fix imports for Python 3
Python
mit
kurtraschke/pyRFC3339
-from generator import generate
+from pyrfc3339.generator import generate
-from parser import parse
+from pyrfc3339.parser import parse

 __all__ = ['generate', 'parse']
Fix imports for Python 3
## Code Before:
from generator import generate
from parser import parse

__all__ = ['generate', 'parse']

## Instruction:
Fix imports for Python 3

## Code After:
from pyrfc3339.generator import generate
from pyrfc3339.parser import parse

__all__ = ['generate', 'parse']
// ... existing code ...
from pyrfc3339.generator import generate
from pyrfc3339.parser import parse
// ... rest of the code ...
7558ffc73ebb6300e186fe508497a32acbc0c5ae
src/pythonic/test_primes.py
src/pythonic/test_primes.py
import pytest
import itertools

from main import Primes, Sieve


def test_sieve_limit():
    limit = 10000
    with Sieve(limit) as s:
        assert s.upper_bound() >= limit


def test_upper_bound_exception():
    limit = 10
    with Sieve(limit) as s:
        with pytest.raises(IndexError):
            s.is_prime(101)


def test_zero_is_not_in_prime_list():
    with Primes() as p:
        n = 20
        assert 0 not in list(itertools.islice(p, n))


def test_number_primes_asked_is_given():
    with Primes() as p:
        n = 20
        assert len(list(itertools.islice(p, n))) == n
import pytest
import itertools

from main import Primes, Sieve


def test_sieve_limit():
    limit = 10000
    with Sieve(limit) as s:
        assert s.upper_bound() >= limit


def test_checking_above_upper_bound_is_an_error():
    limit = 10
    with Sieve(limit) as s:
        with pytest.raises(IndexError):
            s.is_prime(101)


def test_zero_is_not_in_prime_list():
    with Primes() as p:
        n = 20
        assert 0 not in list(itertools.islice(p, n))


def test_number_primes_asked_is_given():
    with Primes() as p:
        n = 20
        assert len(list(itertools.islice(p, n))) == n
Reword guard test on upper bounds
Reword guard test on upper bounds
Python
cc0-1.0
Michael-F-Bryan/rust-ffi-guide,Michael-F-Bryan/rust-ffi-guide,Michael-F-Bryan/rust-ffi-guide
 import pytest
 import itertools

 from main import Primes, Sieve


 def test_sieve_limit():
     limit = 10000
     with Sieve(limit) as s:
         assert s.upper_bound() >= limit


-def test_upper_bound_exception():
+def test_checking_above_upper_bound_is_an_error():
     limit = 10
     with Sieve(limit) as s:
         with pytest.raises(IndexError):
             s.is_prime(101)


 def test_zero_is_not_in_prime_list():
     with Primes() as p:
         n = 20
         assert 0 not in list(itertools.islice(p, n))


 def test_number_primes_asked_is_given():
     with Primes() as p:
         n = 20
         assert len(list(itertools.islice(p, n))) == n
Reword guard test on upper bounds
## Code Before:
import pytest
import itertools

from main import Primes, Sieve


def test_sieve_limit():
    limit = 10000
    with Sieve(limit) as s:
        assert s.upper_bound() >= limit


def test_upper_bound_exception():
    limit = 10
    with Sieve(limit) as s:
        with pytest.raises(IndexError):
            s.is_prime(101)


def test_zero_is_not_in_prime_list():
    with Primes() as p:
        n = 20
        assert 0 not in list(itertools.islice(p, n))


def test_number_primes_asked_is_given():
    with Primes() as p:
        n = 20
        assert len(list(itertools.islice(p, n))) == n

## Instruction:
Reword guard test on upper bounds

## Code After:
import pytest
import itertools

from main import Primes, Sieve


def test_sieve_limit():
    limit = 10000
    with Sieve(limit) as s:
        assert s.upper_bound() >= limit


def test_checking_above_upper_bound_is_an_error():
    limit = 10
    with Sieve(limit) as s:
        with pytest.raises(IndexError):
            s.is_prime(101)


def test_zero_is_not_in_prime_list():
    with Primes() as p:
        n = 20
        assert 0 not in list(itertools.islice(p, n))


def test_number_primes_asked_is_given():
    with Primes() as p:
        n = 20
        assert len(list(itertools.islice(p, n))) == n
// ... existing code ...
def test_checking_above_upper_bound_is_an_error():
    limit = 10
// ... rest of the code ...
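The tests above exercise `Sieve` and `Primes` from a `main` module that is not shown (in this repository they wrap a Rust FFI library). A rough pure-Python stand-in for `Sieve` with the same context-manager surface, offered only to make the tests' expectations concrete:

```
class Sieve(object):
    """Hypothetical stand-in: Eratosthenes sieve up to `limit`."""

    def __init__(self, limit):
        self._limit = limit
        flags = [True] * (limit + 1)
        flags[0:2] = [False, False]  # 0 and 1 are not prime
        for i in range(2, int(limit ** 0.5) + 1):
            if flags[i]:
                flags[i * i::i] = [False] * len(flags[i * i::i])
        self._flags = flags

    def upper_bound(self):
        return self._limit

    def is_prime(self, n):
        if n > self._limit:
            raise IndexError(n)  # matches the guard the test asserts on
        return self._flags[n]

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        return False
```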
379c6254da0d6a06f8c01cd7cd2632a1d59624ac
comics/sets/context_processors.py
comics/sets/context_processors.py
from comics.sets.models import UserSet


def user_set(request):
    try:
        user_set = UserSet.objects.get(user=request.user)
        return {
            'user_set': user_set,
            'user_set_comics': user_set.comics.all(),
        }
    except UserSet.DoesNotExist:
        return {}
def user_set(request):
    if hasattr(request, 'user_set'):
        return {
            'user_set': request.user_set,
            'user_set_comics': request.user_set.comics.all(),
        }
    else:
        return {}
Use request.user_set in context preprocessor
Use request.user_set in context preprocessor
Python
agpl-3.0
datagutten/comics,datagutten/comics,jodal/comics,jodal/comics,jodal/comics,datagutten/comics,jodal/comics,datagutten/comics
-from comics.sets.models import UserSet
-
 def user_set(request):
+    if hasattr(request, 'user_set'):
-    try:
-        user_set = UserSet.objects.get(user=request.user)
         return {
-            'user_set': user_set,
+            'user_set': request.user_set,
-            'user_set_comics': user_set.comics.all(),
+            'user_set_comics': request.user_set.comics.all(),
         }
-    except UserSet.DoesNotExist:
+    else:
         return {}
Use request.user_set in context preprocessor
## Code Before:
from comics.sets.models import UserSet


def user_set(request):
    try:
        user_set = UserSet.objects.get(user=request.user)
        return {
            'user_set': user_set,
            'user_set_comics': user_set.comics.all(),
        }
    except UserSet.DoesNotExist:
        return {}

## Instruction:
Use request.user_set in context preprocessor

## Code After:
def user_set(request):
    if hasattr(request, 'user_set'):
        return {
            'user_set': request.user_set,
            'user_set_comics': request.user_set.comics.all(),
        }
    else:
        return {}
# ... existing code ...
def user_set(request):
    if hasattr(request, 'user_set'):
        return {
            'user_set': request.user_set,
            'user_set_comics': request.user_set.comics.all(),
        }
    else:
        return {}
# ... rest of the code ...
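The rewritten context processor above assumes something earlier in the request cycle has already attached `user_set` to the request. A plausible sketch of middleware doing that — hypothetical, not taken from the repository, and written in the old-style `process_request` form the codebase's era suggests:

```
from comics.sets.models import UserSet


class UserSetMiddleware(object):
    """Hypothetical middleware that attaches the current user's set."""

    def process_request(self, request):
        try:
            request.user_set = UserSet.objects.get(user=request.user)
        except UserSet.DoesNotExist:
            # Leave the attribute unset; the context processor's
            # hasattr() check then falls through to an empty context.
            pass
```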
0654d962918327e5143fb9250ad344de26e284eb
electrumx_server.py
electrumx_server.py
'''Script to kick off the server.'''

import logging
import traceback

from server.env import Env
from server.controller import Controller


def main():
    '''Set up logging and run the server.'''
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-9s %(message)-100s '
                               '%(name)s [%(filename)s:%(lineno)d]')
    logging.info('ElectrumX server starting')
    try:
        controller = Controller(Env())
        controller.run()
    except Exception:
        traceback.print_exc()
        logging.critical('ElectrumX server terminated abnormally')
    else:
        logging.info('ElectrumX server terminated normally')


if __name__ == '__main__':
    main()
'''Script to kick off the server.'''

import logging
import traceback

from server.env import Env
from server.controller import Controller


def main():
    '''Set up logging and run the server.'''
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-7s %(message)-100s '
                               '[%(filename)s:%(lineno)d]')
    logging.info('ElectrumX server starting')
    try:
        controller = Controller(Env())
        controller.run()
    except Exception:
        traceback.print_exc()
        logging.critical('ElectrumX server terminated abnormally')
    else:
        logging.info('ElectrumX server terminated normally')


if __name__ == '__main__':
    main()
Remove logger name from logs
Remove logger name from logs
Python
mit
thelazier/electrumx,shsmith/electrumx,shsmith/electrumx,erasmospunk/electrumx,erasmospunk/electrumx,thelazier/electrumx
 '''Script to kick off the server.'''

 import logging
 import traceback

 from server.env import Env
 from server.controller import Controller


 def main():
     '''Set up logging and run the server.'''
     logging.basicConfig(level=logging.INFO,
-                        format='%(asctime)s %(levelname)-9s %(message)-100s '
+                        format='%(asctime)s %(levelname)-7s %(message)-100s '
-                               '%(name)s [%(filename)s:%(lineno)d]')
+                               '[%(filename)s:%(lineno)d]')
     logging.info('ElectrumX server starting')
     try:
         controller = Controller(Env())
         controller.run()
     except Exception:
         traceback.print_exc()
         logging.critical('ElectrumX server terminated abnormally')
     else:
         logging.info('ElectrumX server terminated normally')


 if __name__ == '__main__':
     main()
Remove logger name from logs
## Code Before:
'''Script to kick off the server.'''

import logging
import traceback

from server.env import Env
from server.controller import Controller


def main():
    '''Set up logging and run the server.'''
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-9s %(message)-100s '
                               '%(name)s [%(filename)s:%(lineno)d]')
    logging.info('ElectrumX server starting')
    try:
        controller = Controller(Env())
        controller.run()
    except Exception:
        traceback.print_exc()
        logging.critical('ElectrumX server terminated abnormally')
    else:
        logging.info('ElectrumX server terminated normally')


if __name__ == '__main__':
    main()

## Instruction:
Remove logger name from logs

## Code After:
'''Script to kick off the server.'''

import logging
import traceback

from server.env import Env
from server.controller import Controller


def main():
    '''Set up logging and run the server.'''
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-7s %(message)-100s '
                               '[%(filename)s:%(lineno)d]')
    logging.info('ElectrumX server starting')
    try:
        controller = Controller(Env())
        controller.run()
    except Exception:
        traceback.print_exc()
        logging.critical('ElectrumX server terminated abnormally')
    else:
        logging.info('ElectrumX server terminated normally')


if __name__ == '__main__':
    main()
// ... existing code ...
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-7s %(message)-100s '
                               '[%(filename)s:%(lineno)d]')
    logging.info('ElectrumX server starting')
// ... rest of the code ...
2e1b5f4804023cd551b1d641e4b4dc5ba693ff62
demos/simple.py
demos/simple.py
import numpy as np
import matplotlib.pyplot as pl
import pygp as pg

import pybo.models as pbm
import pybo.policies as pbp


if __name__ == '__main__':
    sn = 0.2
    ell = 0.670104947766
    sf = 1.25415619045

    model = pbm.Sinusoidal(0.2)
    gp = pg.BasicGP(sn, ell, sf)
    policy = pbp.GPUCB(gp, model.bounds)
    xmin = model.bounds[0][0]
    xmax = model.bounds[0][1]

    X = np.linspace(xmin, xmax, 200)[:, None]
    x = (xmax-xmin) / 2

    for i in xrange(40):
        pg.gpplot(policy.gp, xmin=xmin, xmax=xmax)
        pl.plot(X, policy.get_index(X), lw=2)
        pl.axvline(x, color='r')
        pl.axis('tight')
        pl.axis(xmin=xmin, xmax=xmax)
        pl.draw()

        y = model.get_data(x)
        policy.add_data(x, y)
        x = policy.get_next()
import numpy as np
import matplotlib.pyplot as pl
import pygp as pg

import pybo.models as pbm
import pybo.policies as pbp


def run_model(Model, sn, ell, sf, T):
    model = Model(0.2)
    gp = pg.BasicGP(sn, ell, sf)
    policy = pbp.GPUCB(gp, model.bounds)
    xmin = model.bounds[0][0]
    xmax = model.bounds[0][1]

    X = np.linspace(xmin, xmax, 200)[:, None]
    x = (xmax-xmin) / 2

    for i in xrange(T):
        pg.gpplot(policy.gp, xmin=xmin, xmax=xmax)
        pl.plot(X, policy.get_index(X), lw=2)
        pl.axvline(x, color='r')
        pl.axis('tight')
        pl.axis(xmin=xmin, xmax=xmax)
        pl.draw()

        y = model.get_data(x)
        policy.add_data(x, y)
        x = policy.get_next()


if __name__ == '__main__':
    # run_model(pbm.Sinusoidal, 0.2, 0.70, 1.25, 100)
    run_model(pbm.Gramacy, 0.2, 0.05, 1.25, 100)
Add a harder test example.
Add a harder test example.
Python
bsd-2-clause
mwhoffman/pybo,jhartford/pybo
 import numpy as np
 import matplotlib.pyplot as pl
 import pygp as pg

 import pybo.models as pbm
 import pybo.policies as pbp


+def run_model(Model, sn, ell, sf, T):
-if __name__ == '__main__':
-    sn = 0.2
-    ell = 0.670104947766
-    sf = 1.25415619045
-
-    model = pbm.Sinusoidal(0.2)
+    model = Model(0.2)
     gp = pg.BasicGP(sn, ell, sf)
     policy = pbp.GPUCB(gp, model.bounds)
     xmin = model.bounds[0][0]
     xmax = model.bounds[0][1]

     X = np.linspace(xmin, xmax, 200)[:, None]
     x = (xmax-xmin) / 2

-    for i in xrange(40):
+    for i in xrange(T):
         pg.gpplot(policy.gp, xmin=xmin, xmax=xmax)
         pl.plot(X, policy.get_index(X), lw=2)
         pl.axvline(x, color='r')
         pl.axis('tight')
         pl.axis(xmin=xmin, xmax=xmax)
         pl.draw()

         y = model.get_data(x)
         policy.add_data(x, y)
         x = policy.get_next()
+
+
+if __name__ == '__main__':
+    # run_model(pbm.Sinusoidal, 0.2, 0.70, 1.25, 100)
+    run_model(pbm.Gramacy, 0.2, 0.05, 1.25, 100)
Add a harder test example.
## Code Before:
import numpy as np
import matplotlib.pyplot as pl
import pygp as pg

import pybo.models as pbm
import pybo.policies as pbp


if __name__ == '__main__':
    sn = 0.2
    ell = 0.670104947766
    sf = 1.25415619045

    model = pbm.Sinusoidal(0.2)
    gp = pg.BasicGP(sn, ell, sf)
    policy = pbp.GPUCB(gp, model.bounds)
    xmin = model.bounds[0][0]
    xmax = model.bounds[0][1]

    X = np.linspace(xmin, xmax, 200)[:, None]
    x = (xmax-xmin) / 2

    for i in xrange(40):
        pg.gpplot(policy.gp, xmin=xmin, xmax=xmax)
        pl.plot(X, policy.get_index(X), lw=2)
        pl.axvline(x, color='r')
        pl.axis('tight')
        pl.axis(xmin=xmin, xmax=xmax)
        pl.draw()

        y = model.get_data(x)
        policy.add_data(x, y)
        x = policy.get_next()

## Instruction:
Add a harder test example.

## Code After:
import numpy as np
import matplotlib.pyplot as pl
import pygp as pg

import pybo.models as pbm
import pybo.policies as pbp


def run_model(Model, sn, ell, sf, T):
    model = Model(0.2)
    gp = pg.BasicGP(sn, ell, sf)
    policy = pbp.GPUCB(gp, model.bounds)
    xmin = model.bounds[0][0]
    xmax = model.bounds[0][1]

    X = np.linspace(xmin, xmax, 200)[:, None]
    x = (xmax-xmin) / 2

    for i in xrange(T):
        pg.gpplot(policy.gp, xmin=xmin, xmax=xmax)
        pl.plot(X, policy.get_index(X), lw=2)
        pl.axvline(x, color='r')
        pl.axis('tight')
        pl.axis(xmin=xmin, xmax=xmax)
        pl.draw()

        y = model.get_data(x)
        policy.add_data(x, y)
        x = policy.get_next()


if __name__ == '__main__':
    # run_model(pbm.Sinusoidal, 0.2, 0.70, 1.25, 100)
    run_model(pbm.Gramacy, 0.2, 0.05, 1.25, 100)
# ... existing code ...
def run_model(Model, sn, ell, sf, T):
    model = Model(0.2)
    gp = pg.BasicGP(sn, ell, sf)
# ... modified code ...
    for i in xrange(T):
        pg.gpplot(policy.gp, xmin=xmin, xmax=xmax)
...
        x = policy.get_next()


if __name__ == '__main__':
    # run_model(pbm.Sinusoidal, 0.2, 0.70, 1.25, 100)
    run_model(pbm.Gramacy, 0.2, 0.05, 1.25, 100)
# ... rest of the code ...
c79d040cb952e8e37c231caf90eda92d152978b8
openfisca_country_template/__init__.py
openfisca_country_template/__init__.py
import os

from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities

COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))


# Our country tax and benefit class inherits from the general TaxBenefitSystem class.
# The name CountryTaxBenefitSystem must not be changed, as all tools of the OpenFisca ecosystem expect a CountryTaxBenefitSystem class to be exposed in the __init__ module of a country package.
class CountryTaxBenefitSystem(TaxBenefitSystem):
    def __init__(self):
        # We initialize our tax and benefit system with the general constructor
        super(CountryTaxBenefitSystem, self).__init__(entities.entities)

        # We add to our tax and benefit system all the variables
        self.add_variables_from_directory(os.path.join(COUNTRY_DIR, 'variables'))

        # We add to our tax and benefit system all the legislation parameters defined in the parameters files
        param_files = [
            '__root__.xml',
            'benefits.xml',
            'general.xml',
            'taxes.xml',
            ]
        for param_file in param_files:
            param_path = os.path.join(COUNTRY_DIR, 'parameters', param_file)
            self.add_legislation_params(param_path)
import os

from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities

COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))


# Our country tax and benefit class inherits from the general TaxBenefitSystem class.
# The name CountryTaxBenefitSystem must not be changed, as all tools of the OpenFisca ecosystem expect a CountryTaxBenefitSystem class to be exposed in the __init__ module of a country package.
class CountryTaxBenefitSystem(TaxBenefitSystem):
    def __init__(self):
        # We initialize our tax and benefit system with the general constructor
        super(CountryTaxBenefitSystem, self).__init__(entities.entities)

        # We add to our tax and benefit system all the variables
        self.add_variables_from_directory(os.path.join(COUNTRY_DIR, 'variables'))

        # We add to our tax and benefit system all the legislation parameters defined in the parameters files
        param_path = os.path.join(COUNTRY_DIR, 'parameters')
        self.add_legislation_params(param_path)
Use YAML params instead of XML params
Use YAML params instead of XML params
Python
agpl-3.0
openfisca/country-template,openfisca/country-template
 import os

 from openfisca_core.taxbenefitsystems import TaxBenefitSystem
 from . import entities

 COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))


 # Our country tax and benefit class inherits from the general TaxBenefitSystem class.
 # The name CountryTaxBenefitSystem must not be changed, as all tools of the OpenFisca ecosystem expect a CountryTaxBenefitSystem class to be exposed in the __init__ module of a country package.
 class CountryTaxBenefitSystem(TaxBenefitSystem):
     def __init__(self):
         # We initialize our tax and benefit system with the general constructor
         super(CountryTaxBenefitSystem, self).__init__(entities.entities)

         # We add to our tax and benefit system all the variables
         self.add_variables_from_directory(os.path.join(COUNTRY_DIR, 'variables'))

         # We add to our tax and benefit system all the legislation parameters defined in the parameters files
+        param_path = os.path.join(COUNTRY_DIR, 'parameters')
+        self.add_legislation_params(param_path)
-        param_files = [
-            '__root__.xml',
-            'benefits.xml',
-            'general.xml',
-            'taxes.xml',
-            ]
-        for param_file in param_files:
-            param_path = os.path.join(COUNTRY_DIR, 'parameters', param_file)
-            self.add_legislation_params(param_path)
-
Use YAML params instead of XML params
## Code Before:
import os

from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities

COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))


# Our country tax and benefit class inherits from the general TaxBenefitSystem class.
# The name CountryTaxBenefitSystem must not be changed, as all tools of the OpenFisca ecosystem expect a CountryTaxBenefitSystem class to be exposed in the __init__ module of a country package.
class CountryTaxBenefitSystem(TaxBenefitSystem):
    def __init__(self):
        # We initialize our tax and benefit system with the general constructor
        super(CountryTaxBenefitSystem, self).__init__(entities.entities)

        # We add to our tax and benefit system all the variables
        self.add_variables_from_directory(os.path.join(COUNTRY_DIR, 'variables'))

        # We add to our tax and benefit system all the legislation parameters defined in the parameters files
        param_files = [
            '__root__.xml',
            'benefits.xml',
            'general.xml',
            'taxes.xml',
            ]
        for param_file in param_files:
            param_path = os.path.join(COUNTRY_DIR, 'parameters', param_file)
            self.add_legislation_params(param_path)

## Instruction:
Use YAML params instead of XML params

## Code After:
import os

from openfisca_core.taxbenefitsystems import TaxBenefitSystem
from . import entities

COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))


# Our country tax and benefit class inherits from the general TaxBenefitSystem class.
# The name CountryTaxBenefitSystem must not be changed, as all tools of the OpenFisca ecosystem expect a CountryTaxBenefitSystem class to be exposed in the __init__ module of a country package.
class CountryTaxBenefitSystem(TaxBenefitSystem):
    def __init__(self):
        # We initialize our tax and benefit system with the general constructor
        super(CountryTaxBenefitSystem, self).__init__(entities.entities)

        # We add to our tax and benefit system all the variables
        self.add_variables_from_directory(os.path.join(COUNTRY_DIR, 'variables'))

        # We add to our tax and benefit system all the legislation parameters defined in the parameters files
        param_path = os.path.join(COUNTRY_DIR, 'parameters')
        self.add_legislation_params(param_path)
# ... existing code ...
        # We add to our tax and benefit system all the legislation parameters defined in the parameters files
        param_path = os.path.join(COUNTRY_DIR, 'parameters')
        self.add_legislation_params(param_path)
# ... rest of the code ...
86b889049ef1ee1c896e4ab44185fc54ef87a2c0
IPython/consoleapp.py
IPython/consoleapp.py
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from warnings import warn

warn("The `IPython.consoleapp` package has been deprecated. "
     "You should import from jupyter_client.consoleapp instead.", DeprecationWarning, stacklevel=2)

from jupyter_client.consoleapp import *
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from warnings import warn

warn("The `IPython.consoleapp` package has been deprecated since IPython 4.0."
     "You should import from jupyter_client.consoleapp instead.", stacklevel=2)

from jupyter_client.consoleapp import *
Remove Deprecation Warning, add since when things were deprecated.
Remove Deprecation Warning, add since when things were deprecated.
Python
bsd-3-clause
ipython/ipython,ipython/ipython
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.

 from warnings import warn

-warn("The `IPython.consoleapp` package has been deprecated. "
+warn("The `IPython.consoleapp` package has been deprecated since IPython 4.0."
-     "You should import from jupyter_client.consoleapp instead.", DeprecationWarning, stacklevel=2)
+     "You should import from jupyter_client.consoleapp instead.", stacklevel=2)

 from jupyter_client.consoleapp import *
Remove Deprecation Warning, add since when things were deprecated.
## Code Before:
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from warnings import warn

warn("The `IPython.consoleapp` package has been deprecated. "
     "You should import from jupyter_client.consoleapp instead.", DeprecationWarning, stacklevel=2)

from jupyter_client.consoleapp import *

## Instruction:
Remove Deprecation Warning, add since when things were deprecated.

## Code After:
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.

from warnings import warn

warn("The `IPython.consoleapp` package has been deprecated since IPython 4.0."
     "You should import from jupyter_client.consoleapp instead.", stacklevel=2)

from jupyter_client.consoleapp import *
# ... existing code ...
warn("The `IPython.consoleapp` package has been deprecated since IPython 4.0."
     "You should import from jupyter_client.consoleapp instead.", stacklevel=2)
# ... rest of the code ...
53878700a4da22e80114ef67a4aee340846abf91
us_ignite/search/urls.py
us_ignite/search/urls.py
from django.conf.urls import patterns, url


urlpatterns = patterns(
    'us_ignite.search.views',
    url(r'apps/', 'search_apps', name='search_apps'),
    url(r'events/', 'search_events', name='search_events'),
    url(r'hubs/', 'search_hubs', name='search_hubs'),
    url(r'orgs/', 'search_organizations', name='search_organizations'),
    url(r'resources/', 'search_resources', name='search_resources'),
)
from django.conf.urls import patterns, url


urlpatterns = patterns(
    'us_ignite.search.views',
    url(r'^apps/$', 'search_apps', name='search_apps'),
    url(r'^events/$', 'search_events', name='search_events'),
    url(r'^hubs/$', 'search_hubs', name='search_hubs'),
    url(r'^orgs/$', 'search_organizations', name='search_organizations'),
    url(r'^resources/$', 'search_resources', name='search_resources'),
)
Fix broad regex for the ``search`` URLs.
Fix broad regex for the ``search`` URLs.
Python
bsd-3-clause
us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite
 from django.conf.urls import patterns, url


 urlpatterns = patterns(
     'us_ignite.search.views',
-    url(r'apps/', 'search_apps', name='search_apps'),
+    url(r'^apps/$', 'search_apps', name='search_apps'),
-    url(r'events/', 'search_events', name='search_events'),
+    url(r'^events/$', 'search_events', name='search_events'),
-    url(r'hubs/', 'search_hubs', name='search_hubs'),
+    url(r'^hubs/$', 'search_hubs', name='search_hubs'),
-    url(r'orgs/', 'search_organizations', name='search_organizations'),
+    url(r'^orgs/$', 'search_organizations', name='search_organizations'),
-    url(r'resources/', 'search_resources', name='search_resources'),
+    url(r'^resources/$', 'search_resources', name='search_resources'),
 )
Fix broad regex for the ``search`` URLs.
## Code Before:
from django.conf.urls import patterns, url


urlpatterns = patterns(
    'us_ignite.search.views',
    url(r'apps/', 'search_apps', name='search_apps'),
    url(r'events/', 'search_events', name='search_events'),
    url(r'hubs/', 'search_hubs', name='search_hubs'),
    url(r'orgs/', 'search_organizations', name='search_organizations'),
    url(r'resources/', 'search_resources', name='search_resources'),
)

## Instruction:
Fix broad regex for the ``search`` URLs.

## Code After:
from django.conf.urls import patterns, url


urlpatterns = patterns(
    'us_ignite.search.views',
    url(r'^apps/$', 'search_apps', name='search_apps'),
    url(r'^events/$', 'search_events', name='search_events'),
    url(r'^hubs/$', 'search_hubs', name='search_hubs'),
    url(r'^orgs/$', 'search_organizations', name='search_organizations'),
    url(r'^resources/$', 'search_resources', name='search_resources'),
)
...
    'us_ignite.search.views',
    url(r'^apps/$', 'search_apps', name='search_apps'),
    url(r'^events/$', 'search_events', name='search_events'),
    url(r'^hubs/$', 'search_hubs', name='search_hubs'),
    url(r'^orgs/$', 'search_organizations', name='search_organizations'),
    url(r'^resources/$', 'search_resources', name='search_resources'),
)
...
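Why the old patterns were "broad": Django matches URL patterns with `re.search`, so an unanchored `r'apps/'` fires on any path that merely contains that substring. A minimal standalone demonstration of the difference (the sample path is invented for the example):

```
import re

candidate = 'events/apps/'  # a path that should NOT hit search_apps

print(bool(re.search(r'apps/', candidate)))    # True  -> wrong view matched
print(bool(re.search(r'^apps/$', candidate)))  # False -> anchored pattern fixed
print(bool(re.search(r'^apps/$', 'apps/')))    # True  -> intended match kept
```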
1599d4ed14fb3d7c7e551c9f6ce3f86d9df17cbd
mammoth/writers/html.py
mammoth/writers/html.py
from __future__ import unicode_literals

from .abc import Writer

import cgi


class HtmlWriter(Writer):
    def __init__(self):
        self._fragments = []

    def text(self, text):
        self._fragments.append(_escape_html(text))

    def start(self, name, attributes=None):
        attribute_string = _generate_attribute_string(attributes)
        self._fragments.append("<{0}{1}>".format(name, attribute_string))

    def end(self, name):
        self._fragments.append("</{0}>".format(name))

    def self_closing(self, name, attributes=None):
        attribute_string = _generate_attribute_string(attributes)
        self._fragments.append("<{0}{1} />".format(name, attribute_string))

    def append(self, html):
        self._fragments.append(html)

    def as_string(self):
        return "".join(self._fragments)


def _escape_html(text):
    return cgi.escape(text, quote=True)


def _generate_attribute_string(attributes):
    if attributes is None:
        return ""
    else:
        return "".join(
            ' {0}="{1}"'.format(key, _escape_html(attributes[key]))
            for key in sorted(attributes)
        )
from __future__ import unicode_literals

from xml.sax.saxutils import escape

from .abc import Writer


class HtmlWriter(Writer):
    def __init__(self):
        self._fragments = []

    def text(self, text):
        self._fragments.append(_escape_html(text))

    def start(self, name, attributes=None):
        attribute_string = _generate_attribute_string(attributes)
        self._fragments.append("<{0}{1}>".format(name, attribute_string))

    def end(self, name):
        self._fragments.append("</{0}>".format(name))

    def self_closing(self, name, attributes=None):
        attribute_string = _generate_attribute_string(attributes)
        self._fragments.append("<{0}{1} />".format(name, attribute_string))

    def append(self, html):
        self._fragments.append(html)

    def as_string(self):
        return "".join(self._fragments)


def _escape_html(text):
    return escape(text, {'"': "&quot;"})


def _generate_attribute_string(attributes):
    if attributes is None:
        return ""
    else:
        return "".join(
            ' {0}="{1}"'.format(key, _escape_html(attributes[key]))
            for key in sorted(attributes)
        )
Use xml.sax.saxutils.escape instead of deprecated cgi.escape
Use xml.sax.saxutils.escape instead of deprecated cgi.escape

```
/usr/local/lib/python3.6/dist-packages/mammoth/writers/html.py:34: DeprecationWarning: cgi.escape is deprecated, use html.escape instead
  return cgi.escape(text, quote=True)
```
Python
bsd-2-clause
mwilliamson/python-mammoth
 from __future__ import unicode_literals

+from xml.sax.saxutils import escape

 from .abc import Writer
-
-import cgi


 class HtmlWriter(Writer):
     def __init__(self):
         self._fragments = []

     def text(self, text):
         self._fragments.append(_escape_html(text))

     def start(self, name, attributes=None):
         attribute_string = _generate_attribute_string(attributes)
         self._fragments.append("<{0}{1}>".format(name, attribute_string))

     def end(self, name):
         self._fragments.append("</{0}>".format(name))

     def self_closing(self, name, attributes=None):
         attribute_string = _generate_attribute_string(attributes)
         self._fragments.append("<{0}{1} />".format(name, attribute_string))

     def append(self, html):
         self._fragments.append(html)

     def as_string(self):
         return "".join(self._fragments)


 def _escape_html(text):
-    return cgi.escape(text, quote=True)
+    return escape(text, {'"': "&quot;"})


 def _generate_attribute_string(attributes):
     if attributes is None:
         return ""
     else:
         return "".join(
             ' {0}="{1}"'.format(key, _escape_html(attributes[key]))
             for key in sorted(attributes)
         )
Use xml.sax.saxutils.escape instead of deprecated cgi.escape
## Code Before:
from __future__ import unicode_literals

from .abc import Writer

import cgi


class HtmlWriter(Writer):
    def __init__(self):
        self._fragments = []

    def text(self, text):
        self._fragments.append(_escape_html(text))

    def start(self, name, attributes=None):
        attribute_string = _generate_attribute_string(attributes)
        self._fragments.append("<{0}{1}>".format(name, attribute_string))

    def end(self, name):
        self._fragments.append("</{0}>".format(name))

    def self_closing(self, name, attributes=None):
        attribute_string = _generate_attribute_string(attributes)
        self._fragments.append("<{0}{1} />".format(name, attribute_string))

    def append(self, html):
        self._fragments.append(html)

    def as_string(self):
        return "".join(self._fragments)


def _escape_html(text):
    return cgi.escape(text, quote=True)


def _generate_attribute_string(attributes):
    if attributes is None:
        return ""
    else:
        return "".join(
            ' {0}="{1}"'.format(key, _escape_html(attributes[key]))
            for key in sorted(attributes)
        )

## Instruction:
Use xml.sax.saxutils.escape instead of deprecated cgi.escape

## Code After:
from __future__ import unicode_literals

from xml.sax.saxutils import escape

from .abc import Writer


class HtmlWriter(Writer):
    def __init__(self):
        self._fragments = []

    def text(self, text):
        self._fragments.append(_escape_html(text))

    def start(self, name, attributes=None):
        attribute_string = _generate_attribute_string(attributes)
        self._fragments.append("<{0}{1}>".format(name, attribute_string))

    def end(self, name):
        self._fragments.append("</{0}>".format(name))

    def self_closing(self, name, attributes=None):
        attribute_string = _generate_attribute_string(attributes)
        self._fragments.append("<{0}{1} />".format(name, attribute_string))

    def append(self, html):
        self._fragments.append(html)

    def as_string(self):
        return "".join(self._fragments)


def _escape_html(text):
    return escape(text, {'"': "&quot;"})


def _generate_attribute_string(attributes):
    if attributes is None:
        return ""
    else:
        return "".join(
            ' {0}="{1}"'.format(key, _escape_html(attributes[key]))
            for key in sorted(attributes)
        )
// ... existing code ...
from __future__ import unicode_literals

from xml.sax.saxutils import escape
// ... modified code ...
from .abc import Writer
...
def _escape_html(text):
    return escape(text, {'"': "&quot;"})
// ... rest of the code ...
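As a quick sanity check on the swap above (this snippet is editorial, not part of the record): `xml.sax.saxutils.escape` handles `&`, `<`, and `>` by default, and the extra `{'"': "&quot;"}` entity map covers the fourth character that `cgi.escape(text, quote=True)` used to escape:

```
from xml.sax.saxutils import escape

print(escape('a < b > c & "d"', {'"': "&quot;"}))
# -> a &lt; b &gt; c &amp; &quot;d&quot;
```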
de3161d66ab0a5661d98ace04f5f0ae7c01062bf
smsgateway/utils.py
smsgateway/utils.py
import logging

logger = logging.getLogger(__name__)


def strspn(source, allowed):
    newchrs = []
    for c in source:
        if c in allowed:
            newchrs.append(c)
    return u''.join(newchrs)


def check_cell_phone_number(number):
    cleaned_number = strspn(number, u'+0123456789')

    if not u'+' in cleaned_number[:1]:
        cleaned_number = u'+%s' % cleaned_number

    return cleaned_number


def truncate_sms(text, max_length=160):
    if len(text) <= max_length:
        return text
    else:
        logger.error("Trying to send an SMS that is too long: %s", text)
        return text[:max_length-3] + '...'


def parse_sms(content):
    content = content.upper().strip()

    from smsgateway.backends.base import hook
    for keyword, subkeywords in hook.iteritems():
        if content[:len(keyword)] == unicode(keyword):
            remainder = content[len(keyword):].strip()
            if '*' in subkeywords:
                parts = remainder.split(u' ')
                subkeyword = parts[0].strip()
                if subkeyword in subkeywords:
                    return [keyword] + parts
                return keyword, remainder
            else:
                for subkeyword in subkeywords:
                    if remainder[:len(subkeyword)] == unicode(subkeyword):
                        subremainder = remainder[len(subkeyword):].strip()
                        return [keyword, subkeyword] + subremainder.split()
    return None
import logging

logger = logging.getLogger(__name__)


def strspn(source, allowed):
    newchrs = []
    for c in source:
        if c in allowed:
            newchrs.append(c)
    return u''.join(newchrs)


def check_cell_phone_number(number):
    cleaned_number = strspn(number, u'0123456789')
    #if not u'+' in cleaned_number[:1]:
    #    cleaned_number = u'+%s' % cleaned_number
    return int(cleaned_number)


def truncate_sms(text, max_length=160):
    if len(text) <= max_length:
        return text
    else:
        logger.error("Trying to send an SMS that is too long: %s", text)
        return text[:max_length-3] + '...'


def parse_sms(content):
    content = content.upper().strip()

    from smsgateway.backends.base import hook
    for keyword, subkeywords in hook.iteritems():
        if content[:len(keyword)] == unicode(keyword):
            remainder = content[len(keyword):].strip()
            if '*' in subkeywords:
                parts = remainder.split(u' ')
                subkeyword = parts[0].strip()
                if subkeyword in subkeywords:
                    return [keyword] + parts
                return keyword, remainder
            else:
                for subkeyword in subkeywords:
                    if remainder[:len(subkeyword)] == unicode(subkeyword):
                        subremainder = remainder[len(subkeyword):].strip()
                        return [keyword, subkeyword] + subremainder.split()
    return None
Use international MSISDN format according to SMPP protocol spec: 4.2.6.1.1
Use international MSISDN format according to SMPP protocol spec: 4.2.6.1.1
Python
bsd-3-clause
peterayeni/django-smsgateway,peterayeni/django-smsgateway,mvpoland/django-smsgateway,mvpoland/django-smsgateway,peterayeni/django-smsgateway,mvpoland/django-smsgateway,peterayeni/django-smsgateway
import logging

logger = logging.getLogger(__name__)


def strspn(source, allowed):
    newchrs = []
    for c in source:
        if c in allowed:
            newchrs.append(c)
    return u''.join(newchrs)


def check_cell_phone_number(number):
-     cleaned_number = strspn(number, u'+0123456789')
+     cleaned_number = strspn(number, u'0123456789')
-
-     if not u'+' in cleaned_number[:1]:
+     #if not u'+' in cleaned_number[:1]:
-         cleaned_number = u'+%s' % cleaned_number
+     #    cleaned_number = u'+%s' % cleaned_number
-
-     return cleaned_number
+     return int(cleaned_number)


def truncate_sms(text, max_length=160):
    if len(text) <= max_length:
        return text
    else:
        logger.error("Trying to send an SMS that is too long: %s", text)
        return text[:max_length-3] + '...'


def parse_sms(content):
    content = content.upper().strip()

    from smsgateway.backends.base import hook
    for keyword, subkeywords in hook.iteritems():
        if content[:len(keyword)] == unicode(keyword):
            remainder = content[len(keyword):].strip()
            if '*' in subkeywords:
                parts = remainder.split(u' ')
                subkeyword = parts[0].strip()
                if subkeyword in subkeywords:
                    return [keyword] + parts
                return keyword, remainder
            else:
                for subkeyword in subkeywords:
                    if remainder[:len(subkeyword)] == unicode(subkeyword):
                        subremainder = remainder[len(subkeyword):].strip()
                        return [keyword, subkeyword] + subremainder.split()
    return None
Use international MSISDN format according to SMPP protocol spec: 4.2.6.1.1
## Code Before:
import logging

logger = logging.getLogger(__name__)


def strspn(source, allowed):
    newchrs = []
    for c in source:
        if c in allowed:
            newchrs.append(c)
    return u''.join(newchrs)


def check_cell_phone_number(number):
    cleaned_number = strspn(number, u'+0123456789')

    if not u'+' in cleaned_number[:1]:
        cleaned_number = u'+%s' % cleaned_number

    return cleaned_number


def truncate_sms(text, max_length=160):
    if len(text) <= max_length:
        return text
    else:
        logger.error("Trying to send an SMS that is too long: %s", text)
        return text[:max_length-3] + '...'


def parse_sms(content):
    content = content.upper().strip()

    from smsgateway.backends.base import hook
    for keyword, subkeywords in hook.iteritems():
        if content[:len(keyword)] == unicode(keyword):
            remainder = content[len(keyword):].strip()
            if '*' in subkeywords:
                parts = remainder.split(u' ')
                subkeyword = parts[0].strip()
                if subkeyword in subkeywords:
                    return [keyword] + parts
                return keyword, remainder
            else:
                for subkeyword in subkeywords:
                    if remainder[:len(subkeyword)] == unicode(subkeyword):
                        subremainder = remainder[len(subkeyword):].strip()
                        return [keyword, subkeyword] + subremainder.split()
    return None

## Instruction:
Use international MSISDN format according to SMPP protocol spec: 4.2.6.1.1

## Code After:
import logging

logger = logging.getLogger(__name__)


def strspn(source, allowed):
    newchrs = []
    for c in source:
        if c in allowed:
            newchrs.append(c)
    return u''.join(newchrs)


def check_cell_phone_number(number):
    cleaned_number = strspn(number, u'0123456789')
    #if not u'+' in cleaned_number[:1]:
    #    cleaned_number = u'+%s' % cleaned_number
    return int(cleaned_number)


def truncate_sms(text, max_length=160):
    if len(text) <= max_length:
        return text
    else:
        logger.error("Trying to send an SMS that is too long: %s", text)
        return text[:max_length-3] + '...'


def parse_sms(content):
    content = content.upper().strip()

    from smsgateway.backends.base import hook
    for keyword, subkeywords in hook.iteritems():
        if content[:len(keyword)] == unicode(keyword):
            remainder = content[len(keyword):].strip()
            if '*' in subkeywords:
                parts = remainder.split(u' ')
                subkeyword = parts[0].strip()
                if subkeyword in subkeywords:
                    return [keyword] + parts
                return keyword, remainder
            else:
                for subkeyword in subkeywords:
                    if remainder[:len(subkeyword)] == unicode(subkeyword):
                        subremainder = remainder[len(subkeyword):].strip()
                        return [keyword, subkeyword] + subremainder.split()
    return None
...
def check_cell_phone_number(number):
    cleaned_number = strspn(number, u'0123456789')
    #if not u'+' in cleaned_number[:1]:
    #    cleaned_number = u'+%s' % cleaned_number
    return int(cleaned_number)
...
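The record above reduces a formatted number to bare digits (no `+`), per the international MSISDN format the commit subject cites from the SMPP spec. A condensed, self-contained restatement of the new behavior; the phone number is invented:

```python
# Self-contained sketch of the rewritten helper; the input is a made-up number.
def strspn(source, allowed):
    # Keep only the characters present in the allowed set.
    return u''.join(c for c in source if c in allowed)

def check_cell_phone_number(number):
    return int(strspn(number, u'0123456789'))

assert check_cell_phone_number(u'+32 475/12.34.56') == 32475123456
```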
724d7235e546fb79009800700fd74328f8171b8c
src/etc/tidy.py
src/etc/tidy.py
import sys, fileinput, subprocess

err=0
cols=78

try:
    result=subprocess.check_output([ "git", "config", "core.autocrlf" ])
    autocrlf=result.strip() == b"true"
except CalledProcessError:
    autocrlf=False

def report_err(s):
    global err
    print("%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), s))
    err=1

for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
    if line.find('\t') != -1 and fileinput.filename().find("Makefile") == -1:
        report_err("tab character")

    if not autocrlf and line.find('\r') != -1:
        report_err("CR character")

    line_len = len(line)-2 if autocrlf else len(line)-1
    if line_len > cols:
        report_err("line longer than %d chars" % cols)

sys.exit(err)
import sys, fileinput

err=0
cols=78

def report_err(s):
    global err
    print("%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), s))
    err=1

for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
    if line.find('\t') != -1 and fileinput.filename().find("Makefile") == -1:
        report_err("tab character")

    if line.find('\r') != -1:
        report_err("CR character")

    if len(line)-1 > cols:
        report_err("line longer than %d chars" % cols)

sys.exit(err)
Revert "Don't complain about \r when core.autocrlf is on in Git"
Revert "Don't complain about \r when core.autocrlf is on in Git" This reverts commit 828afaa2fa4cc9e3e53bda0ae3073abfcfa151ca.
Python
apache-2.0
SiegeLord/rust,gifnksm/rust,kwantam/rust,defuz/rust,aidancully/rust,avdi/rust,sae-bom/rust,erickt/rust,pelmers/rust,avdi/rust,pythonesque/rust,Ryman/rust,carols10cents/rust,robertg/rust,aepsil0n/rust,kwantam/rust,jbclements/rust,aepsil0n/rust,kmcallister/rust,mihneadb/rust,andars/rust,pczarn/rust,pczarn/rust,jashank/rust,KokaKiwi/rust,vhbit/rust,michaelballantyne/rust-gpu,sarojaba/rust-doc-korean,aneeshusa/rust,pelmers/rust,j16r/rust,P1start/rust,sae-bom/rust,jroesch/rust,mihneadb/rust,dinfuehr/rust,miniupnp/rust,gifnksm/rust,ktossell/rust,mdinger/rust,bombless/rust-docs-chinese,robertg/rust,michaelballantyne/rust-gpu,omasanori/rust,jashank/rust,KokaKiwi/rust,vhbit/rust,hauleth/rust,aepsil0n/rust,pythonesque/rust,andars/rust,ebfull/rand,GBGamer/rust,KokaKiwi/rust,ktossell/rust,SiegeLord/rust,mihneadb/rust,barosl/rust,vhbit/rust,miniupnp/rust,reem/rust,aturon/rust,aturon/rust,Ryman/rust,carols10cents/rust,dwillmer/rust,GBGamer/rust,erickt/rust,quornian/rust,erickt/rust,ebfull/rust,quornian/rust,ruud-v-a/rust,krzysz00/rust,omasanori/rust,aneeshusa/rust,vhbit/rust,cllns/rust,Ryman/rust,jroesch/rust,pczarn/rust,aturon/rust,XMPPwocky/rust,bombless/rust,TheNeikos/rust,pythonesque/rust,zachwick/rust,aneeshusa/rust,gifnksm/rust,sarojaba/rust-doc-korean,seanrivera/rust,AerialX/rust-rt-minimal,P1start/rust,KokaKiwi/rust,XMPPwocky/rust,dwillmer/rust,TheNeikos/rust,GBGamer/rust,waynenilsen/rand,servo/rust,aturon/rust,ruud-v-a/rust,kimroen/rust,aidancully/rust,emk/rust,AerialX/rust-rt-minimal,zachwick/rust,zubron/rust,mitsuhiko/rust,servo/rust,SiegeLord/rust,miniupnp/rust,zubron/rust,robertg/rust,LeoTestard/rust,pczarn/rust,reem/rust,erickt/rust,zaeleus/rust,rprichard/rust,seanrivera/rust,omasanori/rust,AerialX/rust,fabricedesre/rust,jroesch/rust,philyoon/rust,mvdnes/rust,stepancheg/rust-ide-rust,AerialX/rust,LeoTestard/rust,stepancheg/rust-ide-rust,GrahamDennis/rand,victorvde/rust,krzysz00/rust,hauleth/rust,nham/rust,jashank/rust,graydon/rust,jbclements/rust,servo/rust,jbclements/rust,ebfull/rust,servo/rust,pelmers/rust,krzysz00/rust,LeoTestard/rust,GBGamer/rust,sarojaba/rust-doc-korean,dwillmer/rust,stepancheg/rust-ide-rust,hauleth/rust,fabricedesre/rust,0x73/rust,jroesch/rust,mvdnes/rust,ruud-v-a/rust,michaelballantyne/rust-gpu,cllns/rust,miniupnp/rust,kwantam/rust,miniupnp/rust,untitaker/rust,richo/rust,hauleth/rust,mahkoh/rust,kimroen/rust,mahkoh/rust,jashank/rust,graydon/rust,mdinger/rust,defuz/rust,AerialX/rust-rt-minimal,aidancully/rust,LeoTestard/rust,kwantam/rust,quornian/rust,pelmers/rust,erickt/rust,avdi/rust,bhickey/rand,stepancheg/rust-ide-rust,carols10cents/rust,huonw/rand,cllns/rust,graydon/rust,andars/rust,kmcallister/rust,zubron/rust,dwillmer/rust,cllns/rust,vhbit/rust,zaeleus/rust,XMPPwocky/rust,cllns/rust,mahkoh/rust,aidancully/rust,mahkoh/rust,stepancheg/rust-ide-rust,jashank/rust,P1start/rust,GBGamer/rust,untitaker/rust,emk/rust,mihneadb/rust,SiegeLord/rust,bombless/rust,pczarn/rust,bombless/rust,carols10cents/rust,stepancheg/rust-ide-rust,jroesch/rust,jbclements/rust,emk/rust,kwantam/rust,j16r/rust,robertg/rust,seanrivera/rust,SiegeLord/rust,Ryman/rust,jroesch/rust,mvdnes/rust,aidancully/rust,richo/rust,untitaker/rust,hauleth/rust,0x73/rust,aepsil0n/rust,kmcallister/rust,AerialX/rust,emk/rust,pelmers/rust,emk/rust,zubron/rust,jbclements/rust,0x73/rust,LeoTestard/rust,dwillmer/rust,fabricedesre/rust,emk/rust,retep998/rand,jbclements/rust,GBGamer/rust,untitaker/rust,kmcallister/rust,jbclements/rust,jashank/rust,pythonesque/rust,AerialX/rust-rt-minimal,l0kod/rust,reem/rust,mitsuhiko/rust,nwin/rust,mitsuhiko/rust,kimroen/rust,rohitjoshi/rust,kmcallister/rust,AerialX/rust,erickt/rust,bombless/rust,bombless/rust,rprichard/rust,omasanori/rust,sarojaba/rust-doc-korean,achanda/rand,sae-bom/rust,andars/rust,nham/rust,kmcallister/rust,mitsuhiko/rust,ejjeong/rust,graydon/rust,philyoon/rust,zachwick/rust,ktossell/rust,victorvde/rust,dinfuehr/rust,AerialX/rust-rt-minimal,zachwick/rust,pelmers/rust,avdi/rust,pythonesque/rust,dinfuehr/rust,GBGamer/rust,mvdnes/rust,rohitjoshi/rust,quornian/rust,TheNeikos/rust,barosl/rust,pshc/rust,l0kod/rust,carols10cents/rust,mitsuhiko/rust,aturon/rust,P1start/rust,GBGamer/rust,defuz/rust,zaeleus/rust,nham/rust,ejjeong/rust,pythonesque/rust,reem/rust,jashank/rust,Ryman/rust,hauleth/rust,zaeleus/rust,SiegeLord/rust,barosl/rust,jashank/rust,servo/rust,sarojaba/rust-doc-korean,kmcallister/rust,aidancully/rust,mdinger/rust,nham/rust,philyoon/rust,servo/rust,SiegeLord/rust,jroesch/rust,KokaKiwi/rust,ktossell/rust,mdinger/rust,nwin/rust,untitaker/rust,avdi/rust,andars/rust,robertg/rust,sarojaba/rust-doc-korean,nham/rust,mitsuhiko/rust,dinfuehr/rust,j16r/rust,sae-bom/rust,mdinger/rust,philyoon/rust,pshc/rust,sae-bom/rust,nham/rust,Ryman/rust,zubron/rust,rprichard/rust,ejjeong/rust,mahkoh/rust,erickt/rust,ruud-v-a/rust,emk/rust,dwillmer/rust,mihneadb/rust,rohitjoshi/rust,pczarn/rust,sarojaba/rust-doc-korean,j16r/rust,TheNeikos/rust,AerialX/rust-rt-minimal,XMPPwocky/rust,dinfuehr/rust,nwin/rust,seanrivera/rust,0x73/rust,rprichard/rust,zubron/rust,shepmaster/rand,miniupnp/rust,l0kod/rust,KokaKiwi/rust,l0kod/rust,arthurprs/rand,0x73/rust,gifnksm/rust,mahkoh/rust,l0kod/rust,quornian/rust,KokaKiwi/rust,sae-bom/rust,nham/rust,fabricedesre/rust,richo/rust,pshc/rust,richo/rust,zachwick/rust,pshc/rust,fabricedesre/rust,ebfull/rust,barosl/rust,kwantam/rust,XMPPwocky/rust,nwin/rust,j16r/rust,reem/rust,victorvde/rust,ebfull/rust,rohitjoshi/rust,ejjeong/rust,gifnksm/rust,nwin/rust,miniupnp/rust,ebfull/rust,mitsuhiko/rust,philyoon/rust,l0kod/rust,vhbit/rust,jroesch/rust,rohitjoshi/rust,michaelballantyne/rust-gpu,zachwick/rust,pshc/rust,zubron/rust,bluss/rand,pczarn/rust,vhbit/rust,mihneadb/rust,ktossell/rust,michaelballantyne/rust-gpu,zubron/rust,dwillmer/rust,dinfuehr/rust,fabricedesre/rust,aneeshusa/rust,TheNeikos/rust,rprichard/rust,LeoTestard/rust,ejjeong/rust,carols10cents/rust,mvdnes/rust,graydon/rust,aepsil0n/rust,avdi/rust,fabricedesre/rust,ebfull/rust,victorvde/rust,0x73/rust,quornian/rust,ruud-v-a/rust,vhbit/rust
- import sys, fileinput, subprocess
+ import sys, fileinput

err=0
cols=78
-
- try:
-     result=subprocess.check_output([ "git", "config", "core.autocrlf" ])
-     autocrlf=result.strip() == b"true"
- except CalledProcessError:
-     autocrlf=False

def report_err(s):
    global err
    print("%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), s))
    err=1

for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
    if line.find('\t') != -1 and fileinput.filename().find("Makefile") == -1:
        report_err("tab character")

-     if not autocrlf and line.find('\r') != -1:
+     if line.find('\r') != -1:
        report_err("CR character")

-     line_len = len(line)-2 if autocrlf else len(line)-1
-     if line_len > cols:
+     if len(line)-1 > cols:
        report_err("line longer than %d chars" % cols)

sys.exit(err)
Revert "Don't complain about \r when core.autocrlf is on in Git"
## Code Before:
import sys, fileinput, subprocess

err=0
cols=78

try:
    result=subprocess.check_output([ "git", "config", "core.autocrlf" ])
    autocrlf=result.strip() == b"true"
except CalledProcessError:
    autocrlf=False

def report_err(s):
    global err
    print("%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), s))
    err=1

for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
    if line.find('\t') != -1 and fileinput.filename().find("Makefile") == -1:
        report_err("tab character")

    if not autocrlf and line.find('\r') != -1:
        report_err("CR character")

    line_len = len(line)-2 if autocrlf else len(line)-1
    if line_len > cols:
        report_err("line longer than %d chars" % cols)

sys.exit(err)

## Instruction:
Revert "Don't complain about \r when core.autocrlf is on in Git"

## Code After:
import sys, fileinput

err=0
cols=78

def report_err(s):
    global err
    print("%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), s))
    err=1

for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
    if line.find('\t') != -1 and fileinput.filename().find("Makefile") == -1:
        report_err("tab character")

    if line.find('\r') != -1:
        report_err("CR character")

    if len(line)-1 > cols:
        report_err("line longer than %d chars" % cols)

sys.exit(err)
# ... existing code ...
import sys, fileinput

# ... modified code ...
cols=78

...
    if line.find('\r') != -1:
        report_err("CR character")

...
    if len(line)-1 > cols:
        report_err("line longer than %d chars" % cols)

# ... rest of the code ...
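A note on what the revert above removes: under `core.autocrlf=true` a checkout carries `\r\n` line endings, which is why the pre-revert script subtracted 2 instead of 1 when measuring visible line width. A three-line illustration of that arithmetic (not from the repo):

```python
# Why the reverted code used len(line)-2 under autocrlf; illustrative only.
line_lf = "x" * 78 + "\n"      # LF checkout: visible width is len - 1
line_crlf = "x" * 78 + "\r\n"  # CRLF checkout: visible width is len - 2
assert len(line_lf) - 1 == len(line_crlf) - 2 == 78
```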
9510a0da5a6fee780e16db8f128f7c24bdb579d4
tests/test_post_import_hooks.py
tests/test_post_import_hooks.py
from __future__ import print_function

import unittest

import wrapt


class TestPostImportHooks(unittest.TestCase):

    def test_simple(self):
        invoked = []

        @wrapt.when_imported('socket')
        def hook_socket(module):
            self.assertEqual(module.__name__, 'socket')
            invoked.append(1)

        self.assertEqual(len(invoked), 0)

        import socket

        self.assertEqual(len(invoked), 1)

if __name__ == '__main__':
    unittest.main()
from __future__ import print_function

import unittest

import wrapt


class TestPostImportHooks(unittest.TestCase):

    def test_simple(self):
        invoked = []

        @wrapt.when_imported('this')
        def hook_this(module):
            self.assertEqual(module.__name__, 'this')
            invoked.append(1)

        self.assertEqual(len(invoked), 0)

        import this

        self.assertEqual(len(invoked), 1)

if __name__ == '__main__':
    unittest.main()
Adjust test to use different module as socket imported by coverage tools.
Adjust test to use different module as socket imported by coverage tools.
Python
bsd-2-clause
linglaiyao1314/wrapt,pombredanne/python-lazy-object-proxy,linglaiyao1314/wrapt,pombredanne/wrapt,akash1808/wrapt,pombredanne/wrapt,github4ry/wrapt,wujuguang/wrapt,pombredanne/python-lazy-object-proxy,akash1808/wrapt,ionelmc/python-lazy-object-proxy,ionelmc/python-lazy-object-proxy,github4ry/wrapt,GrahamDumpleton/wrapt,GrahamDumpleton/wrapt,wujuguang/wrapt
from __future__ import print_function

import unittest

import wrapt


class TestPostImportHooks(unittest.TestCase):

    def test_simple(self):
        invoked = []

-         @wrapt.when_imported('socket')
+         @wrapt.when_imported('this')
-         def hook_socket(module):
+         def hook_this(module):
-             self.assertEqual(module.__name__, 'socket')
+             self.assertEqual(module.__name__, 'this')
            invoked.append(1)

        self.assertEqual(len(invoked), 0)

-         import socket
+         import this

        self.assertEqual(len(invoked), 1)

if __name__ == '__main__':
    unittest.main()
Adjust test to use different module as socket imported by coverage tools.
## Code Before:
from __future__ import print_function

import unittest

import wrapt


class TestPostImportHooks(unittest.TestCase):

    def test_simple(self):
        invoked = []

        @wrapt.when_imported('socket')
        def hook_socket(module):
            self.assertEqual(module.__name__, 'socket')
            invoked.append(1)

        self.assertEqual(len(invoked), 0)

        import socket

        self.assertEqual(len(invoked), 1)

if __name__ == '__main__':
    unittest.main()

## Instruction:
Adjust test to use different module as socket imported by coverage tools.

## Code After:
from __future__ import print_function

import unittest

import wrapt


class TestPostImportHooks(unittest.TestCase):

    def test_simple(self):
        invoked = []

        @wrapt.when_imported('this')
        def hook_this(module):
            self.assertEqual(module.__name__, 'this')
            invoked.append(1)

        self.assertEqual(len(invoked), 0)

        import this

        self.assertEqual(len(invoked), 1)

if __name__ == '__main__':
    unittest.main()
# ... existing code ...
        @wrapt.when_imported('this')
        def hook_this(module):
            self.assertEqual(module.__name__, 'this')
            invoked.append(1)
# ... modified code ...
        import this
# ... rest of the code ...
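The likely failure mode behind the record above, sketched below: wrapt fires a post-import hook immediately at registration time when the target module is already in `sys.modules` (PEP 369 semantics), and coverage tooling imports `socket` before the test body runs, so the `len(invoked) == 0` assertion could not hold. `this` is a module nothing imports by accident. This sketch is based on wrapt's documented hook semantics, not taken from the repo:

```python
# Standalone sketch of the immediate-fire behavior.
import sys
import socket  # stand-in for the coverage tool's early import
import wrapt

fired = []

@wrapt.when_imported('socket')
def hook(module):
    fired.append(module.__name__)

# The hook has already run, before any later `import socket` in a test body.
assert 'socket' in sys.modules and fired == ['socket']
```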
fba4fdf426b0a29ca06deb67587c2bd804adb017
tbgxmlutils/xmlutils.py
tbgxmlutils/xmlutils.py
from xml.dom import minidom
import xml.etree.ElementTree as ET
import xmltodict


def add(k, parent=None, txt=None, attrs=None):
    if parent is None:
        handle = ET.Element(k)
    else:
        handle = ET.SubElement(parent, k)
    if txt:
        handle.text = unicode(txt)
    try:
        for k, v in attrs.iteritems():
            handle.attrib[k] = v
    except AttributeError:
        pass
    return handle


def etree2xml(e, encoding='UTF-8'):
    return ET.tostring(e, encoding=encoding) if encoding else ET.tostring(e)


def pretty(xml=None, fn=None):
    if fn is not None:
        xml = minidom.parse(fn)
    elif not isinstance(xml, minidom.Document):
        xml = minidom.parseString(xml)
    return xml.toprettyxml(indent=' ')


def xml_fn_to_json(fn):
    fh = open(fn, 'r')
    json = xmltodict.parse(fh.read())
    return json
from xml.dom import minidom
import lxml.etree as ET
import xmltodict


def add(k, parent=None, txt=None, attrs=None):
    if parent is None:
        handle = ET.Element(k)
    else:
        handle = ET.SubElement(parent, k)
    if txt:
        handle.text = unicode(txt)
    try:
        for k, v in attrs.iteritems():
            handle.attrib[k] = v
    except AttributeError:
        pass
    return handle


def etree2xml(e, encoding='UTF-8'):
    return ET.tostring(e, encoding=encoding) if encoding else ET.tostring(e)


def pretty(xml=None, fn=None):
    if fn is not None:
        xml = minidom.parse(fn)
    elif not isinstance(xml, minidom.Document):
        xml = minidom.parseString(xml)
    return xml.toprettyxml(indent=' ')


def xml_fn_to_json(fn):
    fh = open(fn, 'r')
    json = xmltodict.parse(fh.read())
    return json
Use lxml instead of elementtree.
Use lxml instead of elementtree.
Python
mit
Schwarzschild/TBGXMLUtils
from xml.dom import minidom
- import xml.etree.ElementTree as ET
+ import lxml.etree as ET
import xmltodict


def add(k, parent=None, txt=None, attrs=None):
    if parent is None:
        handle = ET.Element(k)
    else:
        handle = ET.SubElement(parent, k)
    if txt:
        handle.text = unicode(txt)
    try:
        for k, v in attrs.iteritems():
            handle.attrib[k] = v
    except AttributeError:
        pass
    return handle


def etree2xml(e, encoding='UTF-8'):
    return ET.tostring(e, encoding=encoding) if encoding else ET.tostring(e)


def pretty(xml=None, fn=None):
    if fn is not None:
        xml = minidom.parse(fn)
    elif not isinstance(xml, minidom.Document):
        xml = minidom.parseString(xml)
    return xml.toprettyxml(indent=' ')


def xml_fn_to_json(fn):
    fh = open(fn, 'r')
    json = xmltodict.parse(fh.read())
    return json
Use lxml instead of elementtree.
## Code Before:
from xml.dom import minidom
import xml.etree.ElementTree as ET
import xmltodict


def add(k, parent=None, txt=None, attrs=None):
    if parent is None:
        handle = ET.Element(k)
    else:
        handle = ET.SubElement(parent, k)
    if txt:
        handle.text = unicode(txt)
    try:
        for k, v in attrs.iteritems():
            handle.attrib[k] = v
    except AttributeError:
        pass
    return handle


def etree2xml(e, encoding='UTF-8'):
    return ET.tostring(e, encoding=encoding) if encoding else ET.tostring(e)


def pretty(xml=None, fn=None):
    if fn is not None:
        xml = minidom.parse(fn)
    elif not isinstance(xml, minidom.Document):
        xml = minidom.parseString(xml)
    return xml.toprettyxml(indent=' ')


def xml_fn_to_json(fn):
    fh = open(fn, 'r')
    json = xmltodict.parse(fh.read())
    return json

## Instruction:
Use lxml instead of elementtree.

## Code After:
from xml.dom import minidom
import lxml.etree as ET
import xmltodict


def add(k, parent=None, txt=None, attrs=None):
    if parent is None:
        handle = ET.Element(k)
    else:
        handle = ET.SubElement(parent, k)
    if txt:
        handle.text = unicode(txt)
    try:
        for k, v in attrs.iteritems():
            handle.attrib[k] = v
    except AttributeError:
        pass
    return handle


def etree2xml(e, encoding='UTF-8'):
    return ET.tostring(e, encoding=encoding) if encoding else ET.tostring(e)


def pretty(xml=None, fn=None):
    if fn is not None:
        xml = minidom.parse(fn)
    elif not isinstance(xml, minidom.Document):
        xml = minidom.parseString(xml)
    return xml.toprettyxml(indent=' ')


def xml_fn_to_json(fn):
    fh = open(fn, 'r')
    json = xmltodict.parse(fh.read())
    return json
...
from xml.dom import minidom
import lxml.etree as ET
import xmltodict
...
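A usage sketch for the helpers in the record above (Python 2, given the `unicode()` call); the book data is invented and the import line is assumed, not shown in the repo. One practical difference after the swap: with an explicit `encoding`, `lxml.etree.tostring` prepends an XML declaration to the serialized bytes.

```python
# Hypothetical usage; assumes the helpers are importable from the module.
from tbgxmlutils.xmlutils import add, etree2xml

root = add('books')
book = add('book', parent=root, attrs={'id': u'1'})
add('title', parent=book, txt=u'Dune')
print(etree2xml(root))  # bytes, led by <?xml version='1.0' encoding='UTF-8'?>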
ab5aac0c9b0e075901c4cd8dd5d134e79f0e0110
brasileirao/spiders/results_spider.py
brasileirao/spiders/results_spider.py
import scrapy
import scrapy.selector
from brasileirao.items import BrasileiraoItem
import hashlib


class ResultsSpider(scrapy.Spider):
    name = "results"
    start_urls = [
        'https://esporte.uol.com.br/futebol/campeonatos/brasileirao/jogos/',
    ]

    def parse(self, response):
        actual_round = 0
        for rodada in response.css('.rodadas .confrontos li'):
            actual_round += 1
            for game in rodada.css(".confronto"):
                home_team = game.css(".partida .time1")
                away_team = game.css(".partida .time2")
                item = BrasileiraoItem()
                item['rodada'] = actual_round
                item['home_team'] = home_team.css("abbr::attr(title)").extract_first().encode('utf8')
                item['away_team'] = away_team.css("abbr::attr(title)").extract_first().encode('utf8')
                item['home_score'] = home_team.css(".gols::text").extract_first()
                item['away_score'] = away_team.css(".gols::text").extract_first()
                item['date'] = game.css(".info-partida time::attr(datetime)").extract_first()
                id = item['home_team'] + item['away_team']
                item['id'] = hashlib.md5(id).hexdigest()
                yield item

import scrapy
import scrapy.selector
from brasileirao.items import BrasileiraoItem
import hashlib


class ResultsSpider(scrapy.Spider):
    name = "results"
    start_urls = [
        'https://esporte.uol.com.br/futebol/campeonatos/brasileirao/jogos/',
    ]

    def parse(self, response):
        actual_round = 0
        for rodada in response.css('.rodadas .confrontos li'):
            actual_round += 1
            for game in rodada.css(".confronto"):
                home_team = game.css(".partida .time1")
                away_team = game.css(".partida .time2")
                item = BrasileiraoItem()
                item['rodada'] = actual_round
                item['home_team'] = home_team.css("abbr::attr(title)").extract_first()
                item['away_team'] = away_team.css("abbr::attr(title)").extract_first()
                item['home_score'] = home_team.css(".gols::text").extract_first()
                item['away_score'] = away_team.css(".gols::text").extract_first()
                item['date'] = game.css(".info-partida time::attr(datetime)").extract_first()
                id = item['home_team'] + item['away_team']
                item['id'] = hashlib.md5(id).hexdigest()
                yield item
Set utf-8 as default encoding.
Set utf-8 as default encoding.
Python
mit
pghilardi/live-football-client
+
import scrapy
import scrapy.selector
from brasileirao.items import BrasileiraoItem
import hashlib


class ResultsSpider(scrapy.Spider):
    name = "results"
    start_urls = [
        'https://esporte.uol.com.br/futebol/campeonatos/brasileirao/jogos/',
    ]

    def parse(self, response):
        actual_round = 0
        for rodada in response.css('.rodadas .confrontos li'):
            actual_round += 1
            for game in rodada.css(".confronto"):
                home_team = game.css(".partida .time1")
                away_team = game.css(".partida .time2")
                item = BrasileiraoItem()
                item['rodada'] = actual_round
-                 item['home_team'] = home_team.css("abbr::attr(title)").extract_first().encode('utf8')
+                 item['home_team'] = home_team.css("abbr::attr(title)").extract_first()
-                 item['away_team'] = away_team.css("abbr::attr(title)").extract_first().encode('utf8')
+                 item['away_team'] = away_team.css("abbr::attr(title)").extract_first()
                item['home_score'] = home_team.css(".gols::text").extract_first()
                item['away_score'] = away_team.css(".gols::text").extract_first()
                item['date'] = game.css(".info-partida time::attr(datetime)").extract_first()
                id = item['home_team'] + item['away_team']
                item['id'] = hashlib.md5(id).hexdigest()
                yield item
Set utf-8 as default encoding.
## Code Before:
import scrapy
import scrapy.selector
from brasileirao.items import BrasileiraoItem
import hashlib


class ResultsSpider(scrapy.Spider):
    name = "results"
    start_urls = [
        'https://esporte.uol.com.br/futebol/campeonatos/brasileirao/jogos/',
    ]

    def parse(self, response):
        actual_round = 0
        for rodada in response.css('.rodadas .confrontos li'):
            actual_round += 1
            for game in rodada.css(".confronto"):
                home_team = game.css(".partida .time1")
                away_team = game.css(".partida .time2")
                item = BrasileiraoItem()
                item['rodada'] = actual_round
                item['home_team'] = home_team.css("abbr::attr(title)").extract_first().encode('utf8')
                item['away_team'] = away_team.css("abbr::attr(title)").extract_first().encode('utf8')
                item['home_score'] = home_team.css(".gols::text").extract_first()
                item['away_score'] = away_team.css(".gols::text").extract_first()
                item['date'] = game.css(".info-partida time::attr(datetime)").extract_first()
                id = item['home_team'] + item['away_team']
                item['id'] = hashlib.md5(id).hexdigest()
                yield item

## Instruction:
Set utf-8 as default encoding.

## Code After:
import scrapy
import scrapy.selector
from brasileirao.items import BrasileiraoItem
import hashlib


class ResultsSpider(scrapy.Spider):
    name = "results"
    start_urls = [
        'https://esporte.uol.com.br/futebol/campeonatos/brasileirao/jogos/',
    ]

    def parse(self, response):
        actual_round = 0
        for rodada in response.css('.rodadas .confrontos li'):
            actual_round += 1
            for game in rodada.css(".confronto"):
                home_team = game.css(".partida .time1")
                away_team = game.css(".partida .time2")
                item = BrasileiraoItem()
                item['rodada'] = actual_round
                item['home_team'] = home_team.css("abbr::attr(title)").extract_first()
                item['away_team'] = away_team.css("abbr::attr(title)").extract_first()
                item['home_score'] = home_team.css(".gols::text").extract_first()
                item['away_score'] = away_team.css(".gols::text").extract_first()
                item['date'] = game.css(".info-partida time::attr(datetime)").extract_first()
                id = item['home_team'] + item['away_team']
                item['id'] = hashlib.md5(id).hexdigest()
                yield item
...
import scrapy
...
                item['rodada'] = actual_round
                item['home_team'] = home_team.css("abbr::attr(title)").extract_first()
                item['away_team'] = away_team.css("abbr::attr(title)").extract_first()
                item['home_score'] = home_team.css(".gols::text").extract_first()
...
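One wrinkle worth flagging next to the record above: `hashlib.md5` still wants bytes, so dropping `.encode('utf8')` at extraction time only works while the concatenated id survives the runtime's default encoding. On Python 3, or with accented Brazilian team names on Python 2, the encode belongs at hash time instead. A standalone sketch with invented team names:

```python
# Standalone sketch; team names are examples, not scraped data.
import hashlib

home, away = u'Grêmio', u'São Paulo'
digest = hashlib.md5((home + away).encode('utf-8')).hexdigest()
print(digest)
```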
cddc9b20855147541859976229e1dc34a611de26
twitterfunctions.py
twitterfunctions.py
import tweepy


def authenticatetwitter(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET):
    # Authenticate with Twitter using keys and secrets and return
    # an 'api' object

    # Authorize with consumer credentials and get an access token
    # with access credentials
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)

    # get an authenticated instance of the API class
    api = tweepy.API(auth)

    # return API object 'api'
    return api


def sendtweet(api, tweet):
    # Send 'tweet' using Tweepy API function
    api.update_status(tweet)
import tweepy


def authenticatetwitter(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET):
    # Authenticate with Twitter using keys and secrets and return
    # an 'api' object

    # Authorize with consumer credentials and get an access token
    # with access credentials
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)

    # get an authenticated instance of the API class
    api = tweepy.API(auth)

    # return API object 'api'
    return api


def sendtweet(api, tweet):
    # Send 'tweet' using Tweepy API function
    api.update_status(status=tweet)
Change the api.update_status() call to explicitly state the 'status' message.
Change the api.update_status() call to explicitly state the 'status' message.

- A recent version of Tweepy required it to be explicit, no harm in always being so
Python
agpl-3.0
pattonwebz/ScheduledTweetBot
import tweepy


def authenticatetwitter(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET):
    # Authenticate with Twitter using keys and secrets and return
    # an 'api' object
-
+
-     # Authorize with consumer credentials and get an access token
+     # Authorize with consumer credentials and get an access token
    # with access credentials
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
-
+
    # get an authenticated instance of the API class
    api = tweepy.API(auth)
-
+
    # return API object 'api'
    return api
-
+
def sendtweet(api, tweet):
    # Send 'tweet' using Tweepy API function
-     api.update_status(tweet)
+     api.update_status(status=tweet)
-
Change the api.update_status() call to explicitly state the 'status' message.
## Code Before:
import tweepy


def authenticatetwitter(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET):
    # Authenticate with Twitter using keys and secrets and return
    # an 'api' object

    # Authorize with consumer credentials and get an access token
    # with access credentials
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)

    # get an authenticated instance of the API class
    api = tweepy.API(auth)

    # return API object 'api'
    return api


def sendtweet(api, tweet):
    # Send 'tweet' using Tweepy API function
    api.update_status(tweet)

## Instruction:
Change the api.update_status() call to explicitly state the 'status' message.

## Code After:
import tweepy


def authenticatetwitter(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET):
    # Authenticate with Twitter using keys and secrets and return
    # an 'api' object

    # Authorize with consumer credentials and get an access token
    # with access credentials
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)

    # get an authenticated instance of the API class
    api = tweepy.API(auth)

    # return API object 'api'
    return api


def sendtweet(api, tweet):
    # Send 'tweet' using Tweepy API function
    api.update_status(status=tweet)
// ... existing code ...
    # an 'api' object

    # Authorize with consumer credentials and get an access token
    # with access credentials
// ... modified code ...
    auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)

    # get an authenticated instance of the API class
...
    api = tweepy.API(auth)

    # return API object 'api'
...
    return api
...
    # Send 'tweet' using Tweepy API function
    api.update_status(status=tweet)
// ... rest of the code ...
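A usage sketch for the record above; every credential is a placeholder. The keyword form is the robust one because it keeps working even if `update_status` gains or reorders leading positional parameters between tweepy releases, which, going by the commit message, is what broke the positional call:

```python
# Hypothetical usage; placeholder credentials, assumes the two functions
# above are importable.
api = authenticatetwitter('CONSUMER_KEY', 'CONSUMER_SECRET',
                          'ACCESS_KEY', 'ACCESS_SECRET')
sendtweet(api, "Scheduled tweet")  # internally: api.update_status(status="Scheduled tweet")
```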
db19dfa17261c3d04de0202b2809ba8abb70326b
tests/unit/test_moxstubout.py
tests/unit/test_moxstubout.py
from oslotest import base
from oslotest import moxstubout


class TestMoxStubout(base.BaseTestCase):

    def _stubable(self):
        pass

    def test_basic_stubout(self):
        f = self.useFixture(moxstubout.MoxStubout())
        before = TestMoxStubout._stubable
        f.mox.StubOutWithMock(TestMoxStubout, '_stubable')
        after = TestMoxStubout._stubable
        self.assertNotEqual(before, after)
        f.cleanUp()
        after2 = TestMoxStubout._stubable
        self.assertEqual(before, after2)
from oslotest import base
from oslotest import moxstubout


class TestMoxStubout(base.BaseTestCase):

    def _stubable(self):
        pass

    def test_basic_stubout(self):
        f = self.useFixture(moxstubout.MoxStubout())
        before = TestMoxStubout._stubable
        f.mox.StubOutWithMock(TestMoxStubout, '_stubable')
        after = TestMoxStubout._stubable
        self.assertNotEqual(before, after)
        f.cleanUp()
        after2 = TestMoxStubout._stubable
        self.assertEqual(before, after2)
        f._clear_cleanups()
Fix build break with Fixtures 1.3
Fix build break with Fixtures 1.3

Our explicit call to cleanUp messes things up in latest fixture,
so we need to call _clear_cleanups to stop the test from breaking

Change-Id: I8ce2309a94736b47fb347f37ab4027857e19c8a8
Python
apache-2.0
openstack/oslotest,openstack/oslotest
from oslotest import base
from oslotest import moxstubout


class TestMoxStubout(base.BaseTestCase):

    def _stubable(self):
        pass

    def test_basic_stubout(self):
        f = self.useFixture(moxstubout.MoxStubout())
        before = TestMoxStubout._stubable
        f.mox.StubOutWithMock(TestMoxStubout, '_stubable')
        after = TestMoxStubout._stubable
        self.assertNotEqual(before, after)
        f.cleanUp()
        after2 = TestMoxStubout._stubable
        self.assertEqual(before, after2)
+         f._clear_cleanups()
Fix build break with Fixtures 1.3
## Code Before:
from oslotest import base
from oslotest import moxstubout


class TestMoxStubout(base.BaseTestCase):

    def _stubable(self):
        pass

    def test_basic_stubout(self):
        f = self.useFixture(moxstubout.MoxStubout())
        before = TestMoxStubout._stubable
        f.mox.StubOutWithMock(TestMoxStubout, '_stubable')
        after = TestMoxStubout._stubable
        self.assertNotEqual(before, after)
        f.cleanUp()
        after2 = TestMoxStubout._stubable
        self.assertEqual(before, after2)

## Instruction:
Fix build break with Fixtures 1.3

## Code After:
from oslotest import base
from oslotest import moxstubout


class TestMoxStubout(base.BaseTestCase):

    def _stubable(self):
        pass

    def test_basic_stubout(self):
        f = self.useFixture(moxstubout.MoxStubout())
        before = TestMoxStubout._stubable
        f.mox.StubOutWithMock(TestMoxStubout, '_stubable')
        after = TestMoxStubout._stubable
        self.assertNotEqual(before, after)
        f.cleanUp()
        after2 = TestMoxStubout._stubable
        self.assertEqual(before, after2)
        f._clear_cleanups()
# ... existing code ...
        self.assertEqual(before, after2)
        f._clear_cleanups()
# ... rest of the code ...
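Restating the workaround above: `useFixture` registers the fixture's `cleanUp` with the test's cleanup stack, so the test's own explicit `f.cleanUp()` means teardown runs twice, and `_clear_cleanups()` empties the fixture's internal list so the second, framework-driven pass has nothing left to trip over. A minimal sketch of the same pattern, assuming fixtures >= 1.3 for the `_setUp` hook:

```python
# Illustrative fixture; the print is a stand-in for real teardown work.
import fixtures

class Demo(fixtures.Fixture):
    def _setUp(self):
        self.addCleanup(print, "cleaning up")

f = Demo()
f.setUp()
f.cleanUp()          # explicit teardown, mirroring the test above
f._clear_cleanups()  # leave nothing behind for a second cleanUp() pass
```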
10e26b52f94bb1a6345d2c1540a0a09a82b7831c
baseflask/refresh_varsnap.py
baseflask/refresh_varsnap.py
import os

from syspath import git_root  # NOQA

from app import serve

os.environ['ENV'] = 'production'

app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/robots.txt')
app.get('/asdf')
import os

from syspath import git_root  # NOQA

from app import serve

os.environ['ENV'] = 'production'

app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/humans.txt')
app.get('/robots.txt')
app.get('/.well-known/security.txt')
app.get('/asdf')
Update varsnap refresh with new endpoints
Update varsnap refresh with new endpoints
Python
mit
albertyw/base-flask,albertyw/base-flask,albertyw/base-flask,albertyw/base-flask
import os

from syspath import git_root  # NOQA

from app import serve

os.environ['ENV'] = 'production'

app = serve.app.test_client()
app.get('/')
app.get('/health')
+ app.get('/humans.txt')
app.get('/robots.txt')
+ app.get('/.well-known/security.txt')
app.get('/asdf')
Update varsnap refresh with new endpoints
## Code Before:
import os

from syspath import git_root  # NOQA

from app import serve

os.environ['ENV'] = 'production'

app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/robots.txt')
app.get('/asdf')

## Instruction:
Update varsnap refresh with new endpoints

## Code After:
import os

from syspath import git_root  # NOQA

from app import serve

os.environ['ENV'] = 'production'

app = serve.app.test_client()
app.get('/')
app.get('/health')
app.get('/humans.txt')
app.get('/robots.txt')
app.get('/.well-known/security.txt')
app.get('/asdf')
...
app.get('/health')
app.get('/humans.txt')
app.get('/robots.txt')
app.get('/.well-known/security.txt')
app.get('/asdf')
...
7b77297f9099019f4424c7115deb933dd51eaf80
setup.py
setup.py
from distutils.core import setup, Extension

setup(
    name = 'Encoder',
    version = '1.0',
    description = 'Encode stuff',
    ext_modules = [
        Extension(
            name = '_encoder',
            sources = [
                'src/encoder.c',
                'src/module.c',
            ],
            include_dirs = [
                'include',
            ],
        ),
    ],
)
from distutils.core import setup, Extension

setup(
    name = 'Encoder',
    version = '1.0',
    description = 'Encode stuff',
    ext_modules = [
        Extension(
            name = '_encoder',
            sources = [
                'src/encoder.c',
                'src/module.c',
            ],
            include_dirs = [
                'include',
            ],
            depends = [
                'include/buffer.h', # As this is essentially a source file
            ],
        ),
    ],
)
Include buffer.h as a dependency for rebuilds
Include buffer.h as a dependency for rebuilds
Python
apache-2.0
blake-sheridan/py-serializer,blake-sheridan/py-serializer
from distutils.core import setup, Extension

setup(
    name = 'Encoder',
    version = '1.0',
    description = 'Encode stuff',
    ext_modules = [
        Extension(
            name = '_encoder',
            sources = [
                'src/encoder.c',
                'src/module.c',
            ],
            include_dirs = [
                'include',
            ],
+             depends = [
+                 'include/buffer.h', # As this is essentially a source file
+             ],
        ),
    ],
)
Include buffer.h as a dependency for rebuilds
## Code Before:
from distutils.core import setup, Extension

setup(
    name = 'Encoder',
    version = '1.0',
    description = 'Encode stuff',
    ext_modules = [
        Extension(
            name = '_encoder',
            sources = [
                'src/encoder.c',
                'src/module.c',
            ],
            include_dirs = [
                'include',
            ],
        ),
    ],
)

## Instruction:
Include buffer.h as a dependency for rebuilds

## Code After:
from distutils.core import setup, Extension

setup(
    name = 'Encoder',
    version = '1.0',
    description = 'Encode stuff',
    ext_modules = [
        Extension(
            name = '_encoder',
            sources = [
                'src/encoder.c',
                'src/module.c',
            ],
            include_dirs = [
                'include',
            ],
            depends = [
                'include/buffer.h', # As this is essentially a source file
            ],
        ),
    ],
)
# ... existing code ...
            ],
            depends = [
                'include/buffer.h', # As this is essentially a source file
            ],
        ),
# ... rest of the code ...
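How `depends` takes effect, roughly: distutils' `build_ext` feeds `sources + ext.depends` into `distutils.dep_util.newer_group` against the built artifact, so touching the header now forces a recompile. A sketch of that check; the target path is hypothetical and this is a simplified model of the real code path:

```python
# Rough model of build_ext's skip/rebuild decision, not the exact code path.
from distutils.dep_util import newer_group

sources = ['src/encoder.c', 'src/module.c']
depends = ['include/buffer.h']
target = 'build/lib/_encoder.so'  # hypothetical built-extension path

# True when any source or declared dependency is newer than the target.
rebuild = newer_group(sources + depends, target, missing='newer')
print('rebuild needed:', rebuild)
```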
98dd8df628079357b26a663d24adcbc6ac4d3794
indra/__init__.py
indra/__init__.py
from __future__ import print_function, unicode_literals
import logging

__version__ = '1.3.0'

logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
                    level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('rdflib').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
from __future__ import print_function, unicode_literals
import logging

__version__ = '1.3.0'

__all__ = ['bel', 'biopax', 'trips', 'reach', 'index_cards', 'sparser',
           'databases', 'literature',
           'preassembler', 'assemblers', 'mechlinker', 'belief',
           'tools', 'util']
'''
#############
# For now these imports are disabled because
# (1) Every import would load everything in INDRA which is time consuming and
# (2) Optional dependencies in some modules will try to be loaded even if
# they are not intended to be used
##################
# Core
import statements
# Input processors
from indra import bel
from indra import biopax
from indra import trips
from indra import reach
from indra import index_cards
# Clients
from indra import databases
from indra import literature
# Assemblers
from indra import preassembler
from indra import assemblers
from indra import mechlinker
from indra import belief
# Tools and utils
from indra import tools
from indra import util
'''

logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
                    level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('rdflib').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
Add commented out top-level imports
Add commented out top-level imports
Python
bsd-2-clause
pvtodorov/indra,sorgerlab/belpy,jmuhlich/indra,johnbachman/belpy,jmuhlich/indra,sorgerlab/indra,pvtodorov/indra,bgyori/indra,johnbachman/indra,jmuhlich/indra,sorgerlab/belpy,sorgerlab/indra,pvtodorov/indra,bgyori/indra,bgyori/indra,sorgerlab/indra,johnbachman/indra,pvtodorov/indra,johnbachman/belpy,johnbachman/belpy,sorgerlab/belpy,johnbachman/indra
from __future__ import print_function, unicode_literals
import logging

__version__ = '1.3.0'
+
+ __all__ = ['bel', 'biopax', 'trips', 'reach', 'index_cards', 'sparser',
+            'databases', 'literature',
+            'preassembler', 'assemblers', 'mechlinker', 'belief',
+            'tools', 'util']
+ '''
+ #############
+ # For now these imports are disabled because
+ # (1) Every import would load everything in INDRA which is time consuming and
+ # (2) Optional dependencies in some modules will try to be loaded even if
+ # they are not intended to be used
+ ##################
+ # Core
+ import statements
+ # Input processors
+ from indra import bel
+ from indra import biopax
+ from indra import trips
+ from indra import reach
+ from indra import index_cards
+ # Clients
+ from indra import databases
+ from indra import literature
+ # Assemblers
+ from indra import preassembler
+ from indra import assemblers
+ from indra import mechlinker
+ from indra import belief
+ # Tools and utils
+ from indra import tools
+ from indra import util
+ '''

logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
                    level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('rdflib').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
Add commented out top-level imports
## Code Before:
from __future__ import print_function, unicode_literals
import logging

__version__ = '1.3.0'

logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
                    level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('rdflib').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)

## Instruction:
Add commented out top-level imports

## Code After:
from __future__ import print_function, unicode_literals
import logging

__version__ = '1.3.0'

__all__ = ['bel', 'biopax', 'trips', 'reach', 'index_cards', 'sparser',
           'databases', 'literature',
           'preassembler', 'assemblers', 'mechlinker', 'belief',
           'tools', 'util']
'''
#############
# For now these imports are disabled because
# (1) Every import would load everything in INDRA which is time consuming and
# (2) Optional dependencies in some modules will try to be loaded even if
# they are not intended to be used
##################
# Core
import statements
# Input processors
from indra import bel
from indra import biopax
from indra import trips
from indra import reach
from indra import index_cards
# Clients
from indra import databases
from indra import literature
# Assemblers
from indra import preassembler
from indra import assemblers
from indra import mechlinker
from indra import belief
# Tools and utils
from indra import tools
from indra import util
'''

logging.basicConfig(format='%(levelname)s: indra/%(name)s - %(message)s',
                    level=logging.INFO)
logging.getLogger('requests').setLevel(logging.ERROR)
logging.getLogger('urllib3').setLevel(logging.ERROR)
logging.getLogger('rdflib').setLevel(logging.ERROR)
logging.getLogger('boto3').setLevel(logging.CRITICAL)
logging.getLogger('botocore').setLevel(logging.CRITICAL)
...
__version__ = '1.3.0'

__all__ = ['bel', 'biopax', 'trips', 'reach', 'index_cards', 'sparser',
           'databases', 'literature',
           'preassembler', 'assemblers', 'mechlinker', 'belief',
           'tools', 'util']
'''
#############
# For now these imports are disabled because
# (1) Every import would load everything in INDRA which is time consuming and
# (2) Optional dependencies in some modules will try to be loaded even if
# they are not intended to be used
##################
# Core
import statements
# Input processors
from indra import bel
from indra import biopax
from indra import trips
from indra import reach
from indra import index_cards
# Clients
from indra import databases
from indra import literature
# Assemblers
from indra import preassembler
from indra import assemblers
from indra import mechlinker
from indra import belief
# Tools and utils
from indra import tools
from indra import util
'''
...
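Since the record above disables eager imports purely for load-time cost, a lazy alternative is worth noting next to it (a sketch of a different technique, not what the commit does): on Python 3.7+ a module-level `__getattr__` (PEP 562) keeps `indra.bel`-style attribute access while deferring each submodule import until first use. The commit itself targets Python 2 compatibility, so this would not have applied at the time:

```python
# Sketch only; requires Python 3.7+ and assumes it lives in the package
# __init__ next to the __all__ list shown in the record above.
import importlib

def __getattr__(name):
    if name in __all__:
        return importlib.import_module('.' + name, __name__)
    raise AttributeError(name)
```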
f813f72ad02bbf4b8e4ad0b190064879f6c3df3e
toolbox/metrics.py
toolbox/metrics.py
import keras.backend as K
import numpy as np


def psnr(y_true, y_pred):
    """Peak signal-to-noise ratio averaged over samples and channels."""
    mse = K.mean(K.square(y_true - y_pred), axis=(1, 2))
    return K.mean(20 * K.log(255 / K.sqrt(mse)) / np.log(10))


def ssim(y_true, y_pred):
    """structural similarity measurement system."""
    ## K1, K2 are two constants, much smaller than 1
    K1 = 0.04
    K2 = 0.06

    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = (sig_x * sig_y) ** 0.5

    ## L, number of pixels, C1, C2, two constants
    L = 33
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2

    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy * C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
import keras.backend as K
import numpy as np


def psnr(y_true, y_pred):
    """Peak signal-to-noise ratio averaged over samples."""
    mse = K.mean(K.square(y_true - y_pred), axis=(-3, -2, -1))
    return K.mean(20 * K.log(255 / K.sqrt(mse)) / np.log(10))


def ssim(y_true, y_pred):
    """structural similarity measurement system."""
    ## K1, K2 are two constants, much smaller than 1
    K1 = 0.04
    K2 = 0.06

    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = (sig_x * sig_y) ** 0.5

    ## L, number of pixels, C1, C2, two constants
    L = 33
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2

    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy * C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
Change how PSNR is computed
Change how PSNR is computed
Python
mit
qobilidop/srcnn,qobilidop/srcnn
import keras.backend as K
import numpy as np


def psnr(y_true, y_pred):
-     """Peak signal-to-noise ratio averaged over samples and channels."""
+     """Peak signal-to-noise ratio averaged over samples."""
-     mse = K.mean(K.square(y_true - y_pred), axis=(1, 2))
+     mse = K.mean(K.square(y_true - y_pred), axis=(-3, -2, -1))
    return K.mean(20 * K.log(255 / K.sqrt(mse)) / np.log(10))


def ssim(y_true, y_pred):
    """structural similarity measurement system."""
    ## K1, K2 are two constants, much smaller than 1
    K1 = 0.04
    K2 = 0.06

    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = (sig_x * sig_y) ** 0.5

    ## L, number of pixels, C1, C2, two constants
    L = 33
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2

    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy * C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
Change how PSNR is computed
## Code Before:
import keras.backend as K
import numpy as np


def psnr(y_true, y_pred):
    """Peak signal-to-noise ratio averaged over samples and channels."""
    mse = K.mean(K.square(y_true - y_pred), axis=(1, 2))
    return K.mean(20 * K.log(255 / K.sqrt(mse)) / np.log(10))


def ssim(y_true, y_pred):
    """structural similarity measurement system."""
    ## K1, K2 are two constants, much smaller than 1
    K1 = 0.04
    K2 = 0.06

    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = (sig_x * sig_y) ** 0.5

    ## L, number of pixels, C1, C2, two constants
    L = 33
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2

    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy * C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim

## Instruction:
Change how PSNR is computed

## Code After:
import keras.backend as K
import numpy as np


def psnr(y_true, y_pred):
    """Peak signal-to-noise ratio averaged over samples."""
    mse = K.mean(K.square(y_true - y_pred), axis=(-3, -2, -1))
    return K.mean(20 * K.log(255 / K.sqrt(mse)) / np.log(10))


def ssim(y_true, y_pred):
    """structural similarity measurement system."""
    ## K1, K2 are two constants, much smaller than 1
    K1 = 0.04
    K2 = 0.06

    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    sig_xy = (sig_x * sig_y) ** 0.5

    ## L, number of pixels, C1, C2, two constants
    L = 33
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2

    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy * C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
// ... existing code ...
def psnr(y_true, y_pred):
    """Peak signal-to-noise ratio averaged over samples."""
    mse = K.mean(K.square(y_true - y_pred), axis=(-3, -2, -1))
    return K.mean(20 * K.log(255 / K.sqrt(mse)) / np.log(10))
// ... rest of the code ...
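Spelling out the PSNR change in the record above: `axis=(-3, -2, -1)` computes one MSE per sample over height, width and channels (the old `axis=(1, 2)` left a per-channel MSE behind), and the `K.log(...)/np.log(10)` pair is just a base-10 logarithm, since the Keras backend only provides a natural log. In formula form, for a batch of N images of shape H x W x C:

```latex
\mathrm{MSE}_i = \frac{1}{HWC}\sum_{h,w,c}\left(y_{ihwc}-\hat{y}_{ihwc}\right)^2,
\qquad
\mathrm{PSNR} = \frac{1}{N}\sum_{i=1}^{N} 20\,\log_{10}\frac{255}{\sqrt{\mathrm{MSE}_i}}
```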
da16bec07e245e440acea629ad953e4a56085f7e
scripts/util.py
scripts/util.py
import time
import logging

from cassandra.cqlengine.query import Token

from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel

_manager.setup()
logger = logging.getLogger(__name__)


def documents(*sources):
    q = DocumentModel.objects.timeout(500).allow_filtering().all().limit(1000)
    querysets = (q.filter(source=source) for source in sources) if sources else [q]
    for query in querysets:
        page = try_forever(list, query)
        while len(page) > 0:
            for doc in page:
                yield doc
            page = try_forever(next_page, query, page)


def next_page(query, page):
    return list(query.filter(pk__token__gt=Token(page[-1].pk)))


def try_forever(action, *args, **kwargs):
    while True:
        try:
            return action(*args, **kwargs)
        except Exception as e:
            logger.exception(e)
            time.sleep(5)
            logger.info("Trying again...")
import time
import logging

from cassandra.cqlengine.query import Token

from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel, DocumentModelV2

_manager.setup()
logger = logging.getLogger(__name__)


def ModelIteratorFactory(model, next_page):
    def model_iterator(*sources):
        q = model.objects.timeout(500).allow_filtering().all().limit(1000)
        querysets = (q.filter(source=source) for source in sources) if sources else [q]
        for query in querysets:
            page = try_forever(list, query)
            while len(page) > 0:
                for doc in page:
                    yield doc
                page = try_forever(next_page, query, page)
    return model_iterator


def next_page_v1(query, page):
    return list(query.filter(pk__token__gt=Token(page[-1].pk)))


def next_page_v2(query, page):
    return list(query.filter(docID__gt=page[-1].docID))

documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)


def try_forever(action, *args, **kwargs):
    while True:
        try:
            return action(*args, **kwargs)
        except Exception as e:
            logger.exception(e)
            time.sleep(5)
            logger.info("Trying again...")
Add ability to iterate over old document model and new document model for migrations
Add ability to iterate over old document model and new document model for migrations
Python
apache-2.0
CenterForOpenScience/scrapi,ostwald/scrapi,fabianvf/scrapi,mehanig/scrapi,alexgarciac/scrapi,felliott/scrapi,icereval/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi,mehanig/scrapi,erinspace/scrapi,felliott/scrapi,jeffreyliu3230/scrapi,erinspace/scrapi
import time
import logging

from cassandra.cqlengine.query import Token

from scrapi.database import _manager
- from scrapi.processing.cassandra import DocumentModel
+ from scrapi.processing.cassandra import DocumentModel, DocumentModelV2

_manager.setup()
logger = logging.getLogger(__name__)


- def documents(*sources):
+ def ModelIteratorFactory(model, next_page):
+     def model_iterator(*sources):
-     q = DocumentModel.objects.timeout(500).allow_filtering().all().limit(1000)
+         q = model.objects.timeout(500).allow_filtering().all().limit(1000)
-     querysets = (q.filter(source=source) for source in sources) if sources else [q]
+         querysets = (q.filter(source=source) for source in sources) if sources else [q]
-     for query in querysets:
+         for query in querysets:
-         page = try_forever(list, query)
+             page = try_forever(list, query)
-         while len(page) > 0:
+             while len(page) > 0:
-             for doc in page:
+                 for doc in page:
-                 yield doc
+                     yield doc
-             page = try_forever(next_page, query, page)
+                 page = try_forever(next_page, query, page)
+     return model_iterator


- def next_page(query, page):
+ def next_page_v1(query, page):
    return list(query.filter(pk__token__gt=Token(page[-1].pk)))
+
+
+ def next_page_v2(query, page):
+     return list(query.filter(docID__gt=page[-1].docID))
+
+ documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
+ documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)


def try_forever(action, *args, **kwargs):
    while True:
        try:
            return action(*args, **kwargs)
        except Exception as e:
            logger.exception(e)
            time.sleep(5)
            logger.info("Trying again...")
Add ability to iterate over old document model and new document model for migrations
## Code Before:
import time
import logging

from cassandra.cqlengine.query import Token

from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel

_manager.setup()
logger = logging.getLogger(__name__)


def documents(*sources):
    q = DocumentModel.objects.timeout(500).allow_filtering().all().limit(1000)
    querysets = (q.filter(source=source) for source in sources) if sources else [q]
    for query in querysets:
        page = try_forever(list, query)
        while len(page) > 0:
            for doc in page:
                yield doc
            page = try_forever(next_page, query, page)


def next_page(query, page):
    return list(query.filter(pk__token__gt=Token(page[-1].pk)))


def try_forever(action, *args, **kwargs):
    while True:
        try:
            return action(*args, **kwargs)
        except Exception as e:
            logger.exception(e)
            time.sleep(5)
            logger.info("Trying again...")

## Instruction:
Add ability to iterate over old document model and new document model for migrations

## Code After:
import time
import logging

from cassandra.cqlengine.query import Token

from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel, DocumentModelV2

_manager.setup()
logger = logging.getLogger(__name__)


def ModelIteratorFactory(model, next_page):
    def model_iterator(*sources):
        q = model.objects.timeout(500).allow_filtering().all().limit(1000)
        querysets = (q.filter(source=source) for source in sources) if sources else [q]
        for query in querysets:
            page = try_forever(list, query)
            while len(page) > 0:
                for doc in page:
                    yield doc
                page = try_forever(next_page, query, page)
    return model_iterator


def next_page_v1(query, page):
    return list(query.filter(pk__token__gt=Token(page[-1].pk)))


def next_page_v2(query, page):
    return list(query.filter(docID__gt=page[-1].docID))

documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)


def try_forever(action, *args, **kwargs):
    while True:
        try:
            return action(*args, **kwargs)
        except Exception as e:
            logger.exception(e)
            time.sleep(5)
            logger.info("Trying again...")
# ... existing code ...
from scrapi.database import _manager
from scrapi.processing.cassandra import DocumentModel, DocumentModelV2
# ... modified code ...
def ModelIteratorFactory(model, next_page):
    def model_iterator(*sources):
        q = model.objects.timeout(500).allow_filtering().all().limit(1000)
        querysets = (q.filter(source=source) for source in sources) if sources else [q]
        for query in querysets:
            page = try_forever(list, query)
            while len(page) > 0:
                for doc in page:
                    yield doc
                page = try_forever(next_page, query, page)
    return model_iterator
...
def next_page_v1(query, page):
    return list(query.filter(pk__token__gt=Token(page[-1].pk)))


def next_page_v2(query, page):
    return list(query.filter(docID__gt=page[-1].docID))

documents_v1 = ModelIteratorFactory(DocumentModel, next_page_v1)
documents_v2 = ModelIteratorFactory(DocumentModelV2, next_page_v2)
# ... rest of the code ...
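A minimal, runnable sketch of the pattern this record adopts: a factory binds a data model to a paging strategy and returns a generator that drains pages until one comes back empty. Plain lists stand in for the Cassandra querysets, and every name below is illustrative rather than taken from scrapi.

def make_iterator(first_page, next_page):
    # Factory: bind a starting page and a paging strategy, return a generator.
    def iterator():
        page = first_page()
        while len(page) > 0:
            for item in page:
                yield item
            page = next_page(page)
    return iterator

pages = [[1, 2], [3, 4], [5]]  # stand-in for query result pages

def first():
    return pages[0]

def following(page):
    i = pages.index(page) + 1
    return pages[i] if i < len(pages) else []

walk = make_iterator(first, following)
print(list(walk()))  # [1, 2, 3, 4, 5]

The same factory can be instantiated once per model/paging pair, which is why the record can expose a v1 and a v2 iterator from one body of code.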
8ee35fe46e978fcb17e99b50f045009ea8235067
tools/pdtools/pdtools/devices/camera.py
tools/pdtools/pdtools/devices/camera.py
import base64

import requests
import six


class Camera(object):
    def __init__(self, host):
        self.host = host

    def get_image(self):
        """
        Get an image from the camera.

        Returns image data as a BytesIO/StringIO object.
        """
        url = "http://{}/image.jpg".format(self.host)

        encoded = base64.b64encode('admin:'.encode('utf-8')).decode('ascii')
        headers = {
            'Authorization': 'Basic ' + encoded
        }

        result = requests.get(url, headers=headers)
        if result.ok:
            return six.BytesIO(result.content)
        else:
            return None
import base64

import requests
import six


class Camera(object):
    def __init__(self, host):
        self.host = host

    def __repr__(self):
        return "Camera({})".format(self.host)

    def get_image(self):
        """
        Get an image from the camera.

        Returns image data as a BytesIO/StringIO object.
        """
        url = "http://{}/image.jpg".format(self.host)

        encoded = base64.b64encode('admin:'.encode('utf-8')).decode('ascii')
        headers = {
            'Authorization': 'Basic ' + encoded
        }

        result = requests.get(url, headers=headers)
        if result.ok:
            return six.BytesIO(result.content)
        else:
            return None
Define __repr__ for pdtools Camera class.
Define __repr__ for pdtools Camera class.
Python
apache-2.0
ParadropLabs/Paradrop,ParadropLabs/Paradrop,ParadropLabs/Paradrop
import base64

import requests
import six


class Camera(object):
    def __init__(self, host):
        self.host = host
+
+     def __repr__(self):
+         return "Camera({})".format(self.host)

    def get_image(self):
        """
        Get an image from the camera.

        Returns image data as a BytesIO/StringIO object.
        """
        url = "http://{}/image.jpg".format(self.host)

        encoded = base64.b64encode('admin:'.encode('utf-8')).decode('ascii')
        headers = {
            'Authorization': 'Basic ' + encoded
        }

        result = requests.get(url, headers=headers)
        if result.ok:
            return six.BytesIO(result.content)
        else:
            return None
Define __repr__ for pdtools Camera class.
## Code Before:
import base64

import requests
import six


class Camera(object):
    def __init__(self, host):
        self.host = host

    def get_image(self):
        """
        Get an image from the camera.

        Returns image data as a BytesIO/StringIO object.
        """
        url = "http://{}/image.jpg".format(self.host)

        encoded = base64.b64encode('admin:'.encode('utf-8')).decode('ascii')
        headers = {
            'Authorization': 'Basic ' + encoded
        }

        result = requests.get(url, headers=headers)
        if result.ok:
            return six.BytesIO(result.content)
        else:
            return None

## Instruction:
Define __repr__ for pdtools Camera class.

## Code After:
import base64

import requests
import six


class Camera(object):
    def __init__(self, host):
        self.host = host

    def __repr__(self):
        return "Camera({})".format(self.host)

    def get_image(self):
        """
        Get an image from the camera.

        Returns image data as a BytesIO/StringIO object.
        """
        url = "http://{}/image.jpg".format(self.host)

        encoded = base64.b64encode('admin:'.encode('utf-8')).decode('ascii')
        headers = {
            'Authorization': 'Basic ' + encoded
        }

        result = requests.get(url, headers=headers)
        if result.ok:
            return six.BytesIO(result.content)
        else:
            return None
...
        self.host = host

    def __repr__(self):
        return "Camera({})".format(self.host)
...
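A short demonstration of what the added __repr__ buys in practice: without it, printing a container of cameras shows opaque object addresses. The class here is a trimmed stand-in for the one in the record, and the 192.0.2.x hosts are documentation-range example values.

class Camera(object):
    def __init__(self, host):
        self.host = host

    def __repr__(self):
        return "Camera({})".format(self.host)

cameras = [Camera("192.0.2.10"), Camera("192.0.2.11")]
print(cameras)     # [Camera(192.0.2.10), Camera(192.0.2.11)]
print(cameras[0])  # Camera(192.0.2.10) -- str() falls back to __repr__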
898028dea2e04d52c32854752bda34d331c7696f
ynr/apps/candidatebot/management/commands/candidatebot_import_email_from_csv.py
ynr/apps/candidatebot/management/commands/candidatebot_import_email_from_csv.py
from __future__ import unicode_literals

import csv

from django.core.management.base import BaseCommand

from candidatebot.helpers import CandidateBot
from popolo.models import Person


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            'filename',
            help='Path to the file with the email addresses'
        )
        parser.add_argument(
            '--source',
            help='Source of the data. The source CSV column takes precedence'
        )

    def handle(self, **options):
        with open(options['filename'], 'r') as fh:
            reader = csv.DictReader(fh)
            for row in reader:
                source = row.get('source', options.get('source'))
                if not row['democlub_id']:
                    continue
                if not source:
                    raise ValueError("A source is required")
                try:
                    bot = CandidateBot(row['democlub_id'])
                    bot.add_email(row['email'])
                    bot.save(source)
                    # print(person)
                except Person.DoesNotExist:
                    print("Person ID {} not found".format(
                        row['democlub_id']))
                # print(row)
from __future__ import unicode_literals

import csv

from django.core.management.base import BaseCommand

from candidatebot.helpers import CandidateBot
from popolo.models import Person


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            'filename',
            help='Path to the file with the email addresses'
        )
        parser.add_argument(
            '--source',
            help='Source of the data. The source CSV column takes precedence'
        )

    def handle(self, **options):
        with open(options['filename'], 'r') as fh:
            reader = csv.DictReader(fh)
            for row in reader:
                source = row.get('source', options.get('source'))
                if not row['democlub_id']:
                    continue
                if not source:
                    raise ValueError("A source is required")
                try:
                    bot = CandidateBot(row['democlub_id'])
                    try:
                        bot.add_email(row['email'])
                        bot.save(source)
                    except ValueError:
                        #Email exists, move on
                        pass
                except Person.DoesNotExist:
                    print("Person ID {} not found".format(
                        row['democlub_id']))
                # print(row)
Move on if email exists
Move on if email exists
Python
agpl-3.0
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
from __future__ import unicode_literals

import csv

from django.core.management.base import BaseCommand

from candidatebot.helpers import CandidateBot
from popolo.models import Person


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            'filename',
            help='Path to the file with the email addresses'
        )
        parser.add_argument(
            '--source',
            help='Source of the data. The source CSV column takes precedence'
        )

    def handle(self, **options):
        with open(options['filename'], 'r') as fh:
            reader = csv.DictReader(fh)
            for row in reader:
                source = row.get('source', options.get('source'))
                if not row['democlub_id']:
                    continue
                if not source:
                    raise ValueError("A source is required")
                try:
                    bot = CandidateBot(row['democlub_id'])
+                     try:
-                     bot.add_email(row['email'])
+                         bot.add_email(row['email'])
-                     bot.save(source)
+                         bot.save(source)
-                     # print(person)
+                     except ValueError:
+                         #Email exists, move on
+                         pass
                except Person.DoesNotExist:
                    print("Person ID {} not found".format(
                        row['democlub_id']))
                # print(row)
Move on if email exists
## Code Before:
from __future__ import unicode_literals

import csv

from django.core.management.base import BaseCommand

from candidatebot.helpers import CandidateBot
from popolo.models import Person


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            'filename',
            help='Path to the file with the email addresses'
        )
        parser.add_argument(
            '--source',
            help='Source of the data. The source CSV column takes precedence'
        )

    def handle(self, **options):
        with open(options['filename'], 'r') as fh:
            reader = csv.DictReader(fh)
            for row in reader:
                source = row.get('source', options.get('source'))
                if not row['democlub_id']:
                    continue
                if not source:
                    raise ValueError("A source is required")
                try:
                    bot = CandidateBot(row['democlub_id'])
                    bot.add_email(row['email'])
                    bot.save(source)
                    # print(person)
                except Person.DoesNotExist:
                    print("Person ID {} not found".format(
                        row['democlub_id']))
                # print(row)

## Instruction:
Move on if email exists

## Code After:
from __future__ import unicode_literals

import csv

from django.core.management.base import BaseCommand

from candidatebot.helpers import CandidateBot
from popolo.models import Person


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            'filename',
            help='Path to the file with the email addresses'
        )
        parser.add_argument(
            '--source',
            help='Source of the data. The source CSV column takes precedence'
        )

    def handle(self, **options):
        with open(options['filename'], 'r') as fh:
            reader = csv.DictReader(fh)
            for row in reader:
                source = row.get('source', options.get('source'))
                if not row['democlub_id']:
                    continue
                if not source:
                    raise ValueError("A source is required")
                try:
                    bot = CandidateBot(row['democlub_id'])
                    try:
                        bot.add_email(row['email'])
                        bot.save(source)
                    except ValueError:
                        #Email exists, move on
                        pass
                except Person.DoesNotExist:
                    print("Person ID {} not found".format(
                        row['democlub_id']))
                # print(row)
# ... existing code ...
                    bot = CandidateBot(row['democlub_id'])
                    try:
                        bot.add_email(row['email'])
                        bot.save(source)
                    except ValueError:
                        #Email exists, move on
                        pass
                except Person.DoesNotExist:
# ... rest of the code ...
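A self-contained sketch of the "skip duplicates, keep going" recovery this diff introduces. The add_email stand-in raises ValueError on a repeat, mirroring the behaviour the record's except clause assumes of CandidateBot.add_email; the addresses are example values.

seen = set()

def add_email(email):
    # Stand-in that raises on a duplicate, as the record's except clause assumes.
    if email in seen:
        raise ValueError("email already recorded")
    seen.add(email)

for email in ["a@example.com", "a@example.com", "b@example.com"]:
    try:
        add_email(email)
    except ValueError:
        # Email exists, move on -- the same recovery the record adds.
        continue

print(sorted(seen))  # ['a@example.com', 'b@example.com']

Catching the narrow ValueError inside the loop keeps one duplicate row from aborting the whole import, while the outer Person.DoesNotExist handler still covers missing people.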
564ae1eb637ec509f37ade93d4079117cc73fd58
lab_assistant/storage/__init__.py
lab_assistant/storage/__init__.py
from copy import deepcopy

from simpleflake import simpleflake

from lab_assistant import conf, utils

__all__ = [
    'get_storage',
    'store',
    'retrieve',
    'retrieve_all',
    'clear',
]


def get_storage(path=None, name='Experiment', **opts):
    if not path:
        path = conf.storage['path']
    _opts = deepcopy(conf.storage.get('options', {}))
    _opts.update(opts)
    opts = _opts
    if path in get_storage._cache:
        return get_storage._cache[path]
    Storage = utils.import_path(path)
    get_storage._cache[path] = Storage(name, **opts)
    return get_storage._cache[path]
get_storage._cache = {}


def store(result, storage=None):
    storage = storage or get_storage(name=result.experiment.name)
    key = simpleflake()
    storage.set(key, result)
    return key


def retrieve(key, storage=None):
    storage = storage or get_storage()
    return storage.get(key)


def retrieve_all(storage=None):
    return (storage or get_storage()).list()


def remove(key, storage=None):
    (storage or get_storage()).remove(key)


def clear(storage=None):
    return (storage or get_storage()).clear()
from copy import deepcopy
from collections import defaultdict

from simpleflake import simpleflake

from lab_assistant import conf, utils

__all__ = [
    'get_storage',
    'store',
    'retrieve',
    'retrieve_all',
    'clear',
]


def get_storage(path=None, name='Experiment', **opts):
    if not path:
        path = conf.storage['path']
    _opts = deepcopy(conf.storage.get('options', {}))
    _opts.update(opts)
    opts = _opts
    if path in get_storage._cache:
        if name in get_storage._cache[path]:
            return get_storage._cache[path][name]
    Storage = utils.import_path(path)
    get_storage._cache[path].update({
        name: Storage(name, **opts)
    })
    return get_storage._cache[path][name]
get_storage._cache = defaultdict(dict)


def store(result, storage=None):
    storage = storage or get_storage(name=result.experiment.name)
    key = simpleflake()
    storage.set(key, result)
    return key


def retrieve(key, storage=None):
    storage = storage or get_storage()
    return storage.get(key)


def retrieve_all(storage=None):
    return (storage or get_storage()).list()


def remove(key, storage=None):
    (storage or get_storage()).remove(key)


def clear(storage=None):
    return (storage or get_storage()).clear()
Fix get_storage cache to hold separate entries for each experiment key
Fix get_storage cache to hold separate entries for each experiment key
Python
mit
joealcorn/lab_assistant
from copy import deepcopy
+ from collections import defaultdict

from simpleflake import simpleflake

from lab_assistant import conf, utils

__all__ = [
    'get_storage',
    'store',
    'retrieve',
    'retrieve_all',
    'clear',
]


def get_storage(path=None, name='Experiment', **opts):
    if not path:
        path = conf.storage['path']
    _opts = deepcopy(conf.storage.get('options', {}))
    _opts.update(opts)
    opts = _opts
    if path in get_storage._cache:
+         if name in get_storage._cache[path]:
-         return get_storage._cache[path]
+             return get_storage._cache[path][name]
    Storage = utils.import_path(path)
-     get_storage._cache[path] = Storage(name, **opts)
+     get_storage._cache[path].update({
+         name: Storage(name, **opts)
+     })
-     return get_storage._cache[path]
+     return get_storage._cache[path][name]
- get_storage._cache = {}
+ get_storage._cache = defaultdict(dict)


def store(result, storage=None):
    storage = storage or get_storage(name=result.experiment.name)
    key = simpleflake()
    storage.set(key, result)
    return key


def retrieve(key, storage=None):
    storage = storage or get_storage()
    return storage.get(key)


def retrieve_all(storage=None):
    return (storage or get_storage()).list()


def remove(key, storage=None):
    (storage or get_storage()).remove(key)


def clear(storage=None):
    return (storage or get_storage()).clear()
Fix get_storage cache to hold separate entries for each experiment key
## Code Before:
from copy import deepcopy

from simpleflake import simpleflake

from lab_assistant import conf, utils

__all__ = [
    'get_storage',
    'store',
    'retrieve',
    'retrieve_all',
    'clear',
]


def get_storage(path=None, name='Experiment', **opts):
    if not path:
        path = conf.storage['path']
    _opts = deepcopy(conf.storage.get('options', {}))
    _opts.update(opts)
    opts = _opts
    if path in get_storage._cache:
        return get_storage._cache[path]
    Storage = utils.import_path(path)
    get_storage._cache[path] = Storage(name, **opts)
    return get_storage._cache[path]
get_storage._cache = {}


def store(result, storage=None):
    storage = storage or get_storage(name=result.experiment.name)
    key = simpleflake()
    storage.set(key, result)
    return key


def retrieve(key, storage=None):
    storage = storage or get_storage()
    return storage.get(key)


def retrieve_all(storage=None):
    return (storage or get_storage()).list()


def remove(key, storage=None):
    (storage or get_storage()).remove(key)


def clear(storage=None):
    return (storage or get_storage()).clear()

## Instruction:
Fix get_storage cache to hold separate entries for each experiment key

## Code After:
from copy import deepcopy
from collections import defaultdict

from simpleflake import simpleflake

from lab_assistant import conf, utils

__all__ = [
    'get_storage',
    'store',
    'retrieve',
    'retrieve_all',
    'clear',
]


def get_storage(path=None, name='Experiment', **opts):
    if not path:
        path = conf.storage['path']
    _opts = deepcopy(conf.storage.get('options', {}))
    _opts.update(opts)
    opts = _opts
    if path in get_storage._cache:
        if name in get_storage._cache[path]:
            return get_storage._cache[path][name]
    Storage = utils.import_path(path)
    get_storage._cache[path].update({
        name: Storage(name, **opts)
    })
    return get_storage._cache[path][name]
get_storage._cache = defaultdict(dict)


def store(result, storage=None):
    storage = storage or get_storage(name=result.experiment.name)
    key = simpleflake()
    storage.set(key, result)
    return key


def retrieve(key, storage=None):
    storage = storage or get_storage()
    return storage.get(key)


def retrieve_all(storage=None):
    return (storage or get_storage()).list()


def remove(key, storage=None):
    (storage or get_storage()).remove(key)


def clear(storage=None):
    return (storage or get_storage()).clear()
// ... existing code ...
from copy import deepcopy
from collections import defaultdict
// ... modified code ...
    if path in get_storage._cache:
        if name in get_storage._cache[path]:
            return get_storage._cache[path][name]
...
    Storage = utils.import_path(path)
    get_storage._cache[path].update({
        name: Storage(name, **opts)
    })
    return get_storage._cache[path][name]
get_storage._cache = defaultdict(dict)
// ... rest of the code ...
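A stand-alone illustration of the two-level cache shape this fix adopts: a defaultdict(dict) keyed first by backend path and then by experiment name, so two experiments on the same backend no longer share one cached instance. The path/name strings and the factory lambdas are illustrative only.

from collections import defaultdict

cache = defaultdict(dict)

def get(path, name, factory):
    # Look up by (path, name); build and memoize on first use.
    if name not in cache[path]:
        cache[path][name] = factory(name)
    return cache[path][name]

a = get("redis", "ExpA", lambda n: object())
b = get("redis", "ExpB", lambda n: object())
assert a is not b                                     # one entry per experiment
assert a is get("redis", "ExpA", lambda n: object())  # instance is still cached
print({p: sorted(names) for p, names in cache.items()})  # {'redis': ['ExpA', 'ExpB']}

defaultdict(dict) keeps the lookup code free of explicit "create the inner dict if missing" branches, which is why the record can simply call .update() on cache[path].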
a5ef9a5d141ba5fd0d1d6c983cd8ac82079a1782
run_tests.py
run_tests.py
import os
import tempfile
from distutils.sysconfig import get_python_lib

from coalib.tests.TestHelper import TestHelper

if __name__ == '__main__':
    parser = TestHelper.create_argparser(description="Runs coalas tests.")
    parser.add_argument("-b", "--ignore-bear-tests", help="ignore bear tests", action="store_true")
    parser.add_argument("-m", "--ignore-main-tests", help="ignore main program tests", action="store_true")

    testhelper = TestHelper(parser)

    if not testhelper.args.ignore_main_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("coalib", "tests")))
    if not testhelper.args.ignore_bear_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("bears", "tests")))

    ignore_list = [
        os.path.join(tempfile.gettempdir(), "**"),
        os.path.join(get_python_lib(), "**"),
        os.path.join("coalib", "tests", "**"),
        os.path.join("bears", "tests", "**")
    ]

    exit(testhelper.execute_python3_files(ignore_list))
import os
import tempfile
from distutils.sysconfig import get_python_lib

from coalib.tests.TestHelper import TestHelper

if __name__ == '__main__':
    parser = TestHelper.create_argparser(description="Runs coalas tests.")
    parser.add_argument("-b", "--ignore-bear-tests", help="ignore bear tests", action="store_true")
    parser.add_argument("-m", "--ignore-main-tests", help="ignore main program tests", action="store_true")

    testhelper = TestHelper(parser)

    if not testhelper.args.ignore_main_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("coalib", "tests")))
    if not testhelper.args.ignore_bear_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("bears", "tests")))

    ignore_list = [
        os.path.join(tempfile.gettempdir(), "**"),
        os.path.join(os.path.dirname(get_python_lib()), "**"),
        os.path.join("coalib", "tests", "**"),
        os.path.join("bears", "tests", "**")
    ]

    exit(testhelper.execute_python3_files(ignore_list))
Update run_test.py to fix coverage
tests: Update run_test.py to fix coverage
Python
agpl-3.0
Asalle/coala,ManjiriBirajdar/coala,jayvdb/coala,Asnelchristian/coala,RJ722/coala,abhiroyg/coala,FeodorFitsner/coala,meetmangukiya/coala,sils1297/coala,Tanmay28/coala,yashLadha/coala,Asalle/coala,scottbelden/coala,stevemontana1980/coala,sophiavanvalkenburg/coala,Tanmay28/coala,JohnS-01/coala,Nosferatul/coala,yashLadha/coala,SambitAcharya/coala,arjunsinghy96/coala,d6e/coala,sagark123/coala,Tanmay28/coala,mr-karan/coala,SanketDG/coala,CruiseDevice/coala,sagark123/coala,refeed/coala,NalinG/coala,SambitAcharya/coala,AbdealiJK/coala,sudheesh001/coala,kartikeys98/coala,coala-analyzer/coala,karansingh1559/coala,NalinG/coala,CruiseDevice/coala,Shade5/coala,RJ722/coala,lonewolf07/coala,rresol/coala,NiklasMM/coala,impmihai/coala,SanketDG/coala,dagdaggo/coala,Tanmay28/coala,incorrectusername/coala,coala-analyzer/coala,CruiseDevice/coala,Shade5/coala,vinc456/coala,yashtrivedi96/coala,NiklasMM/coala,coala-analyzer/coala,nemaniarjun/coala,JohnS-01/coala,abhiroyg/coala,coala/coala,Tanmay28/coala,swatilodha/coala,Shade5/coala,SambitAcharya/coala,dagdaggo/coala,incorrectusername/coala,mr-karan/coala,nemaniarjun/coala,stevemontana1980/coala,Asnelchristian/coala,MattAllmendinger/coala,yashtrivedi96/coala,coala/coala,MattAllmendinger/coala,shreyans800755/coala,dagdaggo/coala,damngamerz/coala,meetmangukiya/coala,SambitAcharya/coala,arush0311/coala,Balaji2198/coala,ManjiriBirajdar/coala,rimacone/testing2,andreimacavei/coala,sils1297/coala,rresol/coala,AbdealiJK/coala,AdeshAtole/coala,netman92/coala,Nosferatul/coala,Uran198/coala,arjunsinghy96/coala,yland/coala,karansingh1559/coala,SambitAcharya/coala,arush0311/coala,vinc456/coala,NalinG/coala,shreyans800755/coala,NalinG/coala,tushar-rishav/coala,incorrectusername/coala,arafsheikh/coala,andreimacavei/coala,Tanmay28/coala,d6e/coala,tltuan/coala,sophiavanvalkenburg/coala,svsn2117/coala,arush0311/coala,lonewolf07/coala,meetmangukiya/coala,aptrishu/coala,MattAllmendinger/coala,swatilodha/coala,SanketDG/coala,RJ722/coala,damngamerz/coala,aptrishu/coala,kartikeys98/coala,scriptnull/coala,rresol/coala,sils1297/coala,ayushin78/coala,tltuan/coala,ayushin78/coala,swatilodha/coala,saurabhiiit/coala,impmihai/coala,svsn2117/coala,FeodorFitsner/coala,shreyans800755/coala,NiklasMM/coala,d6e/coala,arjunsinghy96/coala,coala/coala,Asalle/coala,scriptnull/coala,abhiroyg/coala,FeodorFitsner/coala,AdeshAtole/coala,Uran198/coala,tushar-rishav/coala,tushar-rishav/coala,yland/coala,karansingh1559/coala,vinc456/coala,aptrishu/coala,MariosPanag/coala,impmihai/coala,Balaji2198/coala,netman92/coala,mr-karan/coala,djkonro/coala,netman92/coala,nemaniarjun/coala,damngamerz/coala,scriptnull/coala,NalinG/coala,kartikeys98/coala,MariosPanag/coala,Asnelchristian/coala,scriptnull/coala,refeed/coala,Balaji2198/coala,djkonro/coala,saurabhiiit/coala,arafsheikh/coala,scriptnull/coala,scottbelden/coala,sudheesh001/coala,Tanmay28/coala,Uran198/coala,sophiavanvalkenburg/coala,MariosPanag/coala,refeed/coala,jayvdb/coala,jayvdb/coala,svsn2117/coala,rimacone/testing2,stevemontana1980/coala,sudheesh001/coala,yland/coala,scriptnull/coala,ManjiriBirajdar/coala,AbdealiJK/coala,Tanmay28/coala,andreimacavei/coala,NalinG/coala,saurabhiiit/coala,AdeshAtole/coala,ayushin78/coala,lonewolf07/coala,scriptnull/coala,tltuan/coala,rimacone/testing2,yashLadha/coala,SambitAcharya/coala,scottbelden/coala,sagark123/coala,JohnS-01/coala,NalinG/coala,arafsheikh/coala,SambitAcharya/coala,Nosferatul/coala,djkonro/coala,yashtrivedi96/coala
import os
import tempfile
from distutils.sysconfig import get_python_lib

from coalib.tests.TestHelper import TestHelper

if __name__ == '__main__':
    parser = TestHelper.create_argparser(description="Runs coalas tests.")
    parser.add_argument("-b", "--ignore-bear-tests", help="ignore bear tests", action="store_true")
    parser.add_argument("-m", "--ignore-main-tests", help="ignore main program tests", action="store_true")

    testhelper = TestHelper(parser)

    if not testhelper.args.ignore_main_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("coalib", "tests")))
    if not testhelper.args.ignore_bear_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("bears", "tests")))

    ignore_list = [
        os.path.join(tempfile.gettempdir(), "**"),
-         os.path.join(get_python_lib(), "**"),
+         os.path.join(os.path.dirname(get_python_lib()), "**"),
        os.path.join("coalib", "tests", "**"),
        os.path.join("bears", "tests", "**")
    ]

    exit(testhelper.execute_python3_files(ignore_list))
Update run_test.py to fix coverage
## Code Before:
import os
import tempfile
from distutils.sysconfig import get_python_lib

from coalib.tests.TestHelper import TestHelper

if __name__ == '__main__':
    parser = TestHelper.create_argparser(description="Runs coalas tests.")
    parser.add_argument("-b", "--ignore-bear-tests", help="ignore bear tests", action="store_true")
    parser.add_argument("-m", "--ignore-main-tests", help="ignore main program tests", action="store_true")

    testhelper = TestHelper(parser)

    if not testhelper.args.ignore_main_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("coalib", "tests")))
    if not testhelper.args.ignore_bear_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("bears", "tests")))

    ignore_list = [
        os.path.join(tempfile.gettempdir(), "**"),
        os.path.join(get_python_lib(), "**"),
        os.path.join("coalib", "tests", "**"),
        os.path.join("bears", "tests", "**")
    ]

    exit(testhelper.execute_python3_files(ignore_list))

## Instruction:
Update run_test.py to fix coverage

## Code After:
import os
import tempfile
from distutils.sysconfig import get_python_lib

from coalib.tests.TestHelper import TestHelper

if __name__ == '__main__':
    parser = TestHelper.create_argparser(description="Runs coalas tests.")
    parser.add_argument("-b", "--ignore-bear-tests", help="ignore bear tests", action="store_true")
    parser.add_argument("-m", "--ignore-main-tests", help="ignore main program tests", action="store_true")

    testhelper = TestHelper(parser)

    if not testhelper.args.ignore_main_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("coalib", "tests")))
    if not testhelper.args.ignore_bear_tests:
        testhelper.add_test_files(os.path.abspath(os.path.join("bears", "tests")))

    ignore_list = [
        os.path.join(tempfile.gettempdir(), "**"),
        os.path.join(os.path.dirname(get_python_lib()), "**"),
        os.path.join("coalib", "tests", "**"),
        os.path.join("bears", "tests", "**")
    ]

    exit(testhelper.execute_python3_files(ignore_list))
...
        os.path.join(tempfile.gettempdir(), "**"),
        os.path.join(os.path.dirname(get_python_lib()), "**"),
        os.path.join("coalib", "tests", "**"),
...
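A quick illustration of what this one-line fix changes: taking the parent of get_python_lib() widens the coverage ignore glob from the site-packages directory itself to everything under its parent. The path below is a typical example value, not output from any particular machine.

import os

site = "/usr/lib/python3.4/site-packages"            # illustrative value only
print(os.path.join(site, "**"))                      # /usr/lib/python3.4/site-packages/**
print(os.path.join(os.path.dirname(site), "**"))     # /usr/lib/python3.4/**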