commit
stringlengths 40
40
| old_file
stringlengths 4
118
| new_file
stringlengths 4
118
| old_contents
stringlengths 10
2.94k
| new_contents
stringlengths 21
3.18k
| subject
stringlengths 16
444
| message
stringlengths 17
2.63k
| lang
stringclasses 1
value | license
stringclasses 13
values | repos
stringlengths 5
43k
| ndiff
stringlengths 52
3.32k
| instruction
stringlengths 16
444
| content
stringlengths 133
4.32k
| fuzzy_diff
stringlengths 16
3.18k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8c97ffed1531315dd50639c40b0bccad0fc1ef2d
|
textual_runtime.py
|
textual_runtime.py
|
from game import DiscState
class TextualRuntime:
def __init__(self, game):
self.game = game
self.state = {
"continue": True
}
def start(self):
while self.state["continue"]:
self.render()
self.eval(self.get_input())
def render(self):
str_repr = ["Current board state:\n"]
str_repr += [" %i " % col_index for col_index in range(self.game.grid.width)]
for row in self.game.grid:
row_repr = []
for disc_value in row:
if disc_value is DiscState.empty:
row_repr.append("| |")
elif disc_value is DiscState.red:
row_repr.append("|O|")
else: # disc_value is black
row_repr.append("|X|")
row_repr.append("\n")
str_repr += row_repr
print("".join(str_repr))
def get_input(self):
return input("--> ")
def eval(self, command):
tokens = command.split()
if len(tokens) == 1:
if tokens[0] == "quit":
self.state["continue"] = False
|
from game import DiscState
class TextualRuntime:
def __init__(self, game):
self.game = game
self.state = {
"continue": True
}
def start(self):
while self.state["continue"]:
self.render()
self.eval(self.get_input())
def render(self):
str_repr = ["Current board state:\n"]
str_repr += [" %i " % col_index for col_index in range(self.game.grid.width)] + ["\n"]
for row in self.game.grid:
row_repr = []
for disc_value in row:
if disc_value is DiscState.empty:
row_repr.append("| |")
elif disc_value is DiscState.red:
row_repr.append("|O|")
else: # disc_value is black
row_repr.append("|X|")
row_repr.append("\n")
str_repr += row_repr
print("".join(str_repr))
def get_input(self):
return input("--> ")
def eval(self, command):
tokens = command.split()
if len(tokens) == 1:
if tokens[0] == "quit":
self.state["continue"] = False
elif tokens[0].isdigit():
col_index = int(tokens[0])
new_point = self.game.try_turn(self.game.current_player, col_index)
|
Add ability to drop discs on slots
|
Add ability to drop discs on slots
|
Python
|
mit
|
misterwilliam/connect-four
|
from game import DiscState
class TextualRuntime:
def __init__(self, game):
self.game = game
self.state = {
"continue": True
}
def start(self):
while self.state["continue"]:
self.render()
self.eval(self.get_input())
def render(self):
str_repr = ["Current board state:\n"]
- str_repr += [" %i " % col_index for col_index in range(self.game.grid.width)]
+ str_repr += [" %i " % col_index for col_index in range(self.game.grid.width)] + ["\n"]
for row in self.game.grid:
row_repr = []
for disc_value in row:
if disc_value is DiscState.empty:
row_repr.append("| |")
elif disc_value is DiscState.red:
row_repr.append("|O|")
else: # disc_value is black
row_repr.append("|X|")
row_repr.append("\n")
str_repr += row_repr
print("".join(str_repr))
def get_input(self):
return input("--> ")
def eval(self, command):
tokens = command.split()
if len(tokens) == 1:
if tokens[0] == "quit":
self.state["continue"] = False
+ elif tokens[0].isdigit():
+ col_index = int(tokens[0])
+ new_point = self.game.try_turn(self.game.current_player, col_index)
|
Add ability to drop discs on slots
|
## Code Before:
from game import DiscState
class TextualRuntime:
def __init__(self, game):
self.game = game
self.state = {
"continue": True
}
def start(self):
while self.state["continue"]:
self.render()
self.eval(self.get_input())
def render(self):
str_repr = ["Current board state:\n"]
str_repr += [" %i " % col_index for col_index in range(self.game.grid.width)]
for row in self.game.grid:
row_repr = []
for disc_value in row:
if disc_value is DiscState.empty:
row_repr.append("| |")
elif disc_value is DiscState.red:
row_repr.append("|O|")
else: # disc_value is black
row_repr.append("|X|")
row_repr.append("\n")
str_repr += row_repr
print("".join(str_repr))
def get_input(self):
return input("--> ")
def eval(self, command):
tokens = command.split()
if len(tokens) == 1:
if tokens[0] == "quit":
self.state["continue"] = False
## Instruction:
Add ability to drop discs on slots
## Code After:
from game import DiscState
class TextualRuntime:
def __init__(self, game):
self.game = game
self.state = {
"continue": True
}
def start(self):
while self.state["continue"]:
self.render()
self.eval(self.get_input())
def render(self):
str_repr = ["Current board state:\n"]
str_repr += [" %i " % col_index for col_index in range(self.game.grid.width)] + ["\n"]
for row in self.game.grid:
row_repr = []
for disc_value in row:
if disc_value is DiscState.empty:
row_repr.append("| |")
elif disc_value is DiscState.red:
row_repr.append("|O|")
else: # disc_value is black
row_repr.append("|X|")
row_repr.append("\n")
str_repr += row_repr
print("".join(str_repr))
def get_input(self):
return input("--> ")
def eval(self, command):
tokens = command.split()
if len(tokens) == 1:
if tokens[0] == "quit":
self.state["continue"] = False
elif tokens[0].isdigit():
col_index = int(tokens[0])
new_point = self.game.try_turn(self.game.current_player, col_index)
|
...
str_repr = ["Current board state:\n"]
str_repr += [" %i " % col_index for col_index in range(self.game.grid.width)] + ["\n"]
for row in self.game.grid:
...
self.state["continue"] = False
elif tokens[0].isdigit():
col_index = int(tokens[0])
new_point = self.game.try_turn(self.game.current_player, col_index)
...
|
5f27e570a369fbb408a48a567064a96f1ceac277
|
tests/commands/project/utils.py
|
tests/commands/project/utils.py
|
from uuid import uuid4
import requests_mock
from tests.utils import get_project_list_data
from valohai_cli.utils import get_random_string
def get_project_mock(create_project_name=None, existing_projects=None):
username = get_random_string()
m = requests_mock.mock()
if isinstance(existing_projects, int):
existing_projects = get_project_list_data([get_random_string() for x in range(existing_projects)])
if existing_projects is not None:
m.get('https://app.valohai.com/api/v0/projects/', json=existing_projects)
if create_project_name:
m.post('https://app.valohai.com/api/v0/projects/', json=lambda request, context: {
'id': str(uuid4()),
'name': create_project_name,
'owner': {
'id': 8,
'username': username,
}
})
m.get('https://app.valohai.com/api/v0/projects/ownership_options/', json=[username])
return m
|
from uuid import uuid4
import requests_mock
from tests.utils import get_project_list_data
from valohai_cli.utils import get_random_string
def get_project_mock(create_project_name=None, existing_projects=None):
username = get_random_string()
project_id = uuid4()
m = requests_mock.mock()
if isinstance(existing_projects, int):
existing_projects = get_project_list_data([get_random_string() for x in range(existing_projects)])
if existing_projects is not None:
m.get('https://app.valohai.com/api/v0/projects/', json=existing_projects)
if create_project_name:
m.post('https://app.valohai.com/api/v0/projects/', json=lambda request, context: {
'id': str(project_id),
'name': create_project_name,
'owner': {
'id': 8,
'username': username,
}
})
m.get('https://app.valohai.com/api/v0/projects/ownership_options/', json=[username])
m.get(f'https://app.valohai.com/api/v0/projects/{project_id}/', json={
'id': str(project_id),
'yaml_path': 'valohai.yaml',
})
return m
|
Add a mock API path for project details, used in e.g. test_init
|
Add a mock API path for project details, used in e.g. test_init
|
Python
|
mit
|
valohai/valohai-cli
|
from uuid import uuid4
import requests_mock
from tests.utils import get_project_list_data
from valohai_cli.utils import get_random_string
def get_project_mock(create_project_name=None, existing_projects=None):
username = get_random_string()
+ project_id = uuid4()
m = requests_mock.mock()
if isinstance(existing_projects, int):
existing_projects = get_project_list_data([get_random_string() for x in range(existing_projects)])
if existing_projects is not None:
m.get('https://app.valohai.com/api/v0/projects/', json=existing_projects)
if create_project_name:
m.post('https://app.valohai.com/api/v0/projects/', json=lambda request, context: {
- 'id': str(uuid4()),
+ 'id': str(project_id),
'name': create_project_name,
'owner': {
'id': 8,
'username': username,
}
})
m.get('https://app.valohai.com/api/v0/projects/ownership_options/', json=[username])
+ m.get(f'https://app.valohai.com/api/v0/projects/{project_id}/', json={
+ 'id': str(project_id),
+ 'yaml_path': 'valohai.yaml',
+ })
return m
|
Add a mock API path for project details, used in e.g. test_init
|
## Code Before:
from uuid import uuid4
import requests_mock
from tests.utils import get_project_list_data
from valohai_cli.utils import get_random_string
def get_project_mock(create_project_name=None, existing_projects=None):
username = get_random_string()
m = requests_mock.mock()
if isinstance(existing_projects, int):
existing_projects = get_project_list_data([get_random_string() for x in range(existing_projects)])
if existing_projects is not None:
m.get('https://app.valohai.com/api/v0/projects/', json=existing_projects)
if create_project_name:
m.post('https://app.valohai.com/api/v0/projects/', json=lambda request, context: {
'id': str(uuid4()),
'name': create_project_name,
'owner': {
'id': 8,
'username': username,
}
})
m.get('https://app.valohai.com/api/v0/projects/ownership_options/', json=[username])
return m
## Instruction:
Add a mock API path for project details, used in e.g. test_init
## Code After:
from uuid import uuid4
import requests_mock
from tests.utils import get_project_list_data
from valohai_cli.utils import get_random_string
def get_project_mock(create_project_name=None, existing_projects=None):
username = get_random_string()
project_id = uuid4()
m = requests_mock.mock()
if isinstance(existing_projects, int):
existing_projects = get_project_list_data([get_random_string() for x in range(existing_projects)])
if existing_projects is not None:
m.get('https://app.valohai.com/api/v0/projects/', json=existing_projects)
if create_project_name:
m.post('https://app.valohai.com/api/v0/projects/', json=lambda request, context: {
'id': str(project_id),
'name': create_project_name,
'owner': {
'id': 8,
'username': username,
}
})
m.get('https://app.valohai.com/api/v0/projects/ownership_options/', json=[username])
m.get(f'https://app.valohai.com/api/v0/projects/{project_id}/', json={
'id': str(project_id),
'yaml_path': 'valohai.yaml',
})
return m
|
# ... existing code ...
username = get_random_string()
project_id = uuid4()
m = requests_mock.mock()
# ... modified code ...
m.post('https://app.valohai.com/api/v0/projects/', json=lambda request, context: {
'id': str(project_id),
'name': create_project_name,
...
m.get('https://app.valohai.com/api/v0/projects/ownership_options/', json=[username])
m.get(f'https://app.valohai.com/api/v0/projects/{project_id}/', json={
'id': str(project_id),
'yaml_path': 'valohai.yaml',
})
return m
# ... rest of the code ...
|
3291b015a5f3d311c72980913756a08d87b1ac1a
|
scripts/blacklisted.py
|
scripts/blacklisted.py
|
import os
import platform
# If you are adding a new entry, please include a short comment
# explaining why the specific test is blacklisted.
_unix_black_list = set([name.lower() for name in [
'blackparrot',
'blackucode',
'blackunicore',
'earlgrey_nexysvideo', # ram size in ci machines
'lpddr',
'simpleparsertestcache', # race condition
]])
_windows_black_list = _unix_black_list.union(set([name.lower() for name in [
'ariane', # Uses shell script with make command
'earlgrey_verilator_01_05_21', # lowmem is unsupported
'unitpython', # Python is unsupported
]]))
_msys2_black_list = _unix_black_list.union(set([name.lower() for name in [
'earlgrey_verilator_01_05_21', # lowmem is unsupported
]]))
def is_blacklisted(name):
if platform.system() == 'Windows':
blacklist = _msys2_black_list if 'MSYSTEM' in os.environ else _windows_black_list
else:
blacklist = _unix_black_list
return name.lower() in blacklist
|
import os
import platform
# If you are adding a new entry, please include a short comment
# explaining why the specific test is blacklisted.
_unix_black_list = set([name.lower() for name in [
'blackparrot',
'blackucode',
'blackunicore',
'earlgrey_nexysvideo', # ram size in ci machines
'lpddr',
'rsd', # Out of memory on CI machines
'simpleparsertestcache', # race condition
]])
_windows_black_list = _unix_black_list.union(set([name.lower() for name in [
'ariane', # Uses shell script with make command
'earlgrey_verilator_01_05_21', # lowmem is unsupported
'unitpython', # Python is unsupported
'verilator', # Stack overflow with clang due to expression evaluation
]]))
_msys2_black_list = _unix_black_list.union(set([name.lower() for name in [
'earlgrey_verilator_01_05_21', # lowmem is unsupported
]]))
def is_blacklisted(name):
if platform.system() == 'Windows':
blacklist = _msys2_black_list if 'MSYSTEM' in os.environ else _windows_black_list
else:
blacklist = _unix_black_list
return name.lower() in blacklist
|
Exclude a few failing tests
|
Exclude a few failing tests
Rsd - failing on linux due to running out of memory
Verilator - failing on Windows clang due to stack overflow caused by
expression evaluation
|
Python
|
apache-2.0
|
chipsalliance/Surelog,alainmarcel/Surelog,alainmarcel/Surelog,chipsalliance/Surelog,alainmarcel/Surelog,chipsalliance/Surelog,alainmarcel/Surelog,chipsalliance/Surelog
|
import os
import platform
# If you are adding a new entry, please include a short comment
# explaining why the specific test is blacklisted.
_unix_black_list = set([name.lower() for name in [
'blackparrot',
'blackucode',
'blackunicore',
- 'earlgrey_nexysvideo', # ram size in ci machines
+ 'earlgrey_nexysvideo', # ram size in ci machines
'lpddr',
+ 'rsd', # Out of memory on CI machines
- 'simpleparsertestcache', # race condition
+ 'simpleparsertestcache', # race condition
]])
_windows_black_list = _unix_black_list.union(set([name.lower() for name in [
'ariane', # Uses shell script with make command
'earlgrey_verilator_01_05_21', # lowmem is unsupported
'unitpython', # Python is unsupported
+ 'verilator', # Stack overflow with clang due to expression evaluation
]]))
_msys2_black_list = _unix_black_list.union(set([name.lower() for name in [
'earlgrey_verilator_01_05_21', # lowmem is unsupported
]]))
def is_blacklisted(name):
if platform.system() == 'Windows':
blacklist = _msys2_black_list if 'MSYSTEM' in os.environ else _windows_black_list
else:
blacklist = _unix_black_list
return name.lower() in blacklist
|
Exclude a few failing tests
|
## Code Before:
import os
import platform
# If you are adding a new entry, please include a short comment
# explaining why the specific test is blacklisted.
_unix_black_list = set([name.lower() for name in [
'blackparrot',
'blackucode',
'blackunicore',
'earlgrey_nexysvideo', # ram size in ci machines
'lpddr',
'simpleparsertestcache', # race condition
]])
_windows_black_list = _unix_black_list.union(set([name.lower() for name in [
'ariane', # Uses shell script with make command
'earlgrey_verilator_01_05_21', # lowmem is unsupported
'unitpython', # Python is unsupported
]]))
_msys2_black_list = _unix_black_list.union(set([name.lower() for name in [
'earlgrey_verilator_01_05_21', # lowmem is unsupported
]]))
def is_blacklisted(name):
if platform.system() == 'Windows':
blacklist = _msys2_black_list if 'MSYSTEM' in os.environ else _windows_black_list
else:
blacklist = _unix_black_list
return name.lower() in blacklist
## Instruction:
Exclude a few failing tests
## Code After:
import os
import platform
# If you are adding a new entry, please include a short comment
# explaining why the specific test is blacklisted.
_unix_black_list = set([name.lower() for name in [
'blackparrot',
'blackucode',
'blackunicore',
'earlgrey_nexysvideo', # ram size in ci machines
'lpddr',
'rsd', # Out of memory on CI machines
'simpleparsertestcache', # race condition
]])
_windows_black_list = _unix_black_list.union(set([name.lower() for name in [
'ariane', # Uses shell script with make command
'earlgrey_verilator_01_05_21', # lowmem is unsupported
'unitpython', # Python is unsupported
'verilator', # Stack overflow with clang due to expression evaluation
]]))
_msys2_black_list = _unix_black_list.union(set([name.lower() for name in [
'earlgrey_verilator_01_05_21', # lowmem is unsupported
]]))
def is_blacklisted(name):
if platform.system() == 'Windows':
blacklist = _msys2_black_list if 'MSYSTEM' in os.environ else _windows_black_list
else:
blacklist = _unix_black_list
return name.lower() in blacklist
|
...
'blackunicore',
'earlgrey_nexysvideo', # ram size in ci machines
'lpddr',
'rsd', # Out of memory on CI machines
'simpleparsertestcache', # race condition
]])
...
'unitpython', # Python is unsupported
'verilator', # Stack overflow with clang due to expression evaluation
]]))
...
|
6d6e83734d0cb034f8fc198df94bc64cf412d8d6
|
ceam/framework/components.py
|
ceam/framework/components.py
|
from importlib import import_module
import json
def read_component_configuration(path):
if path.endswith('.json'):
with open(path) as f:
config = json.load(f)
return apply_defaults(config)
else:
raise ValueError("Unknown components configuration type: {}".format(path))
def apply_defaults(config):
base_components = config['components']
if 'comparisons' in config:
comparisons = {c['name']:c for c in config['comparisons']}
for comparison in comparisons.values():
comparison['components'] = base_components + comparison['components']
else:
comparisons = {'base': {'name': 'base', 'components': base_components}}
return comparisons
def load(component_list):
components = []
for component in component_list:
if isinstance(component, str) or isinstance(component, list):
if isinstance(component, list):
component, args, kwargs = component
call = True
elif component.endswith('()'):
component = component[:-2]
args = ()
kwargs = {}
call = True
else:
call = False
module_path, _, component_name = component.rpartition('.')
component = getattr(import_module(module_path), component_name)
if call:
component = component(*args, **kwargs)
if isinstance(component, type):
component = component()
components.append(component)
return components
|
from importlib import import_module
from collections import Iterable
import json
def read_component_configuration(path):
if path.endswith('.json'):
with open(path) as f:
config = json.load(f)
return apply_defaults(config)
else:
raise ValueError("Unknown components configuration type: {}".format(path))
def apply_defaults(config):
base_components = config['components']
if 'comparisons' in config:
comparisons = {c['name']:c for c in config['comparisons']}
for comparison in comparisons.values():
comparison['components'] = base_components + comparison['components']
else:
comparisons = {'base': {'name': 'base', 'components': base_components}}
return comparisons
def load(component_list):
components = []
for component in component_list:
if isinstance(component, str) or isinstance(component, list):
if isinstance(component, list):
component, args, kwargs = component
call = True
elif component.endswith('()'):
component = component[:-2]
args = ()
kwargs = {}
call = True
else:
call = False
module_path, _, component_name = component.rpartition('.')
component = getattr(import_module(module_path), component_name)
if call:
component = component(*args, **kwargs)
if isinstance(component, type):
component = component()
if isinstance(component, Iterable):
components.extend(component)
else:
components.append(component)
return components
|
Add support for component initialization that returns lists
|
Add support for component initialization that returns lists
|
Python
|
bsd-3-clause
|
ihmeuw/vivarium
|
from importlib import import_module
+ from collections import Iterable
import json
def read_component_configuration(path):
if path.endswith('.json'):
with open(path) as f:
config = json.load(f)
return apply_defaults(config)
else:
raise ValueError("Unknown components configuration type: {}".format(path))
def apply_defaults(config):
base_components = config['components']
if 'comparisons' in config:
comparisons = {c['name']:c for c in config['comparisons']}
for comparison in comparisons.values():
comparison['components'] = base_components + comparison['components']
else:
comparisons = {'base': {'name': 'base', 'components': base_components}}
return comparisons
def load(component_list):
components = []
for component in component_list:
if isinstance(component, str) or isinstance(component, list):
if isinstance(component, list):
component, args, kwargs = component
call = True
elif component.endswith('()'):
component = component[:-2]
args = ()
kwargs = {}
call = True
else:
call = False
module_path, _, component_name = component.rpartition('.')
component = getattr(import_module(module_path), component_name)
if call:
component = component(*args, **kwargs)
if isinstance(component, type):
component = component()
+ if isinstance(component, Iterable):
+ components.extend(component)
+ else:
- components.append(component)
+ components.append(component)
return components
|
Add support for component initialization that returns lists
|
## Code Before:
from importlib import import_module
import json
def read_component_configuration(path):
if path.endswith('.json'):
with open(path) as f:
config = json.load(f)
return apply_defaults(config)
else:
raise ValueError("Unknown components configuration type: {}".format(path))
def apply_defaults(config):
base_components = config['components']
if 'comparisons' in config:
comparisons = {c['name']:c for c in config['comparisons']}
for comparison in comparisons.values():
comparison['components'] = base_components + comparison['components']
else:
comparisons = {'base': {'name': 'base', 'components': base_components}}
return comparisons
def load(component_list):
components = []
for component in component_list:
if isinstance(component, str) or isinstance(component, list):
if isinstance(component, list):
component, args, kwargs = component
call = True
elif component.endswith('()'):
component = component[:-2]
args = ()
kwargs = {}
call = True
else:
call = False
module_path, _, component_name = component.rpartition('.')
component = getattr(import_module(module_path), component_name)
if call:
component = component(*args, **kwargs)
if isinstance(component, type):
component = component()
components.append(component)
return components
## Instruction:
Add support for component initialization that returns lists
## Code After:
from importlib import import_module
from collections import Iterable
import json
def read_component_configuration(path):
if path.endswith('.json'):
with open(path) as f:
config = json.load(f)
return apply_defaults(config)
else:
raise ValueError("Unknown components configuration type: {}".format(path))
def apply_defaults(config):
base_components = config['components']
if 'comparisons' in config:
comparisons = {c['name']:c for c in config['comparisons']}
for comparison in comparisons.values():
comparison['components'] = base_components + comparison['components']
else:
comparisons = {'base': {'name': 'base', 'components': base_components}}
return comparisons
def load(component_list):
components = []
for component in component_list:
if isinstance(component, str) or isinstance(component, list):
if isinstance(component, list):
component, args, kwargs = component
call = True
elif component.endswith('()'):
component = component[:-2]
args = ()
kwargs = {}
call = True
else:
call = False
module_path, _, component_name = component.rpartition('.')
component = getattr(import_module(module_path), component_name)
if call:
component = component(*args, **kwargs)
if isinstance(component, type):
component = component()
if isinstance(component, Iterable):
components.extend(component)
else:
components.append(component)
return components
|
# ... existing code ...
from importlib import import_module
from collections import Iterable
import json
# ... modified code ...
component = component()
if isinstance(component, Iterable):
components.extend(component)
else:
components.append(component)
# ... rest of the code ...
|
599ec99b6f57e37f7f4009afb9498abffd70ff34
|
grammpy_transforms/SplittedRules/splitted_rules.py
|
grammpy_transforms/SplittedRules/splitted_rules.py
|
from grammpy import Nonterminal
def splitted_rules(root: Nonterminal):
return root
|
from grammpy import Nonterminal, Rule, EPSILON
from grammpy.Grammars.MultipleRulesGrammar import SplitRule
class Adding:
def __init__(self, rule: Rule):
self.rule = rule
self.processed = False
def process(self):
child_symbols = self.rule.to_symbols
self.processed = True
child_rules = []
for child in child_symbols: # type: Nonterminal
if child.to_rule is not None:
child_rules.append(child.to_rule)
return child_rules
def splitted_rules(root: Nonterminal):
stack = list()
stack.append(Adding(root.to_rule))
while len(stack) > 0:
proc = stack.pop() # type: Adding
if not proc.processed:
add = proc.process()
stack.append(proc)
for a in add:
stack.append(Adding(a))
elif isinstance(proc.rule, SplitRule):
created_rule = proc.rule.from_rule() # type: Rule
#Solve parents
for s in proc.rule.from_symbols: # type: Nonterminal
s._set_to_rule(created_rule)
created_rule._from_symbols.append(s)
#Solve childs
for ch in proc.rule.to_symbols:
ch._set_from_rule(created_rule)
created_rule.to_symbols.append(ch)
stack.append(Adding(created_rule))
return root
|
Add implementation of splitted rules
|
Add implementation of splitted rules
|
Python
|
mit
|
PatrikValkovic/grammpy
|
- from grammpy import Nonterminal
+ from grammpy import Nonterminal, Rule, EPSILON
+ from grammpy.Grammars.MultipleRulesGrammar import SplitRule
+ class Adding:
+ def __init__(self, rule: Rule):
+ self.rule = rule
+ self.processed = False
+
+ def process(self):
+ child_symbols = self.rule.to_symbols
+ self.processed = True
+ child_rules = []
+ for child in child_symbols: # type: Nonterminal
+ if child.to_rule is not None:
+ child_rules.append(child.to_rule)
+ return child_rules
+
def splitted_rules(root: Nonterminal):
+ stack = list()
+ stack.append(Adding(root.to_rule))
+ while len(stack) > 0:
+ proc = stack.pop() # type: Adding
+ if not proc.processed:
+ add = proc.process()
+ stack.append(proc)
+ for a in add:
+ stack.append(Adding(a))
+ elif isinstance(proc.rule, SplitRule):
+ created_rule = proc.rule.from_rule() # type: Rule
+ #Solve parents
+ for s in proc.rule.from_symbols: # type: Nonterminal
+ s._set_to_rule(created_rule)
+ created_rule._from_symbols.append(s)
+ #Solve childs
+ for ch in proc.rule.to_symbols:
+ ch._set_from_rule(created_rule)
+ created_rule.to_symbols.append(ch)
+ stack.append(Adding(created_rule))
return root
|
Add implementation of splitted rules
|
## Code Before:
from grammpy import Nonterminal
def splitted_rules(root: Nonterminal):
return root
## Instruction:
Add implementation of splitted rules
## Code After:
from grammpy import Nonterminal, Rule, EPSILON
from grammpy.Grammars.MultipleRulesGrammar import SplitRule
class Adding:
def __init__(self, rule: Rule):
self.rule = rule
self.processed = False
def process(self):
child_symbols = self.rule.to_symbols
self.processed = True
child_rules = []
for child in child_symbols: # type: Nonterminal
if child.to_rule is not None:
child_rules.append(child.to_rule)
return child_rules
def splitted_rules(root: Nonterminal):
stack = list()
stack.append(Adding(root.to_rule))
while len(stack) > 0:
proc = stack.pop() # type: Adding
if not proc.processed:
add = proc.process()
stack.append(proc)
for a in add:
stack.append(Adding(a))
elif isinstance(proc.rule, SplitRule):
created_rule = proc.rule.from_rule() # type: Rule
#Solve parents
for s in proc.rule.from_symbols: # type: Nonterminal
s._set_to_rule(created_rule)
created_rule._from_symbols.append(s)
#Solve childs
for ch in proc.rule.to_symbols:
ch._set_from_rule(created_rule)
created_rule.to_symbols.append(ch)
stack.append(Adding(created_rule))
return root
|
// ... existing code ...
from grammpy import Nonterminal, Rule, EPSILON
from grammpy.Grammars.MultipleRulesGrammar import SplitRule
// ... modified code ...
class Adding:
def __init__(self, rule: Rule):
self.rule = rule
self.processed = False
def process(self):
child_symbols = self.rule.to_symbols
self.processed = True
child_rules = []
for child in child_symbols: # type: Nonterminal
if child.to_rule is not None:
child_rules.append(child.to_rule)
return child_rules
def splitted_rules(root: Nonterminal):
stack = list()
stack.append(Adding(root.to_rule))
while len(stack) > 0:
proc = stack.pop() # type: Adding
if not proc.processed:
add = proc.process()
stack.append(proc)
for a in add:
stack.append(Adding(a))
elif isinstance(proc.rule, SplitRule):
created_rule = proc.rule.from_rule() # type: Rule
#Solve parents
for s in proc.rule.from_symbols: # type: Nonterminal
s._set_to_rule(created_rule)
created_rule._from_symbols.append(s)
#Solve childs
for ch in proc.rule.to_symbols:
ch._set_from_rule(created_rule)
created_rule.to_symbols.append(ch)
stack.append(Adding(created_rule))
return root
// ... rest of the code ...
|
c008171b93371c72a2f2a2698f514d267e312837
|
tests/testapp/urls.py
|
tests/testapp/urls.py
|
from django.conf.urls import url
from django.contrib import admin
from .views import ArticleView, PageView
urlpatterns = [
url(r"^admin/", admin.site.urls),
url(r"^articles/(?P<pk>\d+)/$", ArticleView.as_view(), name="article_detail"),
url(r"^pages/(?P<pk>\d+)/$", PageView.as_view(), name="page_detail"),
]
|
from django.urls import re_path
from django.contrib import admin
from .views import ArticleView, PageView
urlpatterns = [
re_path(r"^admin/", admin.site.urls),
re_path(r"^articles/(?P<pk>\d+)/$", ArticleView.as_view(), name="article_detail"),
re_path(r"^pages/(?P<pk>\d+)/$", PageView.as_view(), name="page_detail"),
]
|
Switch from url() to re_path()
|
Switch from url() to re_path()
|
Python
|
bsd-3-clause
|
matthiask/django-content-editor,matthiask/feincms2-content,matthiask/feincms2-content,matthiask/django-content-editor,matthiask/feincms2-content,matthiask/django-content-editor,matthiask/django-content-editor
|
- from django.conf.urls import url
+ from django.urls import re_path
from django.contrib import admin
from .views import ArticleView, PageView
urlpatterns = [
- url(r"^admin/", admin.site.urls),
+ re_path(r"^admin/", admin.site.urls),
- url(r"^articles/(?P<pk>\d+)/$", ArticleView.as_view(), name="article_detail"),
+ re_path(r"^articles/(?P<pk>\d+)/$", ArticleView.as_view(), name="article_detail"),
- url(r"^pages/(?P<pk>\d+)/$", PageView.as_view(), name="page_detail"),
+ re_path(r"^pages/(?P<pk>\d+)/$", PageView.as_view(), name="page_detail"),
]
|
Switch from url() to re_path()
|
## Code Before:
from django.conf.urls import url
from django.contrib import admin
from .views import ArticleView, PageView
urlpatterns = [
url(r"^admin/", admin.site.urls),
url(r"^articles/(?P<pk>\d+)/$", ArticleView.as_view(), name="article_detail"),
url(r"^pages/(?P<pk>\d+)/$", PageView.as_view(), name="page_detail"),
]
## Instruction:
Switch from url() to re_path()
## Code After:
from django.urls import re_path
from django.contrib import admin
from .views import ArticleView, PageView
urlpatterns = [
re_path(r"^admin/", admin.site.urls),
re_path(r"^articles/(?P<pk>\d+)/$", ArticleView.as_view(), name="article_detail"),
re_path(r"^pages/(?P<pk>\d+)/$", PageView.as_view(), name="page_detail"),
]
|
# ... existing code ...
from django.urls import re_path
from django.contrib import admin
# ... modified code ...
urlpatterns = [
re_path(r"^admin/", admin.site.urls),
re_path(r"^articles/(?P<pk>\d+)/$", ArticleView.as_view(), name="article_detail"),
re_path(r"^pages/(?P<pk>\d+)/$", PageView.as_view(), name="page_detail"),
]
# ... rest of the code ...
|
20df58bb9e605ecc53848ade31a3acb98118f00b
|
scripts/extract_clips_from_hdf5_file.py
|
scripts/extract_clips_from_hdf5_file.py
|
from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, sample_rate = read_clip(clip_group, clip_id)
print(clip_id, len(samples), samples.dtype, sample_rate)
write_wave_file(clip_id, samples, sample_rate)
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
sample_rate = clip.attrs['sample_rate']
return samples, sample_rate
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, attributes = read_clip(clip_group, clip_id)
show_clip(clip_id, samples, attributes)
write_wave_file(clip_id, samples, attributes['sample_rate'])
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
attributes = dict((name, value) for name, value in clip.attrs.items())
return samples, attributes
def show_clip(clip_id, samples, attributes):
print(f'clip {clip_id}:')
print(f' length: {len(samples)}')
print(' attributes:')
for key in sorted(attributes.keys()):
value = attributes[key]
print(f' {key}: {value}')
print()
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
Add attribute display to clip extraction script.
|
Add attribute display to clip extraction script.
|
Python
|
mit
|
HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper
|
from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
- samples, sample_rate = read_clip(clip_group, clip_id)
+ samples, attributes = read_clip(clip_group, clip_id)
- print(clip_id, len(samples), samples.dtype, sample_rate)
+ show_clip(clip_id, samples, attributes)
- write_wave_file(clip_id, samples, sample_rate)
+ write_wave_file(clip_id, samples, attributes['sample_rate'])
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
- sample_rate = clip.attrs['sample_rate']
+ attributes = dict((name, value) for name, value in clip.attrs.items())
- return samples, sample_rate
+ return samples, attributes
+
+ def show_clip(clip_id, samples, attributes):
+ print(f'clip {clip_id}:')
+ print(f' length: {len(samples)}')
+ print(' attributes:')
+ for key in sorted(attributes.keys()):
+ value = attributes[key]
+ print(f' {key}: {value}')
+ print()
+
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
Add attribute display to clip extraction script.
|
## Code Before:
from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, sample_rate = read_clip(clip_group, clip_id)
print(clip_id, len(samples), samples.dtype, sample_rate)
write_wave_file(clip_id, samples, sample_rate)
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
sample_rate = clip.attrs['sample_rate']
return samples, sample_rate
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
## Instruction:
Add attribute display to clip extraction script.
## Code After:
from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, attributes = read_clip(clip_group, clip_id)
show_clip(clip_id, samples, attributes)
write_wave_file(clip_id, samples, attributes['sample_rate'])
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
attributes = dict((name, value) for name, value in clip.attrs.items())
return samples, attributes
def show_clip(clip_id, samples, attributes):
print(f'clip {clip_id}:')
print(f' length: {len(samples)}')
print(' attributes:')
for key in sorted(attributes.keys()):
value = attributes[key]
print(f' {key}: {value}')
print()
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
// ... existing code ...
samples, attributes = read_clip(clip_group, clip_id)
show_clip(clip_id, samples, attributes)
write_wave_file(clip_id, samples, attributes['sample_rate'])
// ... modified code ...
samples = clip[:]
attributes = dict((name, value) for name, value in clip.attrs.items())
return samples, attributes
def show_clip(clip_id, samples, attributes):
print(f'clip {clip_id}:')
print(f' length: {len(samples)}')
print(' attributes:')
for key in sorted(attributes.keys()):
value = attributes[key]
print(f' {key}: {value}')
print()
// ... rest of the code ...
|
d866dc0f6a33925e2a8cd910a8b6226f8b7ed50d
|
pytablereader/__init__.py
|
pytablereader/__init__.py
|
from __future__ import absolute_import
from tabledata import (
DataError,
EmptyDataError,
InvalidDataError,
InvalidHeaderNameError,
InvalidTableNameError,
)
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._constant import PatternMatch
from ._logger import logger, set_log_level, set_logger
from .csv.core import CsvTableFileLoader, CsvTableTextLoader
from .error import (
APIError,
HTTPError,
InvalidFilePathError,
LoaderNotFoundError,
OpenError,
PathError,
ProxyError,
PypandocImportError,
UrlError,
ValidationError,
)
from .html.core import HtmlTableFileLoader, HtmlTableTextLoader
from .json.core import JsonTableDictLoader, JsonTableFileLoader, JsonTableTextLoader
from .jsonlines.core import JsonLinesTableFileLoader, JsonLinesTableTextLoader
from .loadermanager import TableFileLoader, TableUrlLoader
from .ltsv.core import LtsvTableFileLoader, LtsvTableTextLoader
from .markdown.core import MarkdownTableFileLoader, MarkdownTableTextLoader
from .mediawiki.core import MediaWikiTableFileLoader, MediaWikiTableTextLoader
from .spreadsheet.excelloader import ExcelTableFileLoader
from .spreadsheet.gsloader import GoogleSheetsTableLoader
from .sqlite.core import SqliteFileLoader
from .tsv.core import TsvTableFileLoader, TsvTableTextLoader
|
from __future__ import absolute_import
from tabledata import DataError, InvalidHeaderNameError, InvalidTableNameError
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._constant import PatternMatch
from ._logger import logger, set_log_level, set_logger
from .csv.core import CsvTableFileLoader, CsvTableTextLoader
from .error import (
APIError,
HTTPError,
InvalidFilePathError,
LoaderNotFoundError,
OpenError,
PathError,
ProxyError,
PypandocImportError,
UrlError,
ValidationError,
)
from .html.core import HtmlTableFileLoader, HtmlTableTextLoader
from .json.core import JsonTableDictLoader, JsonTableFileLoader, JsonTableTextLoader
from .jsonlines.core import JsonLinesTableFileLoader, JsonLinesTableTextLoader
from .loadermanager import TableFileLoader, TableUrlLoader
from .ltsv.core import LtsvTableFileLoader, LtsvTableTextLoader
from .markdown.core import MarkdownTableFileLoader, MarkdownTableTextLoader
from .mediawiki.core import MediaWikiTableFileLoader, MediaWikiTableTextLoader
from .spreadsheet.excelloader import ExcelTableFileLoader
from .spreadsheet.gsloader import GoogleSheetsTableLoader
from .sqlite.core import SqliteFileLoader
from .tsv.core import TsvTableFileLoader, TsvTableTextLoader
|
Remove an import that deprecated and unused
|
Remove an import that deprecated and unused
|
Python
|
mit
|
thombashi/pytablereader,thombashi/pytablereader,thombashi/pytablereader
|
from __future__ import absolute_import
+ from tabledata import DataError, InvalidHeaderNameError, InvalidTableNameError
- from tabledata import (
- DataError,
- EmptyDataError,
- InvalidDataError,
- InvalidHeaderNameError,
- InvalidTableNameError,
- )
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._constant import PatternMatch
from ._logger import logger, set_log_level, set_logger
from .csv.core import CsvTableFileLoader, CsvTableTextLoader
from .error import (
APIError,
HTTPError,
InvalidFilePathError,
LoaderNotFoundError,
OpenError,
PathError,
ProxyError,
PypandocImportError,
UrlError,
ValidationError,
)
from .html.core import HtmlTableFileLoader, HtmlTableTextLoader
from .json.core import JsonTableDictLoader, JsonTableFileLoader, JsonTableTextLoader
from .jsonlines.core import JsonLinesTableFileLoader, JsonLinesTableTextLoader
from .loadermanager import TableFileLoader, TableUrlLoader
from .ltsv.core import LtsvTableFileLoader, LtsvTableTextLoader
from .markdown.core import MarkdownTableFileLoader, MarkdownTableTextLoader
from .mediawiki.core import MediaWikiTableFileLoader, MediaWikiTableTextLoader
from .spreadsheet.excelloader import ExcelTableFileLoader
from .spreadsheet.gsloader import GoogleSheetsTableLoader
from .sqlite.core import SqliteFileLoader
from .tsv.core import TsvTableFileLoader, TsvTableTextLoader
|
Remove an import that deprecated and unused
|
## Code Before:
from __future__ import absolute_import
from tabledata import (
DataError,
EmptyDataError,
InvalidDataError,
InvalidHeaderNameError,
InvalidTableNameError,
)
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._constant import PatternMatch
from ._logger import logger, set_log_level, set_logger
from .csv.core import CsvTableFileLoader, CsvTableTextLoader
from .error import (
APIError,
HTTPError,
InvalidFilePathError,
LoaderNotFoundError,
OpenError,
PathError,
ProxyError,
PypandocImportError,
UrlError,
ValidationError,
)
from .html.core import HtmlTableFileLoader, HtmlTableTextLoader
from .json.core import JsonTableDictLoader, JsonTableFileLoader, JsonTableTextLoader
from .jsonlines.core import JsonLinesTableFileLoader, JsonLinesTableTextLoader
from .loadermanager import TableFileLoader, TableUrlLoader
from .ltsv.core import LtsvTableFileLoader, LtsvTableTextLoader
from .markdown.core import MarkdownTableFileLoader, MarkdownTableTextLoader
from .mediawiki.core import MediaWikiTableFileLoader, MediaWikiTableTextLoader
from .spreadsheet.excelloader import ExcelTableFileLoader
from .spreadsheet.gsloader import GoogleSheetsTableLoader
from .sqlite.core import SqliteFileLoader
from .tsv.core import TsvTableFileLoader, TsvTableTextLoader
## Instruction:
Remove an import that deprecated and unused
## Code After:
from __future__ import absolute_import
from tabledata import DataError, InvalidHeaderNameError, InvalidTableNameError
from .__version__ import __author__, __copyright__, __email__, __license__, __version__
from ._constant import PatternMatch
from ._logger import logger, set_log_level, set_logger
from .csv.core import CsvTableFileLoader, CsvTableTextLoader
from .error import (
APIError,
HTTPError,
InvalidFilePathError,
LoaderNotFoundError,
OpenError,
PathError,
ProxyError,
PypandocImportError,
UrlError,
ValidationError,
)
from .html.core import HtmlTableFileLoader, HtmlTableTextLoader
from .json.core import JsonTableDictLoader, JsonTableFileLoader, JsonTableTextLoader
from .jsonlines.core import JsonLinesTableFileLoader, JsonLinesTableTextLoader
from .loadermanager import TableFileLoader, TableUrlLoader
from .ltsv.core import LtsvTableFileLoader, LtsvTableTextLoader
from .markdown.core import MarkdownTableFileLoader, MarkdownTableTextLoader
from .mediawiki.core import MediaWikiTableFileLoader, MediaWikiTableTextLoader
from .spreadsheet.excelloader import ExcelTableFileLoader
from .spreadsheet.gsloader import GoogleSheetsTableLoader
from .sqlite.core import SqliteFileLoader
from .tsv.core import TsvTableFileLoader, TsvTableTextLoader
|
# ... existing code ...
from tabledata import DataError, InvalidHeaderNameError, InvalidTableNameError
# ... rest of the code ...
|
281a096cea735845bdb74d60abf14f1422f2c624
|
test_runner/executable.py
|
test_runner/executable.py
|
import argh
from .environments import Environment
from .frameworks import Tempest
from .utils import cleanup, Reporter
LOG = Reporter(__name__).setup()
def main(endpoint, username='admin', password='secrete', test_path='api'):
environment = Environment(username, password, endpoint)
with cleanup(environment):
environment.build()
framework = Tempest(environment, repo_dir='/opt/tempest',
test_path=test_path)
results = framework.run_tests()
LOG.info('Results: {0}'.format(results))
if __name__ == '__main__':
argh.dispatch_command(main)
|
import argh
from .environments import Environment
from .frameworks import Tempest
from .utils import cleanup, Reporter
LOG = Reporter(__name__).setup()
def main(endpoint, username='admin', password='secrete', test_path='api'):
environment = Environment(username, password, endpoint)
with cleanup(environment):
environment.build()
framework = Tempest(environment, repo_dir='/opt/tempest',
test_path=test_path)
results = framework.run_tests()
LOG.info('Results: {0}'.format(results))
argh.dispatch_command(main)
|
Move command dispatch into full module
|
Move command dispatch into full module
|
Python
|
mit
|
rcbops-qa/test_runner
|
import argh
from .environments import Environment
from .frameworks import Tempest
from .utils import cleanup, Reporter
LOG = Reporter(__name__).setup()
def main(endpoint, username='admin', password='secrete', test_path='api'):
environment = Environment(username, password, endpoint)
with cleanup(environment):
environment.build()
framework = Tempest(environment, repo_dir='/opt/tempest',
test_path=test_path)
results = framework.run_tests()
LOG.info('Results: {0}'.format(results))
+ argh.dispatch_command(main)
- if __name__ == '__main__':
- argh.dispatch_command(main)
-
|
Move command dispatch into full module
|
## Code Before:
import argh
from .environments import Environment
from .frameworks import Tempest
from .utils import cleanup, Reporter
LOG = Reporter(__name__).setup()
def main(endpoint, username='admin', password='secrete', test_path='api'):
environment = Environment(username, password, endpoint)
with cleanup(environment):
environment.build()
framework = Tempest(environment, repo_dir='/opt/tempest',
test_path=test_path)
results = framework.run_tests()
LOG.info('Results: {0}'.format(results))
if __name__ == '__main__':
argh.dispatch_command(main)
## Instruction:
Move command dispatch into full module
## Code After:
import argh
from .environments import Environment
from .frameworks import Tempest
from .utils import cleanup, Reporter
LOG = Reporter(__name__).setup()
def main(endpoint, username='admin', password='secrete', test_path='api'):
environment = Environment(username, password, endpoint)
with cleanup(environment):
environment.build()
framework = Tempest(environment, repo_dir='/opt/tempest',
test_path=test_path)
results = framework.run_tests()
LOG.info('Results: {0}'.format(results))
argh.dispatch_command(main)
|
// ... existing code ...
argh.dispatch_command(main)
// ... rest of the code ...
|
807d7efe7de00950df675e78249dcada298b6cd1
|
systemrdl/__init__.py
|
systemrdl/__init__.py
|
from .__about__ import __version__
from .compiler import RDLCompiler
from .walker import RDLListener, RDLWalker
from .messages import RDLCompileError
|
from .__about__ import __version__
from .compiler import RDLCompiler
from .walker import RDLListener, RDLWalker
from .messages import RDLCompileError
from .node import AddressableNode, VectorNode, SignalNode
from .node import FieldNode, RegNode, RegfileNode, AddrmapNode, MemNode
from .component import AddressableComponent, VectorComponent, Signal
from .component import Field, Reg, Regfile, Addrmap, Mem
|
Bring forward more contents into top namespace
|
Bring forward more contents into top namespace
|
Python
|
mit
|
SystemRDL/systemrdl-compiler,SystemRDL/systemrdl-compiler,SystemRDL/systemrdl-compiler,SystemRDL/systemrdl-compiler
|
from .__about__ import __version__
+
from .compiler import RDLCompiler
from .walker import RDLListener, RDLWalker
from .messages import RDLCompileError
+ from .node import AddressableNode, VectorNode, SignalNode
+ from .node import FieldNode, RegNode, RegfileNode, AddrmapNode, MemNode
+
+ from .component import AddressableComponent, VectorComponent, Signal
+ from .component import Field, Reg, Regfile, Addrmap, Mem
+
|
Bring forward more contents into top namespace
|
## Code Before:
from .__about__ import __version__
from .compiler import RDLCompiler
from .walker import RDLListener, RDLWalker
from .messages import RDLCompileError
## Instruction:
Bring forward more contents into top namespace
## Code After:
from .__about__ import __version__
from .compiler import RDLCompiler
from .walker import RDLListener, RDLWalker
from .messages import RDLCompileError
from .node import AddressableNode, VectorNode, SignalNode
from .node import FieldNode, RegNode, RegfileNode, AddrmapNode, MemNode
from .component import AddressableComponent, VectorComponent, Signal
from .component import Field, Reg, Regfile, Addrmap, Mem
|
// ... existing code ...
from .__about__ import __version__
from .compiler import RDLCompiler
// ... modified code ...
from .messages import RDLCompileError
from .node import AddressableNode, VectorNode, SignalNode
from .node import FieldNode, RegNode, RegfileNode, AddrmapNode, MemNode
from .component import AddressableComponent, VectorComponent, Signal
from .component import Field, Reg, Regfile, Addrmap, Mem
// ... rest of the code ...
|
79e4839c06d8a3ae8de0c9a7c0cf7b536016dde3
|
pyglab/pyglab.py
|
pyglab/pyglab.py
|
_defaults = {
'api_url': 'api/v3',
}
from .apirequest import ApiRequest, RequestType
from .users import Users
class Pyglab(object):
def __init__(self, url, token, api_url=_defaults['api_url']):
self._base_url = url.rstrip('/') + '/' + api_url.strip()
self._token = token
self._user = None
self._per_page = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self._user
self._user = user
return previous_user
def request(self, request_type, url, params={}, sudo=None, page=None,
per_page=None):
if sudo is None and self._user is not None:
sudo = _self.user
if per_page is None and self._per_page is None:
per_page = self._per_page
r = ApiRequest(request_type, self._base_url + '/' + url.lstrip('/'),
self._token, params, sudo, page, per_page)
return r.content
@property
def users(self):
u = self.request(RequestType.GET, '/users')
return Users(u)
def users_by_name(self, name):
params = {'search': name}
u = self.request(RequestType.GET, '/users', params=params)
return Users(u)
|
_defaults = {
'api_url': 'api/v3',
}
from .apirequest import ApiRequest, RequestType
from .users import Users
class Pyglab(object):
def __init__(self, url, token, api_url=_defaults['api_url']):
self._base_url = url.rstrip('/') + '/' + api_url.strip()
self._token = token
self._user = None
self._per_page = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self._user
self._user = user
return previous_user
def request(self, request_type, url, params={}, sudo=None, page=None,
per_page=None):
if sudo is None and self._user is not None:
sudo = _self.user
if per_page is None and self._per_page is None:
per_page = self._per_page
r = ApiRequest(request_type, self._base_url + '/' + url.lstrip('/'),
self._token, params, sudo, page, per_page)
return r.content
@property
def users(self):
return Users(self)
|
Create exactly one users function.
|
Create exactly one users function.
|
Python
|
mit
|
sloede/pyglab,sloede/pyglab
|
_defaults = {
'api_url': 'api/v3',
}
from .apirequest import ApiRequest, RequestType
from .users import Users
class Pyglab(object):
def __init__(self, url, token, api_url=_defaults['api_url']):
self._base_url = url.rstrip('/') + '/' + api_url.strip()
self._token = token
self._user = None
self._per_page = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self._user
self._user = user
return previous_user
def request(self, request_type, url, params={}, sudo=None, page=None,
per_page=None):
if sudo is None and self._user is not None:
sudo = _self.user
if per_page is None and self._per_page is None:
per_page = self._per_page
r = ApiRequest(request_type, self._base_url + '/' + url.lstrip('/'),
self._token, params, sudo, page, per_page)
return r.content
@property
def users(self):
- u = self.request(RequestType.GET, '/users')
- return Users(u)
+ return Users(self)
- def users_by_name(self, name):
- params = {'search': name}
- u = self.request(RequestType.GET, '/users', params=params)
- return Users(u)
-
|
Create exactly one users function.
|
## Code Before:
_defaults = {
'api_url': 'api/v3',
}
from .apirequest import ApiRequest, RequestType
from .users import Users
class Pyglab(object):
def __init__(self, url, token, api_url=_defaults['api_url']):
self._base_url = url.rstrip('/') + '/' + api_url.strip()
self._token = token
self._user = None
self._per_page = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self._user
self._user = user
return previous_user
def request(self, request_type, url, params={}, sudo=None, page=None,
per_page=None):
if sudo is None and self._user is not None:
sudo = _self.user
if per_page is None and self._per_page is None:
per_page = self._per_page
r = ApiRequest(request_type, self._base_url + '/' + url.lstrip('/'),
self._token, params, sudo, page, per_page)
return r.content
@property
def users(self):
u = self.request(RequestType.GET, '/users')
return Users(u)
def users_by_name(self, name):
params = {'search': name}
u = self.request(RequestType.GET, '/users', params=params)
return Users(u)
## Instruction:
Create exactly one users function.
## Code After:
_defaults = {
'api_url': 'api/v3',
}
from .apirequest import ApiRequest, RequestType
from .users import Users
class Pyglab(object):
def __init__(self, url, token, api_url=_defaults['api_url']):
self._base_url = url.rstrip('/') + '/' + api_url.strip()
self._token = token
self._user = None
self._per_page = None
def sudo(self, user):
"""Permanently set a different username. Returns the old username."""
previous_user = self._user
self._user = user
return previous_user
def request(self, request_type, url, params={}, sudo=None, page=None,
per_page=None):
if sudo is None and self._user is not None:
sudo = _self.user
if per_page is None and self._per_page is None:
per_page = self._per_page
r = ApiRequest(request_type, self._base_url + '/' + url.lstrip('/'),
self._token, params, sudo, page, per_page)
return r.content
@property
def users(self):
return Users(self)
|
...
def users(self):
return Users(self)
...
|
6093d2954861f2783da3e5b8473cb13b0469685b
|
elasticquery/filterquery.py
|
elasticquery/filterquery.py
|
import json
from .util import make_dsl_object, unroll_definitions, unroll_struct
class MetaFilterQuery(type):
def __init__(cls, name, bases, d):
super(MetaFilterQuery, cls).__init__(name, bases, d)
unroll_definitions(cls._definitions)
def __getattr__(cls, key):
if key not in cls._definitions:
raise cls._exception(key)
return lambda *args, **kwargs: make_dsl_object(
cls, key, cls._definitions[key],
*args, **kwargs
)
class BaseFilterQuery(object):
_type = None
_struct = None
_dsl_type = None
def __init__(self, dsl_type, struct):
self._struct = struct
self._dsl_type = dsl_type
def dict(self):
return {
self._dsl_type: unroll_struct(self._struct)
}
def __str__(self):
return json.dumps(self.dict(), indent=4)
|
import json
from .util import make_dsl_object, unroll_definitions, unroll_struct
class MetaFilterQuery(type):
def __init__(cls, name, bases, d):
super(MetaFilterQuery, cls).__init__(name, bases, d)
unroll_definitions(cls._definitions)
def __getattr__(cls, key):
if key == '__test__':
return None
if key not in cls._definitions:
raise cls._exception(key)
return lambda *args, **kwargs: make_dsl_object(
cls, key, cls._definitions[key],
*args, **kwargs
)
class BaseFilterQuery(object):
_type = None
_struct = None
_dsl_type = None
def __init__(self, dsl_type, struct):
self._struct = struct
self._dsl_type = dsl_type
def dict(self):
dsl_type = self._dsl_type[:1] if self._dsl_type.endswith('_') else self._dsl_type
return {
dsl_type: unroll_struct(self._struct)
}
def __str__(self):
return json.dumps(self.dict(), indent=4)
|
Support nosetests, handle magic names (and_, or_, etc)
|
Support nosetests, handle magic names (and_, or_, etc)
|
Python
|
mit
|
Fizzadar/ElasticQuery,Fizzadar/ElasticQuery
|
import json
from .util import make_dsl_object, unroll_definitions, unroll_struct
class MetaFilterQuery(type):
def __init__(cls, name, bases, d):
super(MetaFilterQuery, cls).__init__(name, bases, d)
unroll_definitions(cls._definitions)
def __getattr__(cls, key):
+ if key == '__test__':
+ return None
+
if key not in cls._definitions:
raise cls._exception(key)
return lambda *args, **kwargs: make_dsl_object(
cls, key, cls._definitions[key],
*args, **kwargs
)
class BaseFilterQuery(object):
_type = None
_struct = None
_dsl_type = None
def __init__(self, dsl_type, struct):
self._struct = struct
self._dsl_type = dsl_type
def dict(self):
+ dsl_type = self._dsl_type[:1] if self._dsl_type.endswith('_') else self._dsl_type
+
return {
- self._dsl_type: unroll_struct(self._struct)
+ dsl_type: unroll_struct(self._struct)
}
def __str__(self):
return json.dumps(self.dict(), indent=4)
|
Support nosetests, handle magic names (and_, or_, etc)
|
## Code Before:
import json
from .util import make_dsl_object, unroll_definitions, unroll_struct
class MetaFilterQuery(type):
def __init__(cls, name, bases, d):
super(MetaFilterQuery, cls).__init__(name, bases, d)
unroll_definitions(cls._definitions)
def __getattr__(cls, key):
if key not in cls._definitions:
raise cls._exception(key)
return lambda *args, **kwargs: make_dsl_object(
cls, key, cls._definitions[key],
*args, **kwargs
)
class BaseFilterQuery(object):
_type = None
_struct = None
_dsl_type = None
def __init__(self, dsl_type, struct):
self._struct = struct
self._dsl_type = dsl_type
def dict(self):
return {
self._dsl_type: unroll_struct(self._struct)
}
def __str__(self):
return json.dumps(self.dict(), indent=4)
## Instruction:
Support nosetests, handle magic names (and_, or_, etc)
## Code After:
import json
from .util import make_dsl_object, unroll_definitions, unroll_struct
class MetaFilterQuery(type):
def __init__(cls, name, bases, d):
super(MetaFilterQuery, cls).__init__(name, bases, d)
unroll_definitions(cls._definitions)
def __getattr__(cls, key):
if key == '__test__':
return None
if key not in cls._definitions:
raise cls._exception(key)
return lambda *args, **kwargs: make_dsl_object(
cls, key, cls._definitions[key],
*args, **kwargs
)
class BaseFilterQuery(object):
_type = None
_struct = None
_dsl_type = None
def __init__(self, dsl_type, struct):
self._struct = struct
self._dsl_type = dsl_type
def dict(self):
dsl_type = self._dsl_type[:1] if self._dsl_type.endswith('_') else self._dsl_type
return {
dsl_type: unroll_struct(self._struct)
}
def __str__(self):
return json.dumps(self.dict(), indent=4)
|
# ... existing code ...
def __getattr__(cls, key):
if key == '__test__':
return None
if key not in cls._definitions:
# ... modified code ...
def dict(self):
dsl_type = self._dsl_type[:1] if self._dsl_type.endswith('_') else self._dsl_type
return {
dsl_type: unroll_struct(self._struct)
}
# ... rest of the code ...
|
188e4e6d3419793ae8811eb66d94e31849af3461
|
conf_site/core/forms.py
|
conf_site/core/forms.py
|
from django import forms
class CsvUploadForm(forms.Form):
"""Form for uploading a CSV file."""
csv_file = forms.FileField(label="Please upload a CSV file.")
|
from django import forms
class CsvUploadForm(forms.Form):
"""Form for uploading a CSV file."""
csv_file = forms.FileField(label="Please upload a CSV file.")
def _is_csv_file(self, file_data):
"""
Test whether an uploaded file is a CSV file.
Returns a list of a boolean of the results and the uploaded content
type.
"""
uploaded_content_type = getattr(file_data, "content_type", "text/csv")
return [uploaded_content_type == "text/csv", uploaded_content_type]
def clean_csv_file(self, *args, **kwargs):
data = super().clean(*args, **kwargs)
results = self._is_csv_file(data["csv_file"])
if not results[0]:
raise forms.ValidationError(
"Only CSV files ('text/csv') can be uploaded with this form. "
"You uploaded a '{}' file.".format(results[1])
)
return data
|
Test whether uploaded CSV has correct mime type.
|
Test whether uploaded CSV has correct mime type.
Add cleaning method to CsvUploadForm to ensure that uploaded file has
either the mime type for a CSV file or no mime type. Return error if
user uploads a different mime type.
|
Python
|
mit
|
pydata/conf_site,pydata/conf_site,pydata/conf_site
|
from django import forms
class CsvUploadForm(forms.Form):
"""Form for uploading a CSV file."""
csv_file = forms.FileField(label="Please upload a CSV file.")
+ def _is_csv_file(self, file_data):
+ """
+ Test whether an uploaded file is a CSV file.
+
+ Returns a list of a boolean of the results and the uploaded content
+ type.
+ """
+ uploaded_content_type = getattr(file_data, "content_type", "text/csv")
+ return [uploaded_content_type == "text/csv", uploaded_content_type]
+
+ def clean_csv_file(self, *args, **kwargs):
+ data = super().clean(*args, **kwargs)
+ results = self._is_csv_file(data["csv_file"])
+ if not results[0]:
+ raise forms.ValidationError(
+ "Only CSV files ('text/csv') can be uploaded with this form. "
+ "You uploaded a '{}' file.".format(results[1])
+ )
+ return data
+
|
Test whether uploaded CSV has correct mime type.
|
## Code Before:
from django import forms
class CsvUploadForm(forms.Form):
"""Form for uploading a CSV file."""
csv_file = forms.FileField(label="Please upload a CSV file.")
## Instruction:
Test whether uploaded CSV has correct mime type.
## Code After:
from django import forms
class CsvUploadForm(forms.Form):
"""Form for uploading a CSV file."""
csv_file = forms.FileField(label="Please upload a CSV file.")
def _is_csv_file(self, file_data):
"""
Test whether an uploaded file is a CSV file.
Returns a list of a boolean of the results and the uploaded content
type.
"""
uploaded_content_type = getattr(file_data, "content_type", "text/csv")
return [uploaded_content_type == "text/csv", uploaded_content_type]
def clean_csv_file(self, *args, **kwargs):
data = super().clean(*args, **kwargs)
results = self._is_csv_file(data["csv_file"])
if not results[0]:
raise forms.ValidationError(
"Only CSV files ('text/csv') can be uploaded with this form. "
"You uploaded a '{}' file.".format(results[1])
)
return data
|
// ... existing code ...
csv_file = forms.FileField(label="Please upload a CSV file.")
def _is_csv_file(self, file_data):
"""
Test whether an uploaded file is a CSV file.
Returns a list of a boolean of the results and the uploaded content
type.
"""
uploaded_content_type = getattr(file_data, "content_type", "text/csv")
return [uploaded_content_type == "text/csv", uploaded_content_type]
def clean_csv_file(self, *args, **kwargs):
data = super().clean(*args, **kwargs)
results = self._is_csv_file(data["csv_file"])
if not results[0]:
raise forms.ValidationError(
"Only CSV files ('text/csv') can be uploaded with this form. "
"You uploaded a '{}' file.".format(results[1])
)
return data
// ... rest of the code ...
|
8d34496986e68de8aa1a691a494da08f523cb034
|
oauthenticator/tests/conftest.py
|
oauthenticator/tests/conftest.py
|
"""Py.Test fixtures"""
from tornado.httpclient import AsyncHTTPClient
from pytest import fixture
from .mocks import MockAsyncHTTPClient
@fixture
def client(io_loop, request):
"""Return mocked AsyncHTTPClient"""
before = AsyncHTTPClient.configured_class()
AsyncHTTPClient.configure(MockAsyncHTTPClient)
request.addfinalizer(lambda : AsyncHTTPClient.configure(before))
c = AsyncHTTPClient()
assert isinstance(c, MockAsyncHTTPClient)
return c
|
"""Py.Test fixtures"""
from tornado.httpclient import AsyncHTTPClient
from tornado import ioloop
from pytest import fixture
from .mocks import MockAsyncHTTPClient
@fixture
def io_loop(request):
"""Same as pytest-tornado.io_loop, adapted for tornado 5"""
io_loop = ioloop.IOLoop()
io_loop.make_current()
def _close():
io_loop.clear_current()
io_loop.close(all_fds=True)
request.addfinalizer(_close)
return io_loop
@fixture
def client(io_loop, request):
"""Return mocked AsyncHTTPClient"""
before = AsyncHTTPClient.configured_class()
AsyncHTTPClient.configure(MockAsyncHTTPClient)
request.addfinalizer(lambda : AsyncHTTPClient.configure(before))
c = AsyncHTTPClient()
assert isinstance(c, MockAsyncHTTPClient)
return c
|
Add ioloop fixture that works with tornado 5
|
Add ioloop fixture that works with tornado 5
|
Python
|
bsd-3-clause
|
maltevogl/oauthenticator,minrk/oauthenticator,NickolausDS/oauthenticator,jupyterhub/oauthenticator,jupyter/oauthenticator,jupyter/oauthenticator,enolfc/oauthenticator
|
"""Py.Test fixtures"""
from tornado.httpclient import AsyncHTTPClient
+ from tornado import ioloop
from pytest import fixture
from .mocks import MockAsyncHTTPClient
+
+
+ @fixture
+ def io_loop(request):
+ """Same as pytest-tornado.io_loop, adapted for tornado 5"""
+ io_loop = ioloop.IOLoop()
+ io_loop.make_current()
+
+ def _close():
+ io_loop.clear_current()
+ io_loop.close(all_fds=True)
+
+ request.addfinalizer(_close)
+ return io_loop
+
@fixture
def client(io_loop, request):
"""Return mocked AsyncHTTPClient"""
before = AsyncHTTPClient.configured_class()
AsyncHTTPClient.configure(MockAsyncHTTPClient)
request.addfinalizer(lambda : AsyncHTTPClient.configure(before))
c = AsyncHTTPClient()
assert isinstance(c, MockAsyncHTTPClient)
return c
|
Add ioloop fixture that works with tornado 5
|
## Code Before:
"""Py.Test fixtures"""
from tornado.httpclient import AsyncHTTPClient
from pytest import fixture
from .mocks import MockAsyncHTTPClient
@fixture
def client(io_loop, request):
"""Return mocked AsyncHTTPClient"""
before = AsyncHTTPClient.configured_class()
AsyncHTTPClient.configure(MockAsyncHTTPClient)
request.addfinalizer(lambda : AsyncHTTPClient.configure(before))
c = AsyncHTTPClient()
assert isinstance(c, MockAsyncHTTPClient)
return c
## Instruction:
Add ioloop fixture that works with tornado 5
## Code After:
"""Py.Test fixtures"""
from tornado.httpclient import AsyncHTTPClient
from tornado import ioloop
from pytest import fixture
from .mocks import MockAsyncHTTPClient
@fixture
def io_loop(request):
"""Same as pytest-tornado.io_loop, adapted for tornado 5"""
io_loop = ioloop.IOLoop()
io_loop.make_current()
def _close():
io_loop.clear_current()
io_loop.close(all_fds=True)
request.addfinalizer(_close)
return io_loop
@fixture
def client(io_loop, request):
"""Return mocked AsyncHTTPClient"""
before = AsyncHTTPClient.configured_class()
AsyncHTTPClient.configure(MockAsyncHTTPClient)
request.addfinalizer(lambda : AsyncHTTPClient.configure(before))
c = AsyncHTTPClient()
assert isinstance(c, MockAsyncHTTPClient)
return c
|
# ... existing code ...
from tornado.httpclient import AsyncHTTPClient
from tornado import ioloop
from pytest import fixture
# ... modified code ...
from .mocks import MockAsyncHTTPClient
@fixture
def io_loop(request):
"""Same as pytest-tornado.io_loop, adapted for tornado 5"""
io_loop = ioloop.IOLoop()
io_loop.make_current()
def _close():
io_loop.clear_current()
io_loop.close(all_fds=True)
request.addfinalizer(_close)
return io_loop
# ... rest of the code ...
|
c970661c4525e0f3a9c77935ccfbef62742b18d4
|
csympy/__init__.py
|
csympy/__init__.py
|
from .lib.csympy_wrapper import (Symbol, Integer, sympify, SympifyError, Add,
Mul, Pow, sin, cos, sqrt, function_symbol, I)
from .utilities import var
|
from .lib.csympy_wrapper import (Symbol, Integer, sympify, SympifyError, Add,
Mul, Pow, sin, cos, sqrt, function_symbol, I)
from .utilities import var
def test():
import pytest, os
return not pytest.cmdline.main(
[os.path.dirname(os.path.abspath(__file__))])
|
Add test function so tests can be run from within python terminal
|
Add test function so tests can be run from within python terminal
import csympy
csympy.test()
|
Python
|
mit
|
symengine/symengine.py,bjodah/symengine.py,bjodah/symengine.py,symengine/symengine.py,symengine/symengine.py,bjodah/symengine.py
|
from .lib.csympy_wrapper import (Symbol, Integer, sympify, SympifyError, Add,
Mul, Pow, sin, cos, sqrt, function_symbol, I)
from .utilities import var
+ def test():
+ import pytest, os
+ return not pytest.cmdline.main(
+ [os.path.dirname(os.path.abspath(__file__))])
+
|
Add test function so tests can be run from within python terminal
|
## Code Before:
from .lib.csympy_wrapper import (Symbol, Integer, sympify, SympifyError, Add,
Mul, Pow, sin, cos, sqrt, function_symbol, I)
from .utilities import var
## Instruction:
Add test function so tests can be run from within python terminal
## Code After:
from .lib.csympy_wrapper import (Symbol, Integer, sympify, SympifyError, Add,
Mul, Pow, sin, cos, sqrt, function_symbol, I)
from .utilities import var
def test():
import pytest, os
return not pytest.cmdline.main(
[os.path.dirname(os.path.abspath(__file__))])
|
...
from .utilities import var
def test():
import pytest, os
return not pytest.cmdline.main(
[os.path.dirname(os.path.abspath(__file__))])
...
|
25e7b4a2e297e9944b5065851c6e65eb40b11bcd
|
scripts/examples/OpenMV/99-Tests/unittests.py
|
scripts/examples/OpenMV/99-Tests/unittests.py
|
import os, sensor, gc
TEST_DIR = "unittest"
TEMP_DIR = "unittest/temp"
DATA_DIR = "unittest/data"
SCRIPT_DIR = "unittest/script"
if not (TEST_DIR in os.listdir("")):
raise Exception('Unittest dir not found!')
print("")
test_failed = False
def print_result(test, passed):
s = "Unittest (%s)"%(test)
padding = "."*(60-len(s))
print(s + padding + ("PASSED" if passed == True else "FAILED"))
for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
test_passed = True
test_path = "/".join((SCRIPT_DIR, test))
try:
exec(open(test_path).read())
gc.collect()
if unittest(DATA_DIR, TEMP_DIR) == False:
raise Exception()
except Exception as e:
test_failed = True
test_passed = False
print_result(test, test_passed)
if test_failed:
print("\nSome tests have FAILED!!!\n\n")
else:
print("\nAll tests PASSED.\n\n")
|
import os, sensor, gc
TEST_DIR = "unittest"
TEMP_DIR = "unittest/temp"
DATA_DIR = "unittest/data"
SCRIPT_DIR = "unittest/script"
if not (TEST_DIR in os.listdir("")):
raise Exception('Unittest dir not found!')
print("")
test_failed = False
def print_result(test, result):
s = "Unittest (%s)"%(test)
padding = "."*(60-len(s))
print(s + padding + result)
for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
test_result = "PASSED"
test_path = "/".join((SCRIPT_DIR, test))
try:
exec(open(test_path).read())
gc.collect()
if unittest(DATA_DIR, TEMP_DIR) == False:
raise Exception()
except Exception as e:
test_failed = True
test_result = "DISABLED" if "unavailable" in str(e) else "FAILED"
print_result(test, test_result)
if test_failed:
print("\nSome tests have FAILED!!!\n\n")
else:
print("\nAll tests PASSED.\n\n")
|
Update unittest to ignore disabled functions.
|
Update unittest to ignore disabled functions.
|
Python
|
mit
|
kwagyeman/openmv,kwagyeman/openmv,iabdalkader/openmv,kwagyeman/openmv,iabdalkader/openmv,openmv/openmv,kwagyeman/openmv,iabdalkader/openmv,openmv/openmv,openmv/openmv,iabdalkader/openmv,openmv/openmv
|
import os, sensor, gc
TEST_DIR = "unittest"
TEMP_DIR = "unittest/temp"
DATA_DIR = "unittest/data"
SCRIPT_DIR = "unittest/script"
if not (TEST_DIR in os.listdir("")):
raise Exception('Unittest dir not found!')
print("")
test_failed = False
- def print_result(test, passed):
+ def print_result(test, result):
s = "Unittest (%s)"%(test)
padding = "."*(60-len(s))
- print(s + padding + ("PASSED" if passed == True else "FAILED"))
+ print(s + padding + result)
for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
- test_passed = True
+ test_result = "PASSED"
test_path = "/".join((SCRIPT_DIR, test))
try:
exec(open(test_path).read())
gc.collect()
if unittest(DATA_DIR, TEMP_DIR) == False:
raise Exception()
except Exception as e:
test_failed = True
- test_passed = False
+ test_result = "DISABLED" if "unavailable" in str(e) else "FAILED"
- print_result(test, test_passed)
+ print_result(test, test_result)
if test_failed:
print("\nSome tests have FAILED!!!\n\n")
else:
print("\nAll tests PASSED.\n\n")
|
Update unittest to ignore disabled functions.
|
## Code Before:
import os, sensor, gc
TEST_DIR = "unittest"
TEMP_DIR = "unittest/temp"
DATA_DIR = "unittest/data"
SCRIPT_DIR = "unittest/script"
if not (TEST_DIR in os.listdir("")):
raise Exception('Unittest dir not found!')
print("")
test_failed = False
def print_result(test, passed):
s = "Unittest (%s)"%(test)
padding = "."*(60-len(s))
print(s + padding + ("PASSED" if passed == True else "FAILED"))
for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
test_passed = True
test_path = "/".join((SCRIPT_DIR, test))
try:
exec(open(test_path).read())
gc.collect()
if unittest(DATA_DIR, TEMP_DIR) == False:
raise Exception()
except Exception as e:
test_failed = True
test_passed = False
print_result(test, test_passed)
if test_failed:
print("\nSome tests have FAILED!!!\n\n")
else:
print("\nAll tests PASSED.\n\n")
## Instruction:
Update unittest to ignore disabled functions.
## Code After:
import os, sensor, gc
TEST_DIR = "unittest"
TEMP_DIR = "unittest/temp"
DATA_DIR = "unittest/data"
SCRIPT_DIR = "unittest/script"
if not (TEST_DIR in os.listdir("")):
raise Exception('Unittest dir not found!')
print("")
test_failed = False
def print_result(test, result):
s = "Unittest (%s)"%(test)
padding = "."*(60-len(s))
print(s + padding + result)
for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
test_result = "PASSED"
test_path = "/".join((SCRIPT_DIR, test))
try:
exec(open(test_path).read())
gc.collect()
if unittest(DATA_DIR, TEMP_DIR) == False:
raise Exception()
except Exception as e:
test_failed = True
test_result = "DISABLED" if "unavailable" in str(e) else "FAILED"
print_result(test, test_result)
if test_failed:
print("\nSome tests have FAILED!!!\n\n")
else:
print("\nAll tests PASSED.\n\n")
|
# ... existing code ...
def print_result(test, result):
s = "Unittest (%s)"%(test)
# ... modified code ...
padding = "."*(60-len(s))
print(s + padding + result)
...
if test.endswith(".py"):
test_result = "PASSED"
test_path = "/".join((SCRIPT_DIR, test))
...
test_failed = True
test_result = "DISABLED" if "unavailable" in str(e) else "FAILED"
print_result(test, test_result)
# ... rest of the code ...
|
4e7c71304710178dbd668073ecfca59e8da459df
|
tacker/db/models_v1.py
|
tacker/db/models_v1.py
|
import sqlalchemy as sa
from tacker.openstack.common import uuidutils
class HasTenant(object):
"""Tenant mixin, add to subclasses that have a tenant."""
# NOTE(jkoelker) tenant_id is just a free form string ;(
tenant_id = sa.Column(sa.String(255))
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(sa.String(36),
primary_key=True,
default=uuidutils.generate_uuid)
class HasStatusDescription(object):
"""Status with description mixin."""
status = sa.Column(sa.String(16), nullable=False)
status_description = sa.Column(sa.String(255))
|
import sqlalchemy as sa
from tacker.openstack.common import uuidutils
class HasTenant(object):
"""Tenant mixin, add to subclasses that have a tenant."""
# NOTE(jkoelker) tenant_id is just a free form string ;(
tenant_id = sa.Column(sa.String(255))
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(sa.String(36),
primary_key=True,
default=uuidutils.generate_uuid)
|
Remove unused model class from db layer
|
Remove unused model class from db layer
Change-Id: I42cf91dc3132d0d0f2f509b5350958b7499c68f9
|
Python
|
apache-2.0
|
zeinsteinz/tacker,openstack/tacker,priya-pp/Tacker,trozet/tacker,stackforge/tacker,openstack/tacker,trozet/tacker,priya-pp/Tacker,openstack/tacker,stackforge/tacker,zeinsteinz/tacker
|
import sqlalchemy as sa
from tacker.openstack.common import uuidutils
class HasTenant(object):
"""Tenant mixin, add to subclasses that have a tenant."""
# NOTE(jkoelker) tenant_id is just a free form string ;(
tenant_id = sa.Column(sa.String(255))
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(sa.String(36),
primary_key=True,
default=uuidutils.generate_uuid)
-
- class HasStatusDescription(object):
- """Status with description mixin."""
-
- status = sa.Column(sa.String(16), nullable=False)
- status_description = sa.Column(sa.String(255))
-
|
Remove unused model class from db layer
|
## Code Before:
import sqlalchemy as sa
from tacker.openstack.common import uuidutils
class HasTenant(object):
"""Tenant mixin, add to subclasses that have a tenant."""
# NOTE(jkoelker) tenant_id is just a free form string ;(
tenant_id = sa.Column(sa.String(255))
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(sa.String(36),
primary_key=True,
default=uuidutils.generate_uuid)
class HasStatusDescription(object):
"""Status with description mixin."""
status = sa.Column(sa.String(16), nullable=False)
status_description = sa.Column(sa.String(255))
## Instruction:
Remove unused model class from db layer
## Code After:
import sqlalchemy as sa
from tacker.openstack.common import uuidutils
class HasTenant(object):
"""Tenant mixin, add to subclasses that have a tenant."""
# NOTE(jkoelker) tenant_id is just a free form string ;(
tenant_id = sa.Column(sa.String(255))
class HasId(object):
"""id mixin, add to subclasses that have an id."""
id = sa.Column(sa.String(36),
primary_key=True,
default=uuidutils.generate_uuid)
|
# ... existing code ...
default=uuidutils.generate_uuid)
# ... rest of the code ...
|
eacc1f88f7e34e26c3a4d29ec009b4984c10a345
|
SimPEG/Mesh/__init__.py
|
SimPEG/Mesh/__init__.py
|
from TensorMesh import TensorMesh
from CylMesh import CylMesh
from Cyl1DMesh import Cyl1DMesh
from LogicallyRectMesh import LogicallyRectMesh
from TreeMesh import TreeMesh
from BaseMesh import BaseMesh
|
from TensorMesh import TensorMesh
from CylMesh import CylMesh
from LogicallyRectMesh import LogicallyRectMesh
from TreeMesh import TreeMesh
from BaseMesh import BaseMesh
|
Remove Cyl1DMesh from init file...
|
Remove Cyl1DMesh from init file...
|
Python
|
mit
|
simpeg/discretize,simpeg/simpeg,simpeg/discretize,simpeg/discretize
|
from TensorMesh import TensorMesh
from CylMesh import CylMesh
- from Cyl1DMesh import Cyl1DMesh
from LogicallyRectMesh import LogicallyRectMesh
from TreeMesh import TreeMesh
from BaseMesh import BaseMesh
|
Remove Cyl1DMesh from init file...
|
## Code Before:
from TensorMesh import TensorMesh
from CylMesh import CylMesh
from Cyl1DMesh import Cyl1DMesh
from LogicallyRectMesh import LogicallyRectMesh
from TreeMesh import TreeMesh
from BaseMesh import BaseMesh
## Instruction:
Remove Cyl1DMesh from init file...
## Code After:
from TensorMesh import TensorMesh
from CylMesh import CylMesh
from LogicallyRectMesh import LogicallyRectMesh
from TreeMesh import TreeMesh
from BaseMesh import BaseMesh
|
# ... existing code ...
from CylMesh import CylMesh
from LogicallyRectMesh import LogicallyRectMesh
# ... rest of the code ...
|
3b99493e606a04a6338d8ee2fc299595d19b2a44
|
fabfile.py
|
fabfile.py
|
from fabric.api import local, cd
def docs():
local("./bin/docs")
local("./bin/python setup.py upload_sphinx --upload-dir=docs/html")
def release():
# update version id in setup.py, changelog and docs/source/conf.py
local("python setup.py sdist --formats=gztar,zip upload")
|
from fabric.api import local, cd
def docs():
local("./bin/docs")
local("./bin/python setup.py upload_sphinx --upload-dir=docs/html")
def release():
"""Update version id in setup.py, changelog and docs/source/conf.py."""
local(("python setup.py bdist_egg sdist --formats=bztar,gztar,zip "
"upload --show-response"))
|
Add BzTar and EGG format to Fabric script
|
Add BzTar and EGG format to Fabric script
|
Python
|
bsd-3-clause
|
janusnic/importd,pombredanne/importd,arpitremarkable/importd,pombredanne/importd,akshar-raaj/importd,hitul007/importd,akshar-raaj/importd,hitul007/importd,janusnic/importd,arpitremarkable/importd
|
+
+
from fabric.api import local, cd
+
def docs():
local("./bin/docs")
local("./bin/python setup.py upload_sphinx --upload-dir=docs/html")
+
def release():
- # update version id in setup.py, changelog and docs/source/conf.py
+ """Update version id in setup.py, changelog and docs/source/conf.py."""
- local("python setup.py sdist --formats=gztar,zip upload")
+ local(("python setup.py bdist_egg sdist --formats=bztar,gztar,zip "
+ "upload --show-response"))
|
Add BzTar and EGG format to Fabric script
|
## Code Before:
from fabric.api import local, cd
def docs():
local("./bin/docs")
local("./bin/python setup.py upload_sphinx --upload-dir=docs/html")
def release():
# update version id in setup.py, changelog and docs/source/conf.py
local("python setup.py sdist --formats=gztar,zip upload")
## Instruction:
Add BzTar and EGG format to Fabric script
## Code After:
from fabric.api import local, cd
def docs():
local("./bin/docs")
local("./bin/python setup.py upload_sphinx --upload-dir=docs/html")
def release():
"""Update version id in setup.py, changelog and docs/source/conf.py."""
local(("python setup.py bdist_egg sdist --formats=bztar,gztar,zip "
"upload --show-response"))
|
// ... existing code ...
from fabric.api import local, cd
// ... modified code ...
def release():
"""Update version id in setup.py, changelog and docs/source/conf.py."""
local(("python setup.py bdist_egg sdist --formats=bztar,gztar,zip "
"upload --show-response"))
// ... rest of the code ...
|
6ae83f01eacceb140435e72a216fa88bd97f2b0c
|
pyswarms/utils/console_utils.py
|
pyswarms/utils/console_utils.py
|
""" console_utils.py: various tools for printing into console """
def cli_print(message, verbosity, threshold):
"""Helper function to print console output
Parameters
----------
message : str
the message to be printed into the console
verbosity : int
verbosity setting of the user
threshold : int
threshold for printing
"""
if verbosity >= threshold:
print(message)
else:
pass
def end_report(cost, pos, verbosity):
"""Helper function to print a simple report at the end of the
run. This always has a threshold of 1.
Parameters
----------
cost : float
final cost from the optimization procedure.
pos : numpy.ndarray or list
best position found
verbosity : int
verbosity setting of the user.
"""
# Cuts the length of the best position if it's too long
if len(list(pos)) > 3:
out = ('[ ' + 3 * '{:3f} ' + '...]').format(*list(pos))
else:
out = list(pos)
template = ('================================\n'
'Optimization finished!\n'
'Final cost: {:06.4f}\n'
'Best value: {}\n').format(cost, out)
if verbosity >= 1:
print(template)
|
""" console_utils.py: various tools for printing into console """
# Import from __future__
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
# Import modules
import logging
def cli_print(message, verbosity, threshold, logger):
"""Helper function to print console output
Parameters
----------
message : str
the message to be printed into the console
verbosity : int
verbosity setting of the user
threshold : int
threshold for printing
logger : logging.getLogger
logger instance
"""
if verbosity >= threshold:
logger.info(message)
else:
pass
def end_report(cost, pos, verbosity, logger):
"""Helper function to print a simple report at the end of the
run. This always has a threshold of 1.
Parameters
----------
cost : float
final cost from the optimization procedure.
pos : numpy.ndarray or list
best position found
verbosity : int
verbosity setting of the user.
logger : logging.getLogger
logger instance
"""
# Cuts the length of the best position if it's too long
if len(list(pos)) > 3:
out = ('[ ' + 3 * '{:3f} ' + '...]').format(*list(pos))
else:
out = list(pos)
template = ('================================\n'
'Optimization finished!\n'
'Final cost: {:06.4f}\n'
'Best value: {}\n').format(cost, out)
if verbosity >= 1:
logger.info(template)
|
Add support for logging module
|
Add support for logging module
This package now prints using the logging module. It can still print
onto the console, but an additional tag like INFO, DEBUG, etc. are now
being used.
Author: ljvmiranda921
|
Python
|
mit
|
ljvmiranda921/pyswarms,ljvmiranda921/pyswarms
|
""" console_utils.py: various tools for printing into console """
+ # Import from __future__
+ from __future__ import with_statement
+ from __future__ import absolute_import
+ from __future__ import print_function
+
+ # Import modules
+ import logging
+
- def cli_print(message, verbosity, threshold):
+ def cli_print(message, verbosity, threshold, logger):
"""Helper function to print console output
Parameters
----------
message : str
the message to be printed into the console
verbosity : int
verbosity setting of the user
threshold : int
threshold for printing
+ logger : logging.getLogger
+ logger instance
"""
if verbosity >= threshold:
- print(message)
+ logger.info(message)
else:
pass
- def end_report(cost, pos, verbosity):
+ def end_report(cost, pos, verbosity, logger):
"""Helper function to print a simple report at the end of the
run. This always has a threshold of 1.
Parameters
----------
cost : float
final cost from the optimization procedure.
pos : numpy.ndarray or list
best position found
verbosity : int
verbosity setting of the user.
+ logger : logging.getLogger
+ logger instance
"""
# Cuts the length of the best position if it's too long
if len(list(pos)) > 3:
out = ('[ ' + 3 * '{:3f} ' + '...]').format(*list(pos))
else:
out = list(pos)
template = ('================================\n'
'Optimization finished!\n'
'Final cost: {:06.4f}\n'
'Best value: {}\n').format(cost, out)
if verbosity >= 1:
- print(template)
+ logger.info(template)
|
Add support for logging module
|
## Code Before:
""" console_utils.py: various tools for printing into console """
def cli_print(message, verbosity, threshold):
"""Helper function to print console output
Parameters
----------
message : str
the message to be printed into the console
verbosity : int
verbosity setting of the user
threshold : int
threshold for printing
"""
if verbosity >= threshold:
print(message)
else:
pass
def end_report(cost, pos, verbosity):
"""Helper function to print a simple report at the end of the
run. This always has a threshold of 1.
Parameters
----------
cost : float
final cost from the optimization procedure.
pos : numpy.ndarray or list
best position found
verbosity : int
verbosity setting of the user.
"""
# Cuts the length of the best position if it's too long
if len(list(pos)) > 3:
out = ('[ ' + 3 * '{:3f} ' + '...]').format(*list(pos))
else:
out = list(pos)
template = ('================================\n'
'Optimization finished!\n'
'Final cost: {:06.4f}\n'
'Best value: {}\n').format(cost, out)
if verbosity >= 1:
print(template)
## Instruction:
Add support for logging module
## Code After:
""" console_utils.py: various tools for printing into console """
# Import from __future__
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
# Import modules
import logging
def cli_print(message, verbosity, threshold, logger):
"""Helper function to print console output
Parameters
----------
message : str
the message to be printed into the console
verbosity : int
verbosity setting of the user
threshold : int
threshold for printing
logger : logging.getLogger
logger instance
"""
if verbosity >= threshold:
logger.info(message)
else:
pass
def end_report(cost, pos, verbosity, logger):
"""Helper function to print a simple report at the end of the
run. This always has a threshold of 1.
Parameters
----------
cost : float
final cost from the optimization procedure.
pos : numpy.ndarray or list
best position found
verbosity : int
verbosity setting of the user.
logger : logging.getLogger
logger instance
"""
# Cuts the length of the best position if it's too long
if len(list(pos)) > 3:
out = ('[ ' + 3 * '{:3f} ' + '...]').format(*list(pos))
else:
out = list(pos)
template = ('================================\n'
'Optimization finished!\n'
'Final cost: {:06.4f}\n'
'Best value: {}\n').format(cost, out)
if verbosity >= 1:
logger.info(template)
|
# ... existing code ...
# Import from __future__
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
# Import modules
import logging
def cli_print(message, verbosity, threshold, logger):
"""Helper function to print console output
# ... modified code ...
threshold for printing
logger : logging.getLogger
logger instance
...
if verbosity >= threshold:
logger.info(message)
else:
...
def end_report(cost, pos, verbosity, logger):
"""Helper function to print a simple report at the end of the
...
verbosity setting of the user.
logger : logging.getLogger
logger instance
"""
...
if verbosity >= 1:
logger.info(template)
# ... rest of the code ...
|
7698d7256f7a88b02b3dd02b411532cb4a6a46aa
|
cmi/modify_uri.py
|
cmi/modify_uri.py
|
import os
import sys
import glob
from subprocess import check_call
try:
install_share_dir = sys.argv[1]
cca_dir = os.path.join(install_share_dir, "cca")
print("Modifying *.cca files in " + cca_dir)
for f in glob.glob(os.path.join(cca_dir, "*.cca")):
check_call(["sed", "--in-place", "s/\.la/.so/", f])
except:
print("Error in post-install modification of *.cca files.")
sys.exit(1)
|
import os
import sys
import glob
from subprocess import check_call
try:
install_share_dir = sys.argv[1]
cca_dir = os.path.join(install_share_dir, "cca")
print("Modifying *.cca files in " + cca_dir)
for f in glob.glob(os.path.join(cca_dir, "*.cca")):
check_call(["sed", "--in-place", "s/\.la/.so/", f])
except:
print("Error in modification of *.cca files.")
sys.exit(1)
|
Change wording in error message
|
Change wording in error message
|
Python
|
mit
|
csdms/rpm_tools,csdms/rpm_tools
|
import os
import sys
import glob
from subprocess import check_call
try:
install_share_dir = sys.argv[1]
cca_dir = os.path.join(install_share_dir, "cca")
print("Modifying *.cca files in " + cca_dir)
for f in glob.glob(os.path.join(cca_dir, "*.cca")):
check_call(["sed", "--in-place", "s/\.la/.so/", f])
except:
- print("Error in post-install modification of *.cca files.")
+ print("Error in modification of *.cca files.")
sys.exit(1)
|
Change wording in error message
|
## Code Before:
import os
import sys
import glob
from subprocess import check_call
try:
install_share_dir = sys.argv[1]
cca_dir = os.path.join(install_share_dir, "cca")
print("Modifying *.cca files in " + cca_dir)
for f in glob.glob(os.path.join(cca_dir, "*.cca")):
check_call(["sed", "--in-place", "s/\.la/.so/", f])
except:
print("Error in post-install modification of *.cca files.")
sys.exit(1)
## Instruction:
Change wording in error message
## Code After:
import os
import sys
import glob
from subprocess import check_call
try:
install_share_dir = sys.argv[1]
cca_dir = os.path.join(install_share_dir, "cca")
print("Modifying *.cca files in " + cca_dir)
for f in glob.glob(os.path.join(cca_dir, "*.cca")):
check_call(["sed", "--in-place", "s/\.la/.so/", f])
except:
print("Error in modification of *.cca files.")
sys.exit(1)
|
...
except:
print("Error in modification of *.cca files.")
sys.exit(1)
...
|
bcde8104bd77f18d7061f7f4d4831ad49644a913
|
common/management/commands/build_index.py
|
common/management/commands/build_index.py
|
from django.core.management import BaseCommand
from django.db.models import get_app, get_models
from django.conf import settings
from common.utilities.search_utils import index_instance
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--test',
action='store_true',
dest='test',
default=False,
help='Provide this if you want to create a test index')
def handle(self, *args, **options):
# optimize this to index in bulk
apps_lists = settings.LOCAL_APPS
for app_name in apps_lists:
app = get_app(app_name)
for model in get_models(app):
all_instances = model.objects.all()[0:3] \
if options.get('test') else model.objects.all()
[index_instance(obj) for obj in all_instances]
message = "Indexed {} {}".format(
all_instances.count(),
model._meta.verbose_name_plural.capitalize())
self.stdout.write(message)
self.stdout.write("Finished indexing")
|
from django.core.management import BaseCommand
from django.db.models import get_app, get_models
from django.conf import settings
from common.utilities.search_utils import index_instance
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--test',
action='store_true',
dest='test',
default=False,
help='Provide this if you want to create a test index')
def handle(self, *args, **options):
# optimize this to index in bulk
apps_lists = settings.LOCAL_APPS
for app_name in apps_lists:
app = get_app(app_name)
for model in get_models(app):
if model.__name__.lower() != 'testmodel':
all_instances = model.objects.all()[0:3] \
if options.get('test') else model.objects.all()
[index_instance(obj) for obj in all_instances]
message = "Indexed {} {}".format(
all_instances.count(),
model._meta.verbose_name_plural.capitalize())
self.stdout.write(message)
else:
# relation "common_testmodel" does not exist
# Will be fixed
pass
self.stdout.write("Finished indexing")
|
Check the model beig indexed
|
Check the model beig indexed
|
Python
|
mit
|
urandu/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,urandu/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,urandu/mfl_api,urandu/mfl_api
|
from django.core.management import BaseCommand
from django.db.models import get_app, get_models
from django.conf import settings
from common.utilities.search_utils import index_instance
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--test',
action='store_true',
dest='test',
default=False,
help='Provide this if you want to create a test index')
def handle(self, *args, **options):
# optimize this to index in bulk
apps_lists = settings.LOCAL_APPS
for app_name in apps_lists:
app = get_app(app_name)
for model in get_models(app):
+ if model.__name__.lower() != 'testmodel':
- all_instances = model.objects.all()[0:3] \
+ all_instances = model.objects.all()[0:3] \
- if options.get('test') else model.objects.all()
+ if options.get('test') else model.objects.all()
- [index_instance(obj) for obj in all_instances]
+ [index_instance(obj) for obj in all_instances]
- message = "Indexed {} {}".format(
+ message = "Indexed {} {}".format(
- all_instances.count(),
+ all_instances.count(),
- model._meta.verbose_name_plural.capitalize())
+ model._meta.verbose_name_plural.capitalize())
- self.stdout.write(message)
+ self.stdout.write(message)
+ else:
+ # relation "common_testmodel" does not exist
+ # Will be fixed
+ pass
self.stdout.write("Finished indexing")
|
Check the model being indexed
|
## Code Before:
from django.core.management import BaseCommand
from django.db.models import get_app, get_models
from django.conf import settings
from common.utilities.search_utils import index_instance
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--test',
action='store_true',
dest='test',
default=False,
help='Provide this if you want to create a test index')
def handle(self, *args, **options):
# optimize this to index in bulk
apps_lists = settings.LOCAL_APPS
for app_name in apps_lists:
app = get_app(app_name)
for model in get_models(app):
all_instances = model.objects.all()[0:3] \
if options.get('test') else model.objects.all()
[index_instance(obj) for obj in all_instances]
message = "Indexed {} {}".format(
all_instances.count(),
model._meta.verbose_name_plural.capitalize())
self.stdout.write(message)
self.stdout.write("Finished indexing")
## Instruction:
Check the model being indexed
## Code After:
from django.core.management import BaseCommand
from django.db.models import get_app, get_models
from django.conf import settings
from common.utilities.search_utils import index_instance
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--test',
action='store_true',
dest='test',
default=False,
help='Provide this if you want to create a test index')
def handle(self, *args, **options):
# optimize this to index in bulk
apps_lists = settings.LOCAL_APPS
for app_name in apps_lists:
app = get_app(app_name)
for model in get_models(app):
if model.__name__.lower() != 'testmodel':
all_instances = model.objects.all()[0:3] \
if options.get('test') else model.objects.all()
[index_instance(obj) for obj in all_instances]
message = "Indexed {} {}".format(
all_instances.count(),
model._meta.verbose_name_plural.capitalize())
self.stdout.write(message)
else:
# relation "common_testmodel" does not exist
# Will be fixed
pass
self.stdout.write("Finished indexing")
|
# ... existing code ...
for model in get_models(app):
if model.__name__.lower() != 'testmodel':
all_instances = model.objects.all()[0:3] \
if options.get('test') else model.objects.all()
[index_instance(obj) for obj in all_instances]
message = "Indexed {} {}".format(
all_instances.count(),
model._meta.verbose_name_plural.capitalize())
self.stdout.write(message)
else:
# relation "common_testmodel" does not exist
# Will be fixed
pass
self.stdout.write("Finished indexing")
# ... rest of the code ...
|
cadee051a462de765bab59ac42d6b372fa49c033
|
examples/logfile.py
|
examples/logfile.py
|
from __future__ import unicode_literals, print_function
from twisted.internet.task import react
from eliot.logwriter import ThreadedFileWriter
from eliot import Message, Logger, addDestination
_logger = Logger()
def main(reactor):
print("Logging to example-eliot.log...")
logWriter = ThreadedFileWriter(open("example-eliot.log", "ab"), reactor)
addDestination(logWriter)
# Manually start the service. Normally we'd register ThreadedFileWriter
# with the usual Twisted Service/Application infrastructure.
logWriter.startService()
# Log a message:
Message.new(value="hello", another=1).write(_logger)
# Manually stop the service.
done = logWriter.stopService()
return done
if __name__ == '__main__':
react(main, [])
|
from __future__ import unicode_literals, print_function
from twisted.internet.task import react
from eliot.logwriter import ThreadedFileWriter
from eliot import Message, Logger
_logger = Logger()
def main(reactor):
    """Demonstrate logging through a ThreadedFileWriter service."""
    print("Logging to example-eliot.log...")
    writer = ThreadedFileWriter(open("example-eliot.log", "ab"), reactor)
    # Starting the service also registers it as a destination.  In a
    # real application the usual Twisted Service/Application machinery
    # would start it for us.
    writer.startService()
    # Emit one structured message through the module-level logger:
    Message.new(value="hello", another=1).write(_logger)
    # Stopping the service returns a Deferred that fires once the
    # writer thread has drained and shut down.
    return writer.stopService()
if __name__ == '__main__':
react(main, [])
|
Fix bug where the service was added as a destination one time too many.
|
Fix bug where the service was added as a destination one time too many.
|
Python
|
apache-2.0
|
iffy/eliot,ClusterHQ/eliot,ScatterHQ/eliot,ScatterHQ/eliot,ScatterHQ/eliot
|
from __future__ import unicode_literals, print_function
from twisted.internet.task import react
from eliot.logwriter import ThreadedFileWriter
- from eliot import Message, Logger, addDestination
+ from eliot import Message, Logger
_logger = Logger()
def main(reactor):
print("Logging to example-eliot.log...")
logWriter = ThreadedFileWriter(open("example-eliot.log", "ab"), reactor)
- addDestination(logWriter)
- # Manually start the service. Normally we'd register ThreadedFileWriter
+ # Manually start the service, which will add it as a
+ # destination. Normally we'd register ThreadedFileWriter with the usual
- # with the usual Twisted Service/Application infrastructure.
+ # Twisted Service/Application infrastructure.
logWriter.startService()
# Log a message:
Message.new(value="hello", another=1).write(_logger)
# Manually stop the service.
done = logWriter.stopService()
return done
if __name__ == '__main__':
react(main, [])
|
Fix bug where the service was added as a destination one time too many.
|
## Code Before:
from __future__ import unicode_literals, print_function
from twisted.internet.task import react
from eliot.logwriter import ThreadedFileWriter
from eliot import Message, Logger, addDestination
_logger = Logger()
def main(reactor):
print("Logging to example-eliot.log...")
logWriter = ThreadedFileWriter(open("example-eliot.log", "ab"), reactor)
addDestination(logWriter)
# Manually start the service. Normally we'd register ThreadedFileWriter
# with the usual Twisted Service/Application infrastructure.
logWriter.startService()
# Log a message:
Message.new(value="hello", another=1).write(_logger)
# Manually stop the service.
done = logWriter.stopService()
return done
if __name__ == '__main__':
react(main, [])
## Instruction:
Fix bug where the service was added as a destination one time too many.
## Code After:
from __future__ import unicode_literals, print_function
from twisted.internet.task import react
from eliot.logwriter import ThreadedFileWriter
from eliot import Message, Logger
_logger = Logger()
def main(reactor):
print("Logging to example-eliot.log...")
logWriter = ThreadedFileWriter(open("example-eliot.log", "ab"), reactor)
# Manually start the service, which will add it as a
# destination. Normally we'd register ThreadedFileWriter with the usual
# Twisted Service/Application infrastructure.
logWriter.startService()
# Log a message:
Message.new(value="hello", another=1).write(_logger)
# Manually stop the service.
done = logWriter.stopService()
return done
if __name__ == '__main__':
react(main, [])
|
...
from eliot.logwriter import ThreadedFileWriter
from eliot import Message, Logger
...
logWriter = ThreadedFileWriter(open("example-eliot.log", "ab"), reactor)
# Manually start the service, which will add it as a
# destination. Normally we'd register ThreadedFileWriter with the usual
# Twisted Service/Application infrastructure.
logWriter.startService()
...
|
89593cc22f8de4bdb6d605b2e4d6e04b0d1fcd61
|
microcosm_postgres/types.py
|
microcosm_postgres/types.py
|
from six import text_type
from sqlalchemy.types import TypeDecorator, Unicode
class EnumType(TypeDecorator):
"""
SQLAlchemy enum type that persists the enum name (not value).
Note that this type is very similar to the `ChoiceType` from `sqlalchemy_utils`,
with the key difference being persisting by name (and not value).
"""
impl = Unicode(255)
def __init__(self, enum_class):
self.enum_class = enum_class
@property
def python_type(self):
return self.impl.python_type
def process_bind_param(self, value, dialect):
if value is None:
return None
return text_type(self.enum_class(value).name)
def process_result_value(self, value, dialect):
if value is None:
return None
return self.enum_class[value]
|
from enum import Enum
from six import text_type
from sqlalchemy.types import TypeDecorator, Unicode
class EnumType(TypeDecorator):
    """
    SQLAlchemy enum type that persists the enum name (not value).

    Note that this type is very similar to the `ChoiceType` from
    `sqlalchemy_utils`, with the key difference being persisting by
    name (and not value).
    """
    impl = Unicode(255)

    def __init__(self, enum_class):
        self.enum_class = enum_class

    @property
    def python_type(self):
        return self.impl.python_type

    def process_bind_param(self, value, dialect):
        if value is None:
            return None
        # Accept either an enum member (coerce through the enum class)
        # or a bare member name (look it up by name); store the name.
        if isinstance(value, Enum):
            member = self.enum_class(value)
        else:
            member = self.enum_class[value]
        return text_type(member.name)

    def process_result_value(self, value, dialect):
        if value is None:
            return None
        # Stored text is the member name, so look the member up by name.
        return self.enum_class[value]
|
Handle non-enum inputs (if they are enum names)
|
Handle non-enum inputs (if they are enum names)
|
Python
|
apache-2.0
|
globality-corp/microcosm-postgres,globality-corp/microcosm-postgres
|
+ from enum import Enum
from six import text_type
from sqlalchemy.types import TypeDecorator, Unicode
class EnumType(TypeDecorator):
"""
SQLAlchemy enum type that persists the enum name (not value).
Note that this type is very similar to the `ChoiceType` from `sqlalchemy_utils`,
with the key difference being persisting by name (and not value).
"""
impl = Unicode(255)
def __init__(self, enum_class):
self.enum_class = enum_class
@property
def python_type(self):
return self.impl.python_type
def process_bind_param(self, value, dialect):
if value is None:
return None
+ if isinstance(value, Enum):
- return text_type(self.enum_class(value).name)
+ return text_type(self.enum_class(value).name)
+ return text_type(self.enum_class[value].name)
def process_result_value(self, value, dialect):
if value is None:
return None
return self.enum_class[value]
|
Handle non-enum inputs (if they are enum names)
|
## Code Before:
from six import text_type
from sqlalchemy.types import TypeDecorator, Unicode
class EnumType(TypeDecorator):
"""
SQLAlchemy enum type that persists the enum name (not value).
Note that this type is very similar to the `ChoiceType` from `sqlalchemy_utils`,
with the key difference being persisting by name (and not value).
"""
impl = Unicode(255)
def __init__(self, enum_class):
self.enum_class = enum_class
@property
def python_type(self):
return self.impl.python_type
def process_bind_param(self, value, dialect):
if value is None:
return None
return text_type(self.enum_class(value).name)
def process_result_value(self, value, dialect):
if value is None:
return None
return self.enum_class[value]
## Instruction:
Handle non-enum inputs (if they are enum names)
## Code After:
from enum import Enum
from six import text_type
from sqlalchemy.types import TypeDecorator, Unicode
class EnumType(TypeDecorator):
"""
SQLAlchemy enum type that persists the enum name (not value).
Note that this type is very similar to the `ChoiceType` from `sqlalchemy_utils`,
with the key difference being persisting by name (and not value).
"""
impl = Unicode(255)
def __init__(self, enum_class):
self.enum_class = enum_class
@property
def python_type(self):
return self.impl.python_type
def process_bind_param(self, value, dialect):
if value is None:
return None
if isinstance(value, Enum):
return text_type(self.enum_class(value).name)
return text_type(self.enum_class[value].name)
def process_result_value(self, value, dialect):
if value is None:
return None
return self.enum_class[value]
|
...
from enum import Enum
from six import text_type
...
return None
if isinstance(value, Enum):
return text_type(self.enum_class(value).name)
return text_type(self.enum_class[value].name)
...
|
279f0b984209f27743791aca9cf3da7941e5d520
|
zephyr/lib/minify.py
|
zephyr/lib/minify.py
|
from django.conf import settings
from hashlib import sha1
from os import path
from pipeline.compressors import SubProcessCompressor
class ClosureSourceMapCompressor(SubProcessCompressor):
def compress_js(self, js):
# js is the full text of the JavaScript source, and we can't
# easily get either the input file names or the output file
# name. So we just pick a unique arbitrary name. This is
# okay because we can figure out from the source map file
# contents which JavaScript files it corresponds to.
source_map = path.join(settings.PIPELINE_CLOSURE_SOURCE_MAP_DIR,
sha1(js).hexdigest() + '.map')
command = '%s --create_source_map %s' % (
settings.PIPELINE_CLOSURE_BINARY, source_map)
return self.execute_command(command, js)
|
from django.conf import settings
from hashlib import sha1
from os import path
from pipeline.compressors import SubProcessCompressor
class ClosureSourceMapCompressor(SubProcessCompressor):
    """Closure-compiler compressor that also writes out a source map."""

    def compress_js(self, js):
        # js is the full text of the JavaScript source, and we can't
        # easily get either the input file names or the output file
        # name, so the map file gets a content-derived name; the map's
        # own contents let us match it back to its JavaScript later.
        #
        # Hack: any large input is assumed to be app.js, which is
        # roughly 60x bigger than every other input at present, so its
        # map gets a predictable name.
        if len(js) > 100000:
            map_name = 'app.js.map'
        else:
            map_name = sha1(js).hexdigest() + '.map'
        source_map = path.join(
            settings.PIPELINE_CLOSURE_SOURCE_MAP_DIR, map_name)
        command = '%s --create_source_map %s' % (
            settings.PIPELINE_CLOSURE_BINARY, source_map)
        return self.execute_command(command, js)
|
Make it easier to find the source map for app.js
|
Make it easier to find the source map for app.js
(imported from commit bca27c9838573fb4b74e2d269b253a48702c9e1c)
|
Python
|
apache-2.0
|
gigawhitlocks/zulip,hayderimran7/zulip,dhcrzf/zulip,jackrzhang/zulip,MariaFaBella85/zulip,verma-varsha/zulip,esander91/zulip,jonesgithub/zulip,johnnygaddarr/zulip,hackerkid/zulip,joshisa/zulip,Frouk/zulip,arpitpanwar/zulip,qq1012803704/zulip,dawran6/zulip,RobotCaleb/zulip,wavelets/zulip,qq1012803704/zulip,jerryge/zulip,isht3/zulip,itnihao/zulip,hj3938/zulip,ufosky-server/zulip,yuvipanda/zulip,pradiptad/zulip,adnanh/zulip,xuanhan863/zulip,ahmadassaf/zulip,jimmy54/zulip,MariaFaBella85/zulip,dattatreya303/zulip,timabbott/zulip,he15his/zulip,ryanbackman/zulip,Drooids/zulip,PaulPetring/zulip,yocome/zulip,JanzTam/zulip,karamcnair/zulip,j831/zulip,atomic-labs/zulip,rht/zulip,Cheppers/zulip,sharmaeklavya2/zulip,Vallher/zulip,moria/zulip,paxapy/zulip,wangdeshui/zulip,m1ssou/zulip,dwrpayne/zulip,vikas-parashar/zulip,christi3k/zulip,amyliu345/zulip,andersk/zulip,xuxiao/zulip,moria/zulip,bluesea/zulip,aakash-cr7/zulip,aps-sids/zulip,jerryge/zulip,so0k/zulip,cosmicAsymmetry/zulip,Diptanshu8/zulip,vaidap/zulip,PaulPetring/zulip,developerfm/zulip,blaze225/zulip,vikas-parashar/zulip,Qgap/zulip,kou/zulip,PhilSk/zulip,hj3938/zulip,hafeez3000/zulip,paxapy/zulip,ahmadassaf/zulip,andersk/zulip,KingxBanana/zulip,wangdeshui/zulip,proliming/zulip,ufosky-server/zulip,dwrpayne/zulip,hengqujushi/zulip,susansls/zulip,praveenaki/zulip,gigawhitlocks/zulip,DazWorrall/zulip,huangkebo/zulip,technicalpickles/zulip,thomasboyt/zulip,levixie/zulip,guiquanz/zulip,Qgap/zulip,avastu/zulip,yocome/zulip,guiquanz/zulip,umkay/zulip,saitodisse/zulip,mansilladev/zulip,JanzTam/zulip,Jianchun1/zulip,amanharitsh123/zulip,hj3938/zulip,Frouk/zulip,pradiptad/zulip,zofuthan/zulip,dwrpayne/zulip,xuanhan863/zulip,hackerkid/zulip,cosmicAsymmetry/zulip,huangkebo/zulip,kaiyuanheshang/zulip,hafeez3000/zulip,luyifan/zulip,bitemyapp/zulip,adnanh/zulip,jessedhillon/zulip,praveenaki/zulip,xuanhan863/zulip,noroot/zulip,armooo/zulip,reyha/zulip,sharmaeklavya2/zulip,armooo/zulip,verma-varsha/zulip,Batterfii/zulip,suxinde2009/zulip
,KingxBanana/zulip,eastlhu/zulip,willingc/zulip,peguin40/zulip,samatdav/zulip,AZtheAsian/zulip,aakash-cr7/zulip,tbutter/zulip,ryansnowboarder/zulip,johnnygaddarr/zulip,stamhe/zulip,atomic-labs/zulip,guiquanz/zulip,dwrpayne/zulip,codeKonami/zulip,ericzhou2008/zulip,amanharitsh123/zulip,SmartPeople/zulip,stamhe/zulip,dwrpayne/zulip,ikasumiwt/zulip,aps-sids/zulip,amallia/zulip,ufosky-server/zulip,voidException/zulip,eeshangarg/zulip,hayderimran7/zulip,karamcnair/zulip,zacps/zulip,dawran6/zulip,sup95/zulip,yuvipanda/zulip,karamcnair/zulip,ericzhou2008/zulip,dattatreya303/zulip,hafeez3000/zulip,shrikrishnaholla/zulip,joshisa/zulip,mohsenSy/zulip,udxxabp/zulip,swinghu/zulip,jeffcao/zulip,ipernet/zulip,peiwei/zulip,vakila/zulip,esander91/zulip,jainayush975/zulip,stamhe/zulip,amyliu345/zulip,niftynei/zulip,KJin99/zulip,guiquanz/zulip,babbage/zulip,shrikrishnaholla/zulip,saitodisse/zulip,Frouk/zulip,m1ssou/zulip,amallia/zulip,KJin99/zulip,babbage/zulip,codeKonami/zulip,levixie/zulip,he15his/zulip,johnnygaddarr/zulip,he15his/zulip,swinghu/zulip,sonali0901/zulip,akuseru/zulip,ryansnowboarder/zulip,tdr130/zulip,dxq-git/zulip,MariaFaBella85/zulip,j831/zulip,zacps/zulip,zofuthan/zulip,joshisa/zulip,natanovia/zulip,vaidap/zulip,tdr130/zulip,thomasboyt/zulip,deer-hope/zulip,dxq-git/zulip,aliceriot/zulip,noroot/zulip,noroot/zulip,wdaher/zulip,souravbadami/zulip,andersk/zulip,ApsOps/zulip,andersk/zulip,susansls/zulip,PaulPetring/zulip,jainayush975/zulip,KJin99/zulip,peguin40/zulip,glovebx/zulip,sup95/zulip,vaidap/zulip,SmartPeople/zulip,zacps/zulip,EasonYi/zulip,schatt/zulip,zachallaun/zulip,jonesgithub/zulip,synicalsyntax/zulip,zofuthan/zulip,wavelets/zulip,vabs22/zulip,armooo/zulip,AZtheAsian/zulip,jphilipsen05/zulip,eastlhu/zulip,JPJPJPOPOP/zulip,lfranchi/zulip,j831/zulip,aakash-cr7/zulip,zwily/zulip,Suninus/zulip,voidException/zulip,udxxabp/zulip,technicalpickles/zulip,krtkmj/zulip,zorojean/zulip,voidException/zulip,jerryge/zulip,rishig/zulip,arpitpanwar/zulip,hackerkid/zulip,glo
vebx/zulip,bitemyapp/zulip,so0k/zulip,LeeRisk/zulip,MayB/zulip,bowlofstew/zulip,mansilladev/zulip,KJin99/zulip,alliejones/zulip,reyha/zulip,MayB/zulip,gigawhitlocks/zulip,grave-w-grave/zulip,dnmfarrell/zulip,jeffcao/zulip,lfranchi/zulip,hafeez3000/zulip,Juanvulcano/zulip,dxq-git/zulip,timabbott/zulip,fw1121/zulip,dxq-git/zulip,saitodisse/zulip,jackrzhang/zulip,eeshangarg/zulip,krtkmj/zulip,sharmaeklavya2/zulip,hackerkid/zulip,synicalsyntax/zulip,rht/zulip,bastianh/zulip,mdavid/zulip,tdr130/zulip,dwrpayne/zulip,suxinde2009/zulip,brockwhittaker/zulip,ikasumiwt/zulip,Frouk/zulip,Gabriel0402/zulip,Jianchun1/zulip,arpith/zulip,umkay/zulip,christi3k/zulip,tommyip/zulip,thomasboyt/zulip,dotcool/zulip,ufosky-server/zulip,swinghu/zulip,Qgap/zulip,showell/zulip,LeeRisk/zulip,johnnygaddarr/zulip,pradiptad/zulip,arpitpanwar/zulip,zofuthan/zulip,aakash-cr7/zulip,wangdeshui/zulip,easyfmxu/zulip,zulip/zulip,Galexrt/zulip,yocome/zulip,johnnygaddarr/zulip,sonali0901/zulip,Cheppers/zulip,proliming/zulip,Juanvulcano/zulip,peguin40/zulip,jeffcao/zulip,developerfm/zulip,saitodisse/zulip,itnihao/zulip,arpith/zulip,gkotian/zulip,shrikrishnaholla/zulip,technicalpickles/zulip,tommyip/zulip,rishig/zulip,wavelets/zulip,JanzTam/zulip,bastianh/zulip,punchagan/zulip,developerfm/zulip,Qgap/zulip,schatt/zulip,Suninus/zulip,ashwinirudrappa/zulip,jeffcao/zulip,tommyip/zulip,dhcrzf/zulip,noroot/zulip,tbutter/zulip,praveenaki/zulip,Diptanshu8/zulip,MayB/zulip,paxapy/zulip,bluesea/zulip,suxinde2009/zulip,xuanhan863/zulip,brainwane/zulip,arpitpanwar/zulip,xuxiao/zulip,christi3k/zulip,esander91/zulip,zorojean/zulip,pradiptad/zulip,schatt/zulip,firstblade/zulip,KingxBanana/zulip,tommyip/zulip,JPJPJPOPOP/zulip,zachallaun/zulip,adnanh/zulip,eeshangarg/zulip,developerfm/zulip,nicholasbs/zulip,jerryge/zulip,ahmadassaf/zulip,timabbott/zulip,seapasulli/zulip,zorojean/zulip,ApsOps/zulip,synicalsyntax/zulip,sharmaeklavya2/zulip,wweiradio/zulip,MariaFaBella85/zulip,PaulPetring/zulip,codeKonami/zulip,swinghu/zulip,
Gabriel0402/zulip,dattatreya303/zulip,timabbott/zulip,bssrdf/zulip,ipernet/zulip,Galexrt/zulip,Qgap/zulip,peguin40/zulip,jphilipsen05/zulip,jonesgithub/zulip,saitodisse/zulip,jrowan/zulip,yocome/zulip,ashwinirudrappa/zulip,pradiptad/zulip,Batterfii/zulip,eeshangarg/zulip,bssrdf/zulip,eastlhu/zulip,deer-hope/zulip,blaze225/zulip,DazWorrall/zulip,udxxabp/zulip,vakila/zulip,sup95/zulip,souravbadami/zulip,punchagan/zulip,huangkebo/zulip,natanovia/zulip,moria/zulip,joshisa/zulip,vikas-parashar/zulip,thomasboyt/zulip,adnanh/zulip,andersk/zulip,zulip/zulip,nicholasbs/zulip,amallia/zulip,firstblade/zulip,reyha/zulip,themass/zulip,tbutter/zulip,shaunstanislaus/zulip,jeffcao/zulip,shrikrishnaholla/zulip,verma-varsha/zulip,synicalsyntax/zulip,vabs22/zulip,JanzTam/zulip,vikas-parashar/zulip,DazWorrall/zulip,guiquanz/zulip,fw1121/zulip,bluesea/zulip,shaunstanislaus/zulip,sup95/zulip,isht3/zulip,easyfmxu/zulip,krtkmj/zulip,akuseru/zulip,udxxabp/zulip,punchagan/zulip,akuseru/zulip,noroot/zulip,adnanh/zulip,nicholasbs/zulip,easyfmxu/zulip,Diptanshu8/zulip,Jianchun1/zulip,schatt/zulip,jimmy54/zulip,umkay/zulip,showell/zulip,hackerkid/zulip,synicalsyntax/zulip,AZtheAsian/zulip,isht3/zulip,he15his/zulip,jerryge/zulip,xuxiao/zulip,themass/zulip,Frouk/zulip,johnny9/zulip,jackrzhang/zulip,isht3/zulip,bastianh/zulip,calvinleenyc/zulip,zulip/zulip,amanharitsh123/zulip,eastlhu/zulip,lfranchi/zulip,punchagan/zulip,mohsenSy/zulip,fw1121/zulip,blaze225/zulip,itnihao/zulip,ashwinirudrappa/zulip,tdr130/zulip,qq1012803704/zulip,sup95/zulip,jessedhillon/zulip,ericzhou2008/zulip,jonesgithub/zulip,noroot/zulip,gkotian/zulip,glovebx/zulip,LAndreas/zulip,glovebx/zulip,PaulPetring/zulip,alliejones/zulip,rishig/zulip,verma-varsha/zulip,peiwei/zulip,peiwei/zulip,hengqujushi/zulip,seapasulli/zulip,grave-w-grave/zulip,DazWorrall/zulip,kaiyuanheshang/zulip,reyha/zulip,hayderimran7/zulip,gigawhitlocks/zulip,dxq-git/zulip,KingxBanana/zulip,showell/zulip,zorojean/zulip,grave-w-grave/zulip,lfranchi/zulip,mohsen
Sy/zulip,SmartPeople/zulip,kokoar/zulip,fw1121/zulip,wdaher/zulip,zulip/zulip,mansilladev/zulip,thomasboyt/zulip,wweiradio/zulip,wavelets/zulip,atomic-labs/zulip,jessedhillon/zulip,souravbadami/zulip,hayderimran7/zulip,jainayush975/zulip,thomasboyt/zulip,blaze225/zulip,dxq-git/zulip,punchagan/zulip,dhcrzf/zulip,mdavid/zulip,aliceriot/zulip,bluesea/zulip,aakash-cr7/zulip,dnmfarrell/zulip,Frouk/zulip,jimmy54/zulip,Suninus/zulip,avastu/zulip,shaunstanislaus/zulip,ufosky-server/zulip,alliejones/zulip,Galexrt/zulip,peguin40/zulip,ikasumiwt/zulip,avastu/zulip,ashwinirudrappa/zulip,pradiptad/zulip,JPJPJPOPOP/zulip,moria/zulip,ahmadassaf/zulip,Vallher/zulip,joyhchen/zulip,bastianh/zulip,andersk/zulip,bastianh/zulip,levixie/zulip,kokoar/zulip,kaiyuanheshang/zulip,ryansnowboarder/zulip,Vallher/zulip,brockwhittaker/zulip,yuvipanda/zulip,ashwinirudrappa/zulip,showell/zulip,KJin99/zulip,brainwane/zulip,grave-w-grave/zulip,technicalpickles/zulip,LeeRisk/zulip,zachallaun/zulip,JPJPJPOPOP/zulip,jonesgithub/zulip,RobotCaleb/zulip,LeeRisk/zulip,calvinleenyc/zulip,PaulPetring/zulip,hackerkid/zulip,eeshangarg/zulip,developerfm/zulip,dotcool/zulip,samatdav/zulip,proliming/zulip,aps-sids/zulip,ahmadassaf/zulip,developerfm/zulip,mahim97/zulip,isht3/zulip,johnnygaddarr/zulip,alliejones/zulip,zulip/zulip,littledogboy/zulip,synicalsyntax/zulip,SmartPeople/zulip,aliceriot/zulip,atomic-labs/zulip,zwily/zulip,Diptanshu8/zulip,hengqujushi/zulip,shubhamdhama/zulip,hayderimran7/zulip,bitemyapp/zulip,ipernet/zulip,LeeRisk/zulip,johnny9/zulip,kou/zulip,tiansiyuan/zulip,susansls/zulip,joshisa/zulip,dattatreya303/zulip,Drooids/zulip,kou/zulip,TigorC/zulip,themass/zulip,zorojean/zulip,j831/zulip,nicholasbs/zulip,armooo/zulip,aps-sids/zulip,dnmfarrell/zulip,LAndreas/zulip,tdr130/zulip,shaunstanislaus/zulip,bowlofstew/zulip,alliejones/zulip,brockwhittaker/zulip,qq1012803704/zulip,johnny9/zulip,bluesea/zulip,littledogboy/zulip,zwily/zulip,MariaFaBella85/zulip,voidException/zulip,esander91/zulip,suxinde200
9/zulip,eastlhu/zulip,Jianchun1/zulip,Galexrt/zulip,gigawhitlocks/zulip,niftynei/zulip,zulip/zulip,mahim97/zulip,zwily/zulip,zwily/zulip,wdaher/zulip,qq1012803704/zulip,gigawhitlocks/zulip,mahim97/zulip,hj3938/zulip,bowlofstew/zulip,AZtheAsian/zulip,bitemyapp/zulip,kou/zulip,ahmadassaf/zulip,tdr130/zulip,ericzhou2008/zulip,fw1121/zulip,ikasumiwt/zulip,yuvipanda/zulip,he15his/zulip,ryansnowboarder/zulip,PhilSk/zulip,jrowan/zulip,Batterfii/zulip,tdr130/zulip,Drooids/zulip,paxapy/zulip,arpitpanwar/zulip,tiansiyuan/zulip,luyifan/zulip,huangkebo/zulip,reyha/zulip,natanovia/zulip,ericzhou2008/zulip,zacps/zulip,TigorC/zulip,xuanhan863/zulip,mansilladev/zulip,bitemyapp/zulip,vakila/zulip,joshisa/zulip,TigorC/zulip,dotcool/zulip,suxinde2009/zulip,yocome/zulip,aps-sids/zulip,krtkmj/zulip,Batterfii/zulip,wangdeshui/zulip,nicholasbs/zulip,jackrzhang/zulip,gkotian/zulip,MayB/zulip,bowlofstew/zulip,dattatreya303/zulip,nicholasbs/zulip,wavelets/zulip,hustlzp/zulip,jessedhillon/zulip,hj3938/zulip,moria/zulip,avastu/zulip,xuxiao/zulip,Cheppers/zulip,bitemyapp/zulip,willingc/zulip,KJin99/zulip,joyhchen/zulip,firstblade/zulip,zwily/zulip,ikasumiwt/zulip,EasonYi/zulip,zhaoweigg/zulip,amyliu345/zulip,amallia/zulip,susansls/zulip,cosmicAsymmetry/zulip,sonali0901/zulip,jrowan/zulip,voidException/zulip,wangdeshui/zulip,deer-hope/zulip,grave-w-grave/zulip,ikasumiwt/zulip,akuseru/zulip,gigawhitlocks/zulip,arpitpanwar/zulip,jrowan/zulip,jphilipsen05/zulip,tommyip/zulip,zacps/zulip,amallia/zulip,gkotian/zulip,christi3k/zulip,ryanbackman/zulip,shrikrishnaholla/zulip,KJin99/zulip,Jianchun1/zulip,lfranchi/zulip,jainayush975/zulip,jackrzhang/zulip,ipernet/zulip,voidException/zulip,lfranchi/zulip,luyifan/zulip,saitodisse/zulip,hj3938/zulip,yuvipanda/zulip,Suninus/zulip,EasonYi/zulip,brainwane/zulip,aps-sids/zulip,TigorC/zulip,littledogboy/zulip,dhcrzf/zulip,arpith/zulip,ahmadassaf/zulip,developerfm/zulip,stamhe/zulip,glovebx/zulip,bssrdf/zulip,Diptanshu8/zulip,kou/zulip,dotcool/zulip,hengqujushi/zu
lip,praveenaki/zulip,Qgap/zulip,kokoar/zulip,esander91/zulip,Juanvulcano/zulip,j831/zulip,amyliu345/zulip,firstblade/zulip,fw1121/zulip,firstblade/zulip,krtkmj/zulip,susansls/zulip,amyliu345/zulip,bowlofstew/zulip,luyifan/zulip,levixie/zulip,he15his/zulip,ericzhou2008/zulip,dnmfarrell/zulip,zwily/zulip,brockwhittaker/zulip,willingc/zulip,rht/zulip,babbage/zulip,littledogboy/zulip,arpitpanwar/zulip,timabbott/zulip,suxinde2009/zulip,joyhchen/zulip,xuxiao/zulip,shrikrishnaholla/zulip,umkay/zulip,tiansiyuan/zulip,Qgap/zulip,aps-sids/zulip,yuvipanda/zulip,rht/zulip,schatt/zulip,peiwei/zulip,mdavid/zulip,calvinleenyc/zulip,punchagan/zulip,fw1121/zulip,tommyip/zulip,m1ssou/zulip,gkotian/zulip,gkotian/zulip,suxinde2009/zulip,praveenaki/zulip,ApsOps/zulip,susansls/zulip,stamhe/zulip,LAndreas/zulip,amanharitsh123/zulip,tbutter/zulip,brainwane/zulip,KingxBanana/zulip,so0k/zulip,peiwei/zulip,mohsenSy/zulip,themass/zulip,shaunstanislaus/zulip,adnanh/zulip,itnihao/zulip,esander91/zulip,hengqujushi/zulip,Jianchun1/zulip,EasonYi/zulip,mahim97/zulip,blaze225/zulip,samatdav/zulip,atomic-labs/zulip,umkay/zulip,xuxiao/zulip,dattatreya303/zulip,niftynei/zulip,tiansiyuan/zulip,adnanh/zulip,zorojean/zulip,ApsOps/zulip,akuseru/zulip,proliming/zulip,swinghu/zulip,jrowan/zulip,eastlhu/zulip,mdavid/zulip,Gabriel0402/zulip,hustlzp/zulip,zhaoweigg/zulip,zachallaun/zulip,zofuthan/zulip,guiquanz/zulip,brainwane/zulip,souravbadami/zulip,verma-varsha/zulip,levixie/zulip,PhilSk/zulip,bssrdf/zulip,m1ssou/zulip,calvinleenyc/zulip,kokoar/zulip,ashwinirudrappa/zulip,zhaoweigg/zulip,jimmy54/zulip,vakila/zulip,johnny9/zulip,gkotian/zulip,LeeRisk/zulip,TigorC/zulip,JPJPJPOPOP/zulip,eeshangarg/zulip,wavelets/zulip,bowlofstew/zulip,mahim97/zulip,tiansiyuan/zulip,cosmicAsymmetry/zulip,thomasboyt/zulip,Suninus/zulip,Drooids/zulip,showell/zulip,tiansiyuan/zulip,jessedhillon/zulip,SmartPeople/zulip,LeeRisk/zulip,so0k/zulip,bluesea/zulip,seapasulli/zulip,shubhamdhama/zulip,jonesgithub/zulip,samatdav/zulip,zulip/z
ulip,sonali0901/zulip,Drooids/zulip,firstblade/zulip,bitemyapp/zulip,samatdav/zulip,cosmicAsymmetry/zulip,rishig/zulip,blaze225/zulip,sharmaeklavya2/zulip,moria/zulip,andersk/zulip,samatdav/zulip,schatt/zulip,eastlhu/zulip,AZtheAsian/zulip,littledogboy/zulip,showell/zulip,dnmfarrell/zulip,zhaoweigg/zulip,RobotCaleb/zulip,PaulPetring/zulip,zorojean/zulip,DazWorrall/zulip,willingc/zulip,timabbott/zulip,Drooids/zulip,RobotCaleb/zulip,glovebx/zulip,hengqujushi/zulip,dawran6/zulip,esander91/zulip,aakash-cr7/zulip,Batterfii/zulip,Vallher/zulip,he15his/zulip,reyha/zulip,saitodisse/zulip,wweiradio/zulip,calvinleenyc/zulip,nicholasbs/zulip,Galexrt/zulip,zhaoweigg/zulip,jainayush975/zulip,dhcrzf/zulip,Gabriel0402/zulip,Galexrt/zulip,ApsOps/zulip,wdaher/zulip,easyfmxu/zulip,zofuthan/zulip,jerryge/zulip,Batterfii/zulip,mansilladev/zulip,willingc/zulip,armooo/zulip,levixie/zulip,RobotCaleb/zulip,jessedhillon/zulip,brockwhittaker/zulip,EasonYi/zulip,stamhe/zulip,guiquanz/zulip,hafeez3000/zulip,atomic-labs/zulip,arpith/zulip,mahim97/zulip,karamcnair/zulip,johnny9/zulip,rht/zulip,LAndreas/zulip,easyfmxu/zulip,synicalsyntax/zulip,zachallaun/zulip,deer-hope/zulip,vaidap/zulip,kaiyuanheshang/zulip,kou/zulip,jimmy54/zulip,moria/zulip,jackrzhang/zulip,johnny9/zulip,peiwei/zulip,vabs22/zulip,Frouk/zulip,mansilladev/zulip,codeKonami/zulip,LAndreas/zulip,jimmy54/zulip,vabs22/zulip,jphilipsen05/zulip,amyliu345/zulip,so0k/zulip,showell/zulip,bssrdf/zulip,ipernet/zulip,ipernet/zulip,m1ssou/zulip,mdavid/zulip,pradiptad/zulip,m1ssou/zulip,swinghu/zulip,amanharitsh123/zulip,yocome/zulip,aliceriot/zulip,aliceriot/zulip,tiansiyuan/zulip,dhcrzf/zulip,amallia/zulip,so0k/zulip,dnmfarrell/zulip,Vallher/zulip,zachallaun/zulip,huangkebo/zulip,qq1012803704/zulip,karamcnair/zulip,hustlzp/zulip,natanovia/zulip,kaiyuanheshang/zulip,xuanhan863/zulip,MariaFaBella85/zulip,ryanbackman/zulip,jrowan/zulip,wweiradio/zulip,amanharitsh123/zulip,peiwei/zulip,mdavid/zulip,Galexrt/zulip,deer-hope/zulip,ufosky-server/zu
lip,bssrdf/zulip,willingc/zulip,so0k/zulip,easyfmxu/zulip,DazWorrall/zulip,JPJPJPOPOP/zulip,luyifan/zulip,wweiradio/zulip,mdavid/zulip,tommyip/zulip,Suninus/zulip,PhilSk/zulip,Drooids/zulip,vakila/zulip,luyifan/zulip,bastianh/zulip,ApsOps/zulip,rishig/zulip,kaiyuanheshang/zulip,JanzTam/zulip,joyhchen/zulip,Vallher/zulip,Cheppers/zulip,udxxabp/zulip,paxapy/zulip,PhilSk/zulip,christi3k/zulip,rishig/zulip,dhcrzf/zulip,grave-w-grave/zulip,peguin40/zulip,kokoar/zulip,alliejones/zulip,Cheppers/zulip,hafeez3000/zulip,MayB/zulip,avastu/zulip,qq1012803704/zulip,tbutter/zulip,johnnygaddarr/zulip,babbage/zulip,bluesea/zulip,LAndreas/zulip,ikasumiwt/zulip,lfranchi/zulip,shubhamdhama/zulip,niftynei/zulip,seapasulli/zulip,JanzTam/zulip,deer-hope/zulip,itnihao/zulip,glovebx/zulip,Gabriel0402/zulip,JanzTam/zulip,calvinleenyc/zulip,proliming/zulip,KingxBanana/zulip,seapasulli/zulip,noroot/zulip,praveenaki/zulip,timabbott/zulip,j831/zulip,udxxabp/zulip,hustlzp/zulip,hustlzp/zulip,brainwane/zulip,AZtheAsian/zulip,ryanbackman/zulip,zofuthan/zulip,tbutter/zulip,brockwhittaker/zulip,TigorC/zulip,jainayush975/zulip,vikas-parashar/zulip,johnny9/zulip,sup95/zulip,Cheppers/zulip,zhaoweigg/zulip,shubhamdhama/zulip,umkay/zulip,kou/zulip,technicalpickles/zulip,niftynei/zulip,luyifan/zulip,sharmaeklavya2/zulip,EasonYi/zulip,mohsenSy/zulip,MayB/zulip,Juanvulcano/zulip,xuxiao/zulip,ApsOps/zulip,dnmfarrell/zulip,yocome/zulip,vakila/zulip,jerryge/zulip,codeKonami/zulip,xuanhan863/zulip,hustlzp/zulip,ryansnowboarder/zulip,jackrzhang/zulip,littledogboy/zulip,wdaher/zulip,Batterfii/zulip,mohsenSy/zulip,ryansnowboarder/zulip,codeKonami/zulip,Vallher/zulip,vabs22/zulip,mansilladev/zulip,niftynei/zulip,PhilSk/zulip,dotcool/zulip,seapasulli/zulip,levixie/zulip,dotcool/zulip,ipernet/zulip,ryanbackman/zulip,Gabriel0402/zulip,armooo/zulip,Juanvulcano/zulip,wangdeshui/zulip,yuvipanda/zulip,hayderimran7/zulip,praveenaki/zulip,dwrpayne/zulip,shubhamdhama/zulip,Diptanshu8/zulip,proliming/zulip,amallia/zulip,schat
t/zulip,shaunstanislaus/zulip,babbage/zulip,jimmy54/zulip,shubhamdhama/zulip,hengqujushi/zulip,armooo/zulip,wweiradio/zulip,kokoar/zulip,zachallaun/zulip,udxxabp/zulip,firstblade/zulip,bastianh/zulip,avastu/zulip,verma-varsha/zulip,codeKonami/zulip,punchagan/zulip,ericzhou2008/zulip,stamhe/zulip,avastu/zulip,dotcool/zulip,natanovia/zulip,easyfmxu/zulip,dxq-git/zulip,itnihao/zulip,alliejones/zulip,ufosky-server/zulip,wdaher/zulip,krtkmj/zulip,vaidap/zulip,natanovia/zulip,joshisa/zulip,jessedhillon/zulip,rht/zulip,akuseru/zulip,hayderimran7/zulip,dawran6/zulip,christi3k/zulip,sonali0901/zulip,souravbadami/zulip,hackerkid/zulip,EasonYi/zulip,zacps/zulip,jeffcao/zulip,dawran6/zulip,eeshangarg/zulip,souravbadami/zulip,rht/zulip,hafeez3000/zulip,LAndreas/zulip,Suninus/zulip,deer-hope/zulip,joyhchen/zulip,wweiradio/zulip,isht3/zulip,zhaoweigg/zulip,vaidap/zulip,tbutter/zulip,SmartPeople/zulip,ryanbackman/zulip,swinghu/zulip,littledogboy/zulip,umkay/zulip,paxapy/zulip,brainwane/zulip,shubhamdhama/zulip,themass/zulip,seapasulli/zulip,shrikrishnaholla/zulip,willingc/zulip,dawran6/zulip,vabs22/zulip,hustlzp/zulip,huangkebo/zulip,karamcnair/zulip,aliceriot/zulip,proliming/zulip,babbage/zulip,Juanvulcano/zulip,themass/zulip,MayB/zulip,shaunstanislaus/zulip,sonali0901/zulip,bssrdf/zulip,joyhchen/zulip,vakila/zulip,akuseru/zulip,vikas-parashar/zulip,jonesgithub/zulip,technicalpickles/zulip,arpith/zulip,Gabriel0402/zulip,wavelets/zulip,themass/zulip,voidException/zulip,jeffcao/zulip,ashwinirudrappa/zulip,cosmicAsymmetry/zulip,karamcnair/zulip,DazWorrall/zulip,jphilipsen05/zulip,hj3938/zulip,RobotCaleb/zulip,MariaFaBella85/zulip,Cheppers/zulip,natanovia/zulip,jphilipsen05/zulip,technicalpickles/zulip,m1ssou/zulip,wangdeshui/zulip,kokoar/zulip,aliceriot/zulip,ryansnowboarder/zulip,kaiyuanheshang/zulip,atomic-labs/zulip,krtkmj/zulip,babbage/zulip,wdaher/zulip,rishig/zulip,itnihao/zulip,bowlofstew/zulip,RobotCaleb/zulip,arpith/zulip,huangkebo/zulip
|
from django.conf import settings
from hashlib import sha1
from os import path
from pipeline.compressors import SubProcessCompressor
class ClosureSourceMapCompressor(SubProcessCompressor):
def compress_js(self, js):
# js is the full text of the JavaScript source, and we can't
# easily get either the input file names or the output file
# name. So we just pick a unique arbitrary name. This is
# okay because we can figure out from the source map file
# contents which JavaScript files it corresponds to.
- source_map = path.join(settings.PIPELINE_CLOSURE_SOURCE_MAP_DIR,
- sha1(js).hexdigest() + '.map')
+
+ # As a hack to make things easier, assume that any large input
+ # corresponds to app.js. This is like 60 times bigger than
+ # any other input file, at present.
+
+ if len(js) > 100000:
+ source_map_name = 'app.js.map'
+ else:
+ source_map_name = sha1(js).hexdigest() + '.map'
+
+ source_map = path.join(
+ settings.PIPELINE_CLOSURE_SOURCE_MAP_DIR, source_map_name)
command = '%s --create_source_map %s' % (
settings.PIPELINE_CLOSURE_BINARY, source_map)
return self.execute_command(command, js)
|
Make it easier to find the source map for app.js
|
## Code Before:
from django.conf import settings
from hashlib import sha1
from os import path
from pipeline.compressors import SubProcessCompressor
class ClosureSourceMapCompressor(SubProcessCompressor):
def compress_js(self, js):
# js is the full text of the JavaScript source, and we can't
# easily get either the input file names or the output file
# name. So we just pick a unique arbitrary name. This is
# okay because we can figure out from the source map file
# contents which JavaScript files it corresponds to.
source_map = path.join(settings.PIPELINE_CLOSURE_SOURCE_MAP_DIR,
sha1(js).hexdigest() + '.map')
command = '%s --create_source_map %s' % (
settings.PIPELINE_CLOSURE_BINARY, source_map)
return self.execute_command(command, js)
## Instruction:
Make it easier to find the source map for app.js
## Code After:
from django.conf import settings
from hashlib import sha1
from os import path
from pipeline.compressors import SubProcessCompressor
class ClosureSourceMapCompressor(SubProcessCompressor):
def compress_js(self, js):
# js is the full text of the JavaScript source, and we can't
# easily get either the input file names or the output file
# name. So we just pick a unique arbitrary name. This is
# okay because we can figure out from the source map file
# contents which JavaScript files it corresponds to.
# As a hack to make things easier, assume that any large input
# corresponds to app.js. This is like 60 times bigger than
# any other input file, at present.
if len(js) > 100000:
source_map_name = 'app.js.map'
else:
source_map_name = sha1(js).hexdigest() + '.map'
source_map = path.join(
settings.PIPELINE_CLOSURE_SOURCE_MAP_DIR, source_map_name)
command = '%s --create_source_map %s' % (
settings.PIPELINE_CLOSURE_BINARY, source_map)
return self.execute_command(command, js)
|
# ... existing code ...
# contents which JavaScript files it corresponds to.
# As a hack to make things easier, assume that any large input
# corresponds to app.js. This is like 60 times bigger than
# any other input file, at present.
if len(js) > 100000:
source_map_name = 'app.js.map'
else:
source_map_name = sha1(js).hexdigest() + '.map'
source_map = path.join(
settings.PIPELINE_CLOSURE_SOURCE_MAP_DIR, source_map_name)
# ... rest of the code ...
|
62c51799953c1299e7c89c61a23270bf55e9cd69
|
PortalEnrollment/models.py
|
PortalEnrollment/models.py
|
from django.db import models
# Create your models here.
|
from django.db import models
from Portal.models import CharacterAttribute
from django.utils.translation import ugettext as _
# Create your models here.
class Enrollment(models.Model):
roles = models.ManyToManyField(_('Role'), CharacterAttribute)
open = models.BooleanField(_('Open Enrollment'), default=False)
limit = models.SmallIntegerField(_('Limit'))
background_image = models.ImageField(_('Background image'), upload_to='/enrollment/background/', blank=True)
thumbnail = models.ImageField(_('Thumbnail image'), upload_to='/enrollment/thumbnail/', blank=True)
def reach_limit(self):
pass
class Meta:
verbose_name = _('Enrollment')
verbose_name_plural = _('Enrollments')
|
Add first model for Enrollment application
|
Add first model for Enrollment application
|
Python
|
mit
|
elryndir/GuildPortal,elryndir/GuildPortal
|
from django.db import models
+ from Portal.models import CharacterAttribute
+ from django.utils.translation import ugettext as _
# Create your models here.
+ class Enrollment(models.Model):
+ roles = models.ManyToManyField(_('Role'), CharacterAttribute)
+ open = models.BooleanField(_('Open Enrollment'), default=False)
+ limit = models.SmallIntegerField(_('Limit'))
+ background_image = models.ImageField(_('Background image'), upload_to='/enrollment/background/', blank=True)
+ thumbnail = models.ImageField(_('Thumbnail image'), upload_to='/enrollment/thumbnail/', blank=True)
+
+ def reach_limit(self):
+ pass
+
+ class Meta:
+ verbose_name = _('Enrollment')
+ verbose_name_plural = _('Enrollments')
|
Add first model for Enrollment application
|
## Code Before:
from django.db import models
# Create your models here.
## Instruction:
Add first model for Enrollment application
## Code After:
from django.db import models
from Portal.models import CharacterAttribute
from django.utils.translation import ugettext as _
# Create your models here.
class Enrollment(models.Model):
roles = models.ManyToManyField(_('Role'), CharacterAttribute)
open = models.BooleanField(_('Open Enrollment'), default=False)
limit = models.SmallIntegerField(_('Limit'))
background_image = models.ImageField(_('Background image'), upload_to='/enrollment/background/', blank=True)
thumbnail = models.ImageField(_('Thumbnail image'), upload_to='/enrollment/thumbnail/', blank=True)
def reach_limit(self):
pass
class Meta:
verbose_name = _('Enrollment')
verbose_name_plural = _('Enrollments')
|
...
from django.db import models
from Portal.models import CharacterAttribute
from django.utils.translation import ugettext as _
...
# Create your models here.
class Enrollment(models.Model):
roles = models.ManyToManyField(_('Role'), CharacterAttribute)
open = models.BooleanField(_('Open Enrollment'), default=False)
limit = models.SmallIntegerField(_('Limit'))
background_image = models.ImageField(_('Background image'), upload_to='/enrollment/background/', blank=True)
thumbnail = models.ImageField(_('Thumbnail image'), upload_to='/enrollment/thumbnail/', blank=True)
def reach_limit(self):
pass
class Meta:
verbose_name = _('Enrollment')
verbose_name_plural = _('Enrollments')
...
|
fa7172a5e3231e738d85df3baba130fdec7497d1
|
derrida/outwork/views.py
|
derrida/outwork/views.py
|
from django.views.generic import ListView
from haystack.query import SearchQuerySet
from haystack.inputs import Clean
from derrida.outwork.models import Outwork
class OutworkListView(ListView):
model = Outwork
template_name = 'outwork/outwork_list.html'
paginate_by = 16
def get_queryset(self):
# restrict to published articles
sqs = SearchQuerySet().models(self.model).filter(published=True)
if self.request.GET.get('query', None):
sqs = sqs.filter(content=Clean(self.request.GET['query']))
# default sort ?
return sqs
# return Outwork.objects.published(for_user=self.request.user)
|
from django.views.generic import ListView
from haystack.query import SearchQuerySet
from haystack.inputs import Clean, Raw
from derrida.outwork.models import Outwork
class OutworkListView(ListView):
model = Outwork
template_name = 'outwork/outwork_list.html'
paginate_by = 16
def get_queryset(self):
# restrict to published articles
sqs = SearchQuerySet().models(self.model).filter(published=Raw(True))
if self.request.GET.get('query', None):
sqs = sqs.filter(content=Clean(self.request.GET['query']))
# default sort ?
return sqs
# return Outwork.objects.published(for_user=self.request.user)
|
Fix outwork list view to properly filter on published=true in Solr
|
Fix outwork list view to properly filter on published=true in Solr
|
Python
|
apache-2.0
|
Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django
|
from django.views.generic import ListView
from haystack.query import SearchQuerySet
- from haystack.inputs import Clean
+ from haystack.inputs import Clean, Raw
from derrida.outwork.models import Outwork
class OutworkListView(ListView):
model = Outwork
template_name = 'outwork/outwork_list.html'
paginate_by = 16
def get_queryset(self):
# restrict to published articles
- sqs = SearchQuerySet().models(self.model).filter(published=True)
+ sqs = SearchQuerySet().models(self.model).filter(published=Raw(True))
if self.request.GET.get('query', None):
sqs = sqs.filter(content=Clean(self.request.GET['query']))
# default sort ?
return sqs
# return Outwork.objects.published(for_user=self.request.user)
|
Fix outwork list view to properly filter on published=true in Solr
|
## Code Before:
from django.views.generic import ListView
from haystack.query import SearchQuerySet
from haystack.inputs import Clean
from derrida.outwork.models import Outwork
class OutworkListView(ListView):
model = Outwork
template_name = 'outwork/outwork_list.html'
paginate_by = 16
def get_queryset(self):
# restrict to published articles
sqs = SearchQuerySet().models(self.model).filter(published=True)
if self.request.GET.get('query', None):
sqs = sqs.filter(content=Clean(self.request.GET['query']))
# default sort ?
return sqs
# return Outwork.objects.published(for_user=self.request.user)
## Instruction:
Fix outwork list view to properly filter on published=true in Solr
## Code After:
from django.views.generic import ListView
from haystack.query import SearchQuerySet
from haystack.inputs import Clean, Raw
from derrida.outwork.models import Outwork
class OutworkListView(ListView):
model = Outwork
template_name = 'outwork/outwork_list.html'
paginate_by = 16
def get_queryset(self):
# restrict to published articles
sqs = SearchQuerySet().models(self.model).filter(published=Raw(True))
if self.request.GET.get('query', None):
sqs = sqs.filter(content=Clean(self.request.GET['query']))
# default sort ?
return sqs
# return Outwork.objects.published(for_user=self.request.user)
|
// ... existing code ...
from haystack.query import SearchQuerySet
from haystack.inputs import Clean, Raw
// ... modified code ...
# restrict to published articles
sqs = SearchQuerySet().models(self.model).filter(published=Raw(True))
if self.request.GET.get('query', None):
// ... rest of the code ...
|
9808e97747785c27387ad1ce9ffc3e9a05c80f08
|
enigma.py
|
enigma.py
|
import string
class Steckerbrett:
def __init__(self):
pass
class Walzen:
def __init__(self):
pass
class Enigma:
def __init__(self):
pass
def cipher(self, message):
pass
|
import string
class Steckerbrett:
def __init__(self):
pass
class Umkehrwalze:
def __init__(self, wiring):
self.wiring = wiring
def encode(self, letter):
return self.wiring[string.ascii_uppercase.index(letter)]
class Walzen:
def __init__(self):
pass
class Enigma:
def __init__(self):
pass
def cipher(self, message):
pass
|
Create class for the reflectors
|
Create class for the reflectors
|
Python
|
mit
|
ranisalt/enigma
|
import string
class Steckerbrett:
def __init__(self):
pass
+
+
+ class Umkehrwalze:
+ def __init__(self, wiring):
+ self.wiring = wiring
+
+ def encode(self, letter):
+ return self.wiring[string.ascii_uppercase.index(letter)]
class Walzen:
def __init__(self):
pass
class Enigma:
def __init__(self):
pass
def cipher(self, message):
pass
|
Create class for the reflectors
|
## Code Before:
import string
class Steckerbrett:
def __init__(self):
pass
class Walzen:
def __init__(self):
pass
class Enigma:
def __init__(self):
pass
def cipher(self, message):
pass
## Instruction:
Create class for the reflectors
## Code After:
import string
class Steckerbrett:
def __init__(self):
pass
class Umkehrwalze:
def __init__(self, wiring):
self.wiring = wiring
def encode(self, letter):
return self.wiring[string.ascii_uppercase.index(letter)]
class Walzen:
def __init__(self):
pass
class Enigma:
def __init__(self):
pass
def cipher(self, message):
pass
|
...
pass
class Umkehrwalze:
def __init__(self, wiring):
self.wiring = wiring
def encode(self, letter):
return self.wiring[string.ascii_uppercase.index(letter)]
...
|
d0aba6489a96003c9a746bd38818cffa717d1469
|
akatsuki/bib2html.py
|
akatsuki/bib2html.py
|
from __future__ import unicode_literals
from akatsuki.exporter import export_html
from akatsuki.parser import load_bibtex_file
from akatsuki.utils import sort_by_date
def main(bibtex_file, html_file):
"""Load BibTeX file and export to HTML file"""
entries = load_bibtex_file(bibtex_file)
entries = sort_by_date(entries, reverse=True)
export_html(html_file, entries)
|
from __future__ import unicode_literals
from akatsuki.exporter import export_html
from akatsuki.parser import load_bibtex_file
from akatsuki.utils import pmid_to_url, sort_by_date
def main(bibtex_file, html_file):
"""Load BibTeX file and export to HTML file"""
entries = load_bibtex_file(bibtex_file)
entries = pmid_to_url(entries)
entries = sort_by_date(entries, reverse=True)
export_html(html_file, entries)
|
Add pmid to url convertion
|
Add pmid to url convertion
|
Python
|
mit
|
403JFW/akatsuki
|
from __future__ import unicode_literals
from akatsuki.exporter import export_html
from akatsuki.parser import load_bibtex_file
- from akatsuki.utils import sort_by_date
+ from akatsuki.utils import pmid_to_url, sort_by_date
def main(bibtex_file, html_file):
"""Load BibTeX file and export to HTML file"""
entries = load_bibtex_file(bibtex_file)
+ entries = pmid_to_url(entries)
entries = sort_by_date(entries, reverse=True)
export_html(html_file, entries)
|
Add pmid to url convertion
|
## Code Before:
from __future__ import unicode_literals
from akatsuki.exporter import export_html
from akatsuki.parser import load_bibtex_file
from akatsuki.utils import sort_by_date
def main(bibtex_file, html_file):
"""Load BibTeX file and export to HTML file"""
entries = load_bibtex_file(bibtex_file)
entries = sort_by_date(entries, reverse=True)
export_html(html_file, entries)
## Instruction:
Add pmid to url convertion
## Code After:
from __future__ import unicode_literals
from akatsuki.exporter import export_html
from akatsuki.parser import load_bibtex_file
from akatsuki.utils import pmid_to_url, sort_by_date
def main(bibtex_file, html_file):
"""Load BibTeX file and export to HTML file"""
entries = load_bibtex_file(bibtex_file)
entries = pmid_to_url(entries)
entries = sort_by_date(entries, reverse=True)
export_html(html_file, entries)
|
...
from akatsuki.parser import load_bibtex_file
from akatsuki.utils import pmid_to_url, sort_by_date
...
entries = load_bibtex_file(bibtex_file)
entries = pmid_to_url(entries)
entries = sort_by_date(entries, reverse=True)
...
|
805c52698b3fed8df98462c15045f5de3822e241
|
edx_repo_tools/dev/clone_org.py
|
edx_repo_tools/dev/clone_org.py
|
"""Clone an entire GitHub organization."""
import os.path
import click
from git.repo.base import Repo
from edx_repo_tools.auth import pass_github
@click.command()
@click.option(
'--forks/--no-forks', is_flag=True, default=False,
help="Should forks be included?"
)
@click.option(
'--depth', type=int, default=0,
help="Depth argument for git clone",
)
@click.argument(
'org'
)
@pass_github
def main(hub, forks, depth, org):
for repo in hub.organization(org).repositories():
if repo.fork and not forks:
continue
dir_name = repo.name
dir_name = dir_name.lstrip("-") # avoid dirname/option confusion
if os.path.exists(dir_name):
continue
print(repo.full_name)
clone_args = {}
if depth:
clone_args['depth'] = depth
Repo.clone_from(repo.ssh_url, dir_name, **clone_args)
|
"""Clone an entire GitHub organization."""
import os.path
import click
from git.repo.base import Repo
from edx_repo_tools.auth import pass_github
@click.command()
@click.option(
'--forks/--no-forks', is_flag=True, default=False,
help="Should forks be included?"
)
@click.option(
'--depth', type=int, default=0,
help="Depth argument for git clone",
)
@click.argument(
'org'
)
@pass_github
def main(hub, forks, depth, org):
for repo in hub.organization(org).iter_repos():
if repo.fork and not forks:
continue
dir_name = repo.name
dir_name = dir_name.lstrip("-") # avoid dirname/option confusion
if os.path.exists(dir_name):
continue
print(repo.full_name)
clone_args = {}
if depth:
clone_args['depth'] = depth
Repo.clone_from(repo.ssh_url, dir_name, **clone_args)
|
Fix to work in python 3.
|
Fix to work in python 3.
|
Python
|
apache-2.0
|
edx/repo-tools,edx/repo-tools
|
"""Clone an entire GitHub organization."""
import os.path
import click
from git.repo.base import Repo
from edx_repo_tools.auth import pass_github
-
@click.command()
@click.option(
'--forks/--no-forks', is_flag=True, default=False,
help="Should forks be included?"
)
@click.option(
'--depth', type=int, default=0,
help="Depth argument for git clone",
)
@click.argument(
'org'
)
@pass_github
def main(hub, forks, depth, org):
- for repo in hub.organization(org).repositories():
+ for repo in hub.organization(org).iter_repos():
if repo.fork and not forks:
continue
dir_name = repo.name
dir_name = dir_name.lstrip("-") # avoid dirname/option confusion
if os.path.exists(dir_name):
continue
print(repo.full_name)
clone_args = {}
if depth:
clone_args['depth'] = depth
Repo.clone_from(repo.ssh_url, dir_name, **clone_args)
|
Fix to work in python 3.
|
## Code Before:
"""Clone an entire GitHub organization."""
import os.path
import click
from git.repo.base import Repo
from edx_repo_tools.auth import pass_github
@click.command()
@click.option(
'--forks/--no-forks', is_flag=True, default=False,
help="Should forks be included?"
)
@click.option(
'--depth', type=int, default=0,
help="Depth argument for git clone",
)
@click.argument(
'org'
)
@pass_github
def main(hub, forks, depth, org):
for repo in hub.organization(org).repositories():
if repo.fork and not forks:
continue
dir_name = repo.name
dir_name = dir_name.lstrip("-") # avoid dirname/option confusion
if os.path.exists(dir_name):
continue
print(repo.full_name)
clone_args = {}
if depth:
clone_args['depth'] = depth
Repo.clone_from(repo.ssh_url, dir_name, **clone_args)
## Instruction:
Fix to work in python 3.
## Code After:
"""Clone an entire GitHub organization."""
import os.path
import click
from git.repo.base import Repo
from edx_repo_tools.auth import pass_github
@click.command()
@click.option(
'--forks/--no-forks', is_flag=True, default=False,
help="Should forks be included?"
)
@click.option(
'--depth', type=int, default=0,
help="Depth argument for git clone",
)
@click.argument(
'org'
)
@pass_github
def main(hub, forks, depth, org):
for repo in hub.organization(org).iter_repos():
if repo.fork and not forks:
continue
dir_name = repo.name
dir_name = dir_name.lstrip("-") # avoid dirname/option confusion
if os.path.exists(dir_name):
continue
print(repo.full_name)
clone_args = {}
if depth:
clone_args['depth'] = depth
Repo.clone_from(repo.ssh_url, dir_name, **clone_args)
|
// ... existing code ...
from edx_repo_tools.auth import pass_github
// ... modified code ...
def main(hub, forks, depth, org):
for repo in hub.organization(org).iter_repos():
if repo.fork and not forks:
// ... rest of the code ...
|
7d862be1aba5a062eeaf54ada9587278e7e93f5b
|
apps/provider/urls.py
|
apps/provider/urls.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
)
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/practioner/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
url(r'^fhir/organization/push$', fhir_organization_push, name="fhir_organization_push"),
)
|
Change fhir practitioner url and add organization url
|
Change fhir practitioner url and add organization url
|
Python
|
apache-2.0
|
TransparentHealth/hhs_oauth_client,TransparentHealth/hhs_oauth_client,TransparentHealth/hhs_oauth_client,TransparentHealth/hhs_oauth_client
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
- url(r'^fhir/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
+ url(r'^fhir/practioner/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
+ url(r'^fhir/organization/push$', fhir_organization_push, name="fhir_organization_push"),
)
+
|
Change fhir practitioner url and add organization url
|
## Code Before:
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
)
## Instruction:
Change fhir practitioner url and add organization url
## Code After:
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import patterns, include, url
from .views import *
urlpatterns = patterns('',
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/practioner/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
url(r'^fhir/organization/push$', fhir_organization_push, name="fhir_organization_push"),
)
|
...
url(r'^pjson/push$', pjson_provider_push, name="pjson_provider_push"),
url(r'^fhir/practioner/push$', fhir_practitioner_push, name="fhir_practitioner_push"),
url(r'^fhir/organization/push$', fhir_organization_push, name="fhir_organization_push"),
...
|
7111a61a66affcb3c60ea207084e537b2109da61
|
mangaki/mangaki/management/commands/top.py
|
mangaki/mangaki/management/commands/top.py
|
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from django.db import connection
from mangaki.models import Rating, Anime
from collections import Counter
import json
import sys
class Command(BaseCommand):
args = ''
help = 'Builds top'
def handle(self, *args, **options):
category = sys.argv[2]
c = Counter()
values = {'favorite': 10, 'like': 2, 'neutral': 0.5, 'dislike': -1}
anime_ids = Anime.objects.exclude(**{category: 1}).values_list('id', flat=True)
nb_ratings = Counter()
nb_stars = Counter()
for rating in Rating.objects.filter(work_id__in=anime_ids).select_related('work__anime__' + category):
contestant = getattr(rating.work.anime, category)
nb_ratings[contestant] += 1
if rating.choice == 'favorite':
nb_stars[contestant] += 1
c[contestant] += values.get(rating.choice, 0)
top = []
for i, (artist, score) in enumerate(c.most_common(20)):
top.append(dict(rank=i + 1, name=str(artist), score=score, nb_ratings=nb_ratings[artist], nb_stars=nb_stars[artist]))
print(json.dumps(top))
|
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from django.db import connection
from mangaki.models import Rating, Anime
from collections import Counter
import json
import sys
class Command(BaseCommand):
args = ''
help = 'Builds top'
def handle(self, *args, **options):
category = sys.argv[2]
c = Counter()
values = {'favorite': 10, 'like': 2, 'neutral': 0.5, 'dislike': -1}
anime_ids = Anime.objects.exclude(**{category: 1}).values_list('id', flat=True)
nb_ratings = Counter()
nb_stars = Counter()
for rating in Rating.objects.filter(work_id__in=anime_ids).select_related('work__anime__' + category):
contestant = getattr(rating.work.anime, category)
nb_ratings[contestant] += 1
if rating.choice == 'favorite':
nb_stars[contestant] += 1
c[contestant] += values.get(rating.choice, 0)
top = []
for i, (artist, score) in enumerate(c.most_common(20)):
top.append(dict(rank=i + 1, name=str(artist), id=artist.id, score=score, nb_ratings=nb_ratings[artist], nb_stars=nb_stars[artist]))
print(json.dumps(top))
|
Add ID to Artist in Top
|
Add ID to Artist in Top
|
Python
|
agpl-3.0
|
Mako-kun/mangaki,Elarnon/mangaki,Elarnon/mangaki,Mako-kun/mangaki,Mako-kun/mangaki,Elarnon/mangaki
|
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from django.db import connection
from mangaki.models import Rating, Anime
from collections import Counter
import json
import sys
class Command(BaseCommand):
args = ''
help = 'Builds top'
def handle(self, *args, **options):
category = sys.argv[2]
c = Counter()
values = {'favorite': 10, 'like': 2, 'neutral': 0.5, 'dislike': -1}
anime_ids = Anime.objects.exclude(**{category: 1}).values_list('id', flat=True)
nb_ratings = Counter()
nb_stars = Counter()
for rating in Rating.objects.filter(work_id__in=anime_ids).select_related('work__anime__' + category):
contestant = getattr(rating.work.anime, category)
nb_ratings[contestant] += 1
if rating.choice == 'favorite':
nb_stars[contestant] += 1
c[contestant] += values.get(rating.choice, 0)
top = []
for i, (artist, score) in enumerate(c.most_common(20)):
- top.append(dict(rank=i + 1, name=str(artist), score=score, nb_ratings=nb_ratings[artist], nb_stars=nb_stars[artist]))
+ top.append(dict(rank=i + 1, name=str(artist), id=artist.id, score=score, nb_ratings=nb_ratings[artist], nb_stars=nb_stars[artist]))
print(json.dumps(top))
|
Add ID to Artist in Top
|
## Code Before:
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from django.db import connection
from mangaki.models import Rating, Anime
from collections import Counter
import json
import sys
class Command(BaseCommand):
args = ''
help = 'Builds top'
def handle(self, *args, **options):
category = sys.argv[2]
c = Counter()
values = {'favorite': 10, 'like': 2, 'neutral': 0.5, 'dislike': -1}
anime_ids = Anime.objects.exclude(**{category: 1}).values_list('id', flat=True)
nb_ratings = Counter()
nb_stars = Counter()
for rating in Rating.objects.filter(work_id__in=anime_ids).select_related('work__anime__' + category):
contestant = getattr(rating.work.anime, category)
nb_ratings[contestant] += 1
if rating.choice == 'favorite':
nb_stars[contestant] += 1
c[contestant] += values.get(rating.choice, 0)
top = []
for i, (artist, score) in enumerate(c.most_common(20)):
top.append(dict(rank=i + 1, name=str(artist), score=score, nb_ratings=nb_ratings[artist], nb_stars=nb_stars[artist]))
print(json.dumps(top))
## Instruction:
Add ID to Artist in Top
## Code After:
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Count
from django.db import connection
from mangaki.models import Rating, Anime
from collections import Counter
import json
import sys
class Command(BaseCommand):
args = ''
help = 'Builds top'
def handle(self, *args, **options):
category = sys.argv[2]
c = Counter()
values = {'favorite': 10, 'like': 2, 'neutral': 0.5, 'dislike': -1}
anime_ids = Anime.objects.exclude(**{category: 1}).values_list('id', flat=True)
nb_ratings = Counter()
nb_stars = Counter()
for rating in Rating.objects.filter(work_id__in=anime_ids).select_related('work__anime__' + category):
contestant = getattr(rating.work.anime, category)
nb_ratings[contestant] += 1
if rating.choice == 'favorite':
nb_stars[contestant] += 1
c[contestant] += values.get(rating.choice, 0)
top = []
for i, (artist, score) in enumerate(c.most_common(20)):
top.append(dict(rank=i + 1, name=str(artist), id=artist.id, score=score, nb_ratings=nb_ratings[artist], nb_stars=nb_stars[artist]))
print(json.dumps(top))
|
...
for i, (artist, score) in enumerate(c.most_common(20)):
top.append(dict(rank=i + 1, name=str(artist), id=artist.id, score=score, nb_ratings=nb_ratings[artist], nb_stars=nb_stars[artist]))
print(json.dumps(top))
...
|
745565adaff36e95676c427157acb52112e0a3cc
|
sitenco/config/vcs.py
|
sitenco/config/vcs.py
|
import abc
import brigit
from docutils import nodes
from docutils.parsers.rst import directives
from .tool import Tool, Directive
class VCS(Tool):
"""Abstract class for VCS tools."""
__metaclass__ = abc.ABCMeta
def __init__(self, path, branch, url=None):
self.path = path
self.branch = branch
super(VCS, self).__init__()
@abc.abstractmethod
def log(self, number=10):
"""List of :class:`Commit` items."""
raise NotImplementedError
class Git(VCS):
"""Git tool."""
def __init__(self, path, branch='master', url=None):
self._repository = brigit.Git(path)
super(Git, self).__init__(path, branch)
def log(self, number=10):
commits = "%s~%i..%s" % (self.branch, number, self.branch)
return self._repository.pretty_log(commits)
def update(self):
self._repository.fetch()
class Log(Directive):
"""List logs as a definition list."""
option_spec = {'number': directives.nonnegative_int}
def run(self):
children = []
for item in self.tool.log():
children.append(nodes.term(text=item['hash']))
children.append(
nodes.definition('', nodes.paragraph(text=item['message'])))
definition_list = nodes.definition_list('', *children)
return [definition_list]
|
import abc
import brigit
from docutils import nodes
from docutils.parsers.rst import directives
from .tool import Tool, Directive
class VCS(Tool):
"""Abstract class for VCS tools."""
__metaclass__ = abc.ABCMeta
def __init__(self, path, branch, url=None):
self.path = path
self.branch = branch
self.url = url
super(VCS, self).__init__()
@abc.abstractmethod
def log(self, number=10):
"""List of :class:`Commit` items."""
raise NotImplementedError
class Git(VCS):
"""Git tool."""
def __init__(self, path, branch='master', url=None):
super(Git, self).__init__(path, branch, url)
self._repository = brigit.Git(path, remote=self.url)
def log(self, number=10):
commits = "%s~%i..%s" % (self.branch, number, self.branch)
return self._repository.pretty_log(commits)
def update(self):
self._repository.fetch()
class Log(Directive):
"""List logs as a definition list."""
option_spec = {'number': directives.nonnegative_int}
def run(self):
children = []
for item in self.tool.log():
children.append(nodes.term(text=item['hash']))
children.append(
nodes.definition('', nodes.paragraph(text=item['message'])))
definition_list = nodes.definition_list('', *children)
return [definition_list]
|
Clone git repos if they do not exist.
|
Clone git repos if they do not exist.
|
Python
|
bsd-3-clause
|
Kozea/sitenco
|
import abc
import brigit
from docutils import nodes
from docutils.parsers.rst import directives
from .tool import Tool, Directive
class VCS(Tool):
"""Abstract class for VCS tools."""
__metaclass__ = abc.ABCMeta
def __init__(self, path, branch, url=None):
self.path = path
self.branch = branch
+ self.url = url
super(VCS, self).__init__()
@abc.abstractmethod
def log(self, number=10):
"""List of :class:`Commit` items."""
raise NotImplementedError
class Git(VCS):
"""Git tool."""
def __init__(self, path, branch='master', url=None):
- self._repository = brigit.Git(path)
- super(Git, self).__init__(path, branch)
+ super(Git, self).__init__(path, branch, url)
+ self._repository = brigit.Git(path, remote=self.url)
def log(self, number=10):
commits = "%s~%i..%s" % (self.branch, number, self.branch)
return self._repository.pretty_log(commits)
def update(self):
self._repository.fetch()
class Log(Directive):
"""List logs as a definition list."""
option_spec = {'number': directives.nonnegative_int}
def run(self):
children = []
for item in self.tool.log():
children.append(nodes.term(text=item['hash']))
children.append(
nodes.definition('', nodes.paragraph(text=item['message'])))
definition_list = nodes.definition_list('', *children)
return [definition_list]
|
Clone git repos if they do not exist.
|
## Code Before:
import abc
import brigit
from docutils import nodes
from docutils.parsers.rst import directives
from .tool import Tool, Directive
class VCS(Tool):
"""Abstract class for VCS tools."""
__metaclass__ = abc.ABCMeta
def __init__(self, path, branch, url=None):
self.path = path
self.branch = branch
super(VCS, self).__init__()
@abc.abstractmethod
def log(self, number=10):
"""List of :class:`Commit` items."""
raise NotImplementedError
class Git(VCS):
"""Git tool."""
def __init__(self, path, branch='master', url=None):
self._repository = brigit.Git(path)
super(Git, self).__init__(path, branch)
def log(self, number=10):
commits = "%s~%i..%s" % (self.branch, number, self.branch)
return self._repository.pretty_log(commits)
def update(self):
self._repository.fetch()
class Log(Directive):
"""List logs as a definition list."""
option_spec = {'number': directives.nonnegative_int}
def run(self):
children = []
for item in self.tool.log():
children.append(nodes.term(text=item['hash']))
children.append(
nodes.definition('', nodes.paragraph(text=item['message'])))
definition_list = nodes.definition_list('', *children)
return [definition_list]
## Instruction:
Clone git repos if they do not exist.
## Code After:
import abc
import brigit
from docutils import nodes
from docutils.parsers.rst import directives
from .tool import Tool, Directive
class VCS(Tool):
"""Abstract class for VCS tools."""
__metaclass__ = abc.ABCMeta
def __init__(self, path, branch, url=None):
self.path = path
self.branch = branch
self.url = url
super(VCS, self).__init__()
@abc.abstractmethod
def log(self, number=10):
"""List of :class:`Commit` items."""
raise NotImplementedError
class Git(VCS):
"""Git tool."""
def __init__(self, path, branch='master', url=None):
super(Git, self).__init__(path, branch, url)
self._repository = brigit.Git(path, remote=self.url)
def log(self, number=10):
commits = "%s~%i..%s" % (self.branch, number, self.branch)
return self._repository.pretty_log(commits)
def update(self):
self._repository.fetch()
class Log(Directive):
"""List logs as a definition list."""
option_spec = {'number': directives.nonnegative_int}
def run(self):
children = []
for item in self.tool.log():
children.append(nodes.term(text=item['hash']))
children.append(
nodes.definition('', nodes.paragraph(text=item['message'])))
definition_list = nodes.definition_list('', *children)
return [definition_list]
|
...
self.branch = branch
self.url = url
super(VCS, self).__init__()
...
def __init__(self, path, branch='master', url=None):
super(Git, self).__init__(path, branch, url)
self._repository = brigit.Git(path, remote=self.url)
...
|
69baf68b436255eca71ec63578a2fdef4bc03165
|
books.py
|
books.py
|
import falcon
class BooksResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = open('/home/sanchopanca/Documents/thunder.txt').read()
app = falcon.API()
books = BooksResource()
app.add_route('/books', books)
|
import falcon
def get_paragraphs(pathname):
result = []
with open(pathname) as f:
for line in f.readlines():
if line != '\n':
result.append(line[:-1])
return result
class BooksResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = open('/home/sanchopanca/Documents/thunder.txt').read()
app = falcon.API()
books = BooksResource()
app.add_route('/books', books)
if __name__ == '__main__':
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
print(paragraphs)
|
Add function which divide text to paragraphs
|
Add function which divide text to paragraphs
|
Python
|
agpl-3.0
|
sanchopanca/reader,sanchopanca/reader
|
import falcon
+
+
+ def get_paragraphs(pathname):
+ result = []
+ with open(pathname) as f:
+ for line in f.readlines():
+ if line != '\n':
+ result.append(line[:-1])
+ return result
class BooksResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = open('/home/sanchopanca/Documents/thunder.txt').read()
app = falcon.API()
books = BooksResource()
app.add_route('/books', books)
+
+ if __name__ == '__main__':
+ paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
+ print(paragraphs)
|
Add function which divide text to paragraphs
|
## Code Before:
import falcon
class BooksResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = open('/home/sanchopanca/Documents/thunder.txt').read()
app = falcon.API()
books = BooksResource()
app.add_route('/books', books)
## Instruction:
Add function which divide text to paragraphs
## Code After:
import falcon
def get_paragraphs(pathname):
result = []
with open(pathname) as f:
for line in f.readlines():
if line != '\n':
result.append(line[:-1])
return result
class BooksResource:
def on_get(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = open('/home/sanchopanca/Documents/thunder.txt').read()
app = falcon.API()
books = BooksResource()
app.add_route('/books', books)
if __name__ == '__main__':
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
print(paragraphs)
|
...
import falcon
def get_paragraphs(pathname):
result = []
with open(pathname) as f:
for line in f.readlines():
if line != '\n':
result.append(line[:-1])
return result
...
app.add_route('/books', books)
if __name__ == '__main__':
paragraphs = get_paragraphs('/home/sanchopanca/Documents/thunder.txt')
print(paragraphs)
...
|
a3c3a6ed4d01f1857fc4728b10505e330af9e6ae
|
code/helper/easierlife.py
|
code/helper/easierlife.py
|
import fileinput
import json
import os.path
from dstruct import Sentence
## BASE_DIR denotes the application directory
BASE_DIR, throwaway = os.path.split(os.path.realpath(__file__))
BASE_DIR = os.path.realpath(BASE_DIR + "/../..")
## Return the start and end indexes of all subsets of words in the sentence
## sent, with size at most max_phrase_length
def get_all_phrases_in_sentence(sent, max_phrase_length):
for start in range(len(sent.words)):
for end in reversed(range(start + 1, min(len(sent.words), start + 1 + max_phrase_length))):
yield (start, end)
## Return Sentence objects from input lines
def get_input_sentences(input_files=[]):
for line in fileinput.input(input_files):
sent_dict = json.loads(line)
yield Sentence(sent_dict["doc_id"], sent_dict["sent_id"],
sent_dict["wordidxs"], sent_dict["words"],
sent_dict["poses"], sent_dict["ners"], sent_dict["lemmas"],
sent_dict["dep_paths"], sent_dict["dep_parents"],
sent_dict["bounding_boxes"])
|
import fileinput
import json
import os.path
import sys
from dstruct.Sentence import Sentence
## BASE_DIR denotes the application directory
BASE_DIR, throwaway = os.path.split(os.path.realpath(__file__))
BASE_DIR = os.path.realpath(BASE_DIR + "/../..")
## Return the start and end indexes of all subsets of words in the sentence
## sent, with size at most max_phrase_length
def get_all_phrases_in_sentence(sent, max_phrase_length):
for start in range(len(sent.words)):
for end in reversed(range(start + 1, min(len(sent.words), start + 1 + max_phrase_length))):
yield (start, end)
## Return Sentence objects from input lines
def get_input_sentences(input_files=sys.argv[1:]):
with fileinput.input(files=input_files) as f:
for line in f:
sent_dict = json.loads(line)
yield Sentence(sent_dict["doc_id"], sent_dict["sent_id"],
sent_dict["wordidxs"], sent_dict["words"],
sent_dict["poses"], sent_dict["ners"], sent_dict["lemmas"],
sent_dict["dep_paths"], sent_dict["dep_parents"],
sent_dict["bounding_boxes"])
|
Fix import, use fileinput.iput as context, and fix its argument
|
Fix import, use fileinput.iput as context, and fix its argument
|
Python
|
apache-2.0
|
amwenger/dd-genomics,rionda/dd-genomics,HazyResearch/dd-genomics,amwenger/dd-genomics,HazyResearch/dd-genomics,HazyResearch/dd-genomics,HazyResearch/dd-genomics,rionda/dd-genomics,amwenger/dd-genomics,HazyResearch/dd-genomics
|
import fileinput
import json
import os.path
+ import sys
- from dstruct import Sentence
+ from dstruct.Sentence import Sentence
## BASE_DIR denotes the application directory
BASE_DIR, throwaway = os.path.split(os.path.realpath(__file__))
BASE_DIR = os.path.realpath(BASE_DIR + "/../..")
## Return the start and end indexes of all subsets of words in the sentence
## sent, with size at most max_phrase_length
def get_all_phrases_in_sentence(sent, max_phrase_length):
for start in range(len(sent.words)):
for end in reversed(range(start + 1, min(len(sent.words), start + 1 + max_phrase_length))):
yield (start, end)
## Return Sentence objects from input lines
- def get_input_sentences(input_files=[]):
+ def get_input_sentences(input_files=sys.argv[1:]):
- for line in fileinput.input(input_files):
+ with fileinput.input(files=input_files) as f:
+ for line in f:
- sent_dict = json.loads(line)
+ sent_dict = json.loads(line)
- yield Sentence(sent_dict["doc_id"], sent_dict["sent_id"],
+ yield Sentence(sent_dict["doc_id"], sent_dict["sent_id"],
- sent_dict["wordidxs"], sent_dict["words"],
+ sent_dict["wordidxs"], sent_dict["words"],
- sent_dict["poses"], sent_dict["ners"], sent_dict["lemmas"],
+ sent_dict["poses"], sent_dict["ners"], sent_dict["lemmas"],
- sent_dict["dep_paths"], sent_dict["dep_parents"],
+ sent_dict["dep_paths"], sent_dict["dep_parents"],
- sent_dict["bounding_boxes"])
+ sent_dict["bounding_boxes"])
|
Fix import, use fileinput.iput as context, and fix its argument
|
## Code Before:
import fileinput
import json
import os.path
from dstruct import Sentence
## BASE_DIR denotes the application directory
BASE_DIR, throwaway = os.path.split(os.path.realpath(__file__))
BASE_DIR = os.path.realpath(BASE_DIR + "/../..")
## Return the start and end indexes of all subsets of words in the sentence
## sent, with size at most max_phrase_length
def get_all_phrases_in_sentence(sent, max_phrase_length):
for start in range(len(sent.words)):
for end in reversed(range(start + 1, min(len(sent.words), start + 1 + max_phrase_length))):
yield (start, end)
## Return Sentence objects from input lines
def get_input_sentences(input_files=[]):
for line in fileinput.input(input_files):
sent_dict = json.loads(line)
yield Sentence(sent_dict["doc_id"], sent_dict["sent_id"],
sent_dict["wordidxs"], sent_dict["words"],
sent_dict["poses"], sent_dict["ners"], sent_dict["lemmas"],
sent_dict["dep_paths"], sent_dict["dep_parents"],
sent_dict["bounding_boxes"])
## Instruction:
Fix import, use fileinput.iput as context, and fix its argument
## Code After:
import fileinput
import json
import os.path
import sys
from dstruct.Sentence import Sentence
## BASE_DIR denotes the application directory
BASE_DIR, throwaway = os.path.split(os.path.realpath(__file__))
BASE_DIR = os.path.realpath(BASE_DIR + "/../..")
## Return the start and end indexes of all subsets of words in the sentence
## sent, with size at most max_phrase_length
def get_all_phrases_in_sentence(sent, max_phrase_length):
for start in range(len(sent.words)):
for end in reversed(range(start + 1, min(len(sent.words), start + 1 + max_phrase_length))):
yield (start, end)
## Return Sentence objects from input lines
def get_input_sentences(input_files=sys.argv[1:]):
with fileinput.input(files=input_files) as f:
for line in f:
sent_dict = json.loads(line)
yield Sentence(sent_dict["doc_id"], sent_dict["sent_id"],
sent_dict["wordidxs"], sent_dict["words"],
sent_dict["poses"], sent_dict["ners"], sent_dict["lemmas"],
sent_dict["dep_paths"], sent_dict["dep_parents"],
sent_dict["bounding_boxes"])
|
# ... existing code ...
import os.path
import sys
from dstruct.Sentence import Sentence
# ... modified code ...
## Return Sentence objects from input lines
def get_input_sentences(input_files=sys.argv[1:]):
with fileinput.input(files=input_files) as f:
for line in f:
sent_dict = json.loads(line)
yield Sentence(sent_dict["doc_id"], sent_dict["sent_id"],
sent_dict["wordidxs"], sent_dict["words"],
sent_dict["poses"], sent_dict["ners"], sent_dict["lemmas"],
sent_dict["dep_paths"], sent_dict["dep_parents"],
sent_dict["bounding_boxes"])
# ... rest of the code ...
|
28940582fcff57b66e702dfecfd96e83725fbab0
|
leisure/__init__.py
|
leisure/__init__.py
|
from __future__ import absolute_import
import sys
from .disco import run_script
from . import shuffle
import tempfile
def main():
script = sys.argv[1]
if len(sys.argv) == 3:
data_root = sys.argv[2]
else:
data_root = tempfile.mkdtemp()
run_script(script, data_root)
if __name__ == "__main__":
main()
|
from __future__ import absolute_import
import sys
import os
from .disco import run_script
from . import shuffle
import tempfile
def main():
script = sys.argv[1]
script_dir = os.path.abspath(os.path.dirname(script))
if script_dir not in [os.path.abspath(p) for p in sys.path]:
sys.path.append(script_dir)
if len(sys.argv) == 3:
data_root = sys.argv[2]
else:
data_root = tempfile.mkdtemp()
run_script(script, data_root)
if __name__ == "__main__":
main()
|
Add script's path to the python path
|
Add script's path to the python path
|
Python
|
mit
|
trivio/leisure
|
from __future__ import absolute_import
import sys
+ import os
from .disco import run_script
from . import shuffle
import tempfile
def main():
script = sys.argv[1]
+ script_dir = os.path.abspath(os.path.dirname(script))
+ if script_dir not in [os.path.abspath(p) for p in sys.path]:
+ sys.path.append(script_dir)
+
if len(sys.argv) == 3:
data_root = sys.argv[2]
else:
data_root = tempfile.mkdtemp()
run_script(script, data_root)
if __name__ == "__main__":
main()
|
Add script's path to the python path
|
## Code Before:
from __future__ import absolute_import
import sys
from .disco import run_script
from . import shuffle
import tempfile
def main():
script = sys.argv[1]
if len(sys.argv) == 3:
data_root = sys.argv[2]
else:
data_root = tempfile.mkdtemp()
run_script(script, data_root)
if __name__ == "__main__":
main()
## Instruction:
Add script's path to the python path
## Code After:
from __future__ import absolute_import
import sys
import os
from .disco import run_script
from . import shuffle
import tempfile
def main():
script = sys.argv[1]
script_dir = os.path.abspath(os.path.dirname(script))
if script_dir not in [os.path.abspath(p) for p in sys.path]:
sys.path.append(script_dir)
if len(sys.argv) == 3:
data_root = sys.argv[2]
else:
data_root = tempfile.mkdtemp()
run_script(script, data_root)
if __name__ == "__main__":
main()
|
// ... existing code ...
import sys
import os
// ... modified code ...
script = sys.argv[1]
script_dir = os.path.abspath(os.path.dirname(script))
if script_dir not in [os.path.abspath(p) for p in sys.path]:
sys.path.append(script_dir)
if len(sys.argv) == 3:
// ... rest of the code ...
|
0cb45bbc1c7b6b5f1a2722e85159b97c8a555e0c
|
examples/providers/factory_deep_init_injections.py
|
examples/providers/factory_deep_init_injections.py
|
"""`Factory` providers deep init injections example."""
from dependency_injector import providers
class Regularizer:
def __init__(self, alpha):
self.alpha = alpha
class Loss:
def __init__(self, regularizer):
self.regularizer = regularizer
class ClassificationTask:
def __init__(self, loss):
self.loss = loss
class Algorithm:
def __init__(self, task):
self.task = task
algorithm_factory = providers.Factory(
Algorithm,
task=providers.Factory(
ClassificationTask,
loss=providers.Factory(
Loss,
regularizer=providers.Factory(
Regularizer,
),
),
),
)
if __name__ == '__main__':
algorithm_1 = algorithm_factory(task__loss__regularizer__alpha=0.5)
assert algorithm_1.task.loss.regularizer.alpha == 0.5
algorithm_2 = algorithm_factory(task__loss__regularizer__alpha=0.7)
assert algorithm_2.task.loss.regularizer.alpha == 0.7
algorithm_3 = algorithm_factory(task__loss__regularizer=Regularizer(alpha=0.8))
assert algorithm_3.task.loss.regularizer.alpha == 0.8
|
"""`Factory` providers - building a complex object graph with deep init injections example."""
from dependency_injector import providers
class Regularizer:
def __init__(self, alpha):
self.alpha = alpha
class Loss:
def __init__(self, regularizer):
self.regularizer = regularizer
class ClassificationTask:
def __init__(self, loss):
self.loss = loss
class Algorithm:
def __init__(self, task):
self.task = task
algorithm_factory = providers.Factory(
Algorithm,
task=providers.Factory(
ClassificationTask,
loss=providers.Factory(
Loss,
regularizer=providers.Factory(
Regularizer,
),
),
),
)
if __name__ == '__main__':
algorithm_1 = algorithm_factory(task__loss__regularizer__alpha=0.5)
assert algorithm_1.task.loss.regularizer.alpha == 0.5
algorithm_2 = algorithm_factory(task__loss__regularizer__alpha=0.7)
assert algorithm_2.task.loss.regularizer.alpha == 0.7
algorithm_3 = algorithm_factory(task__loss__regularizer=Regularizer(alpha=0.8))
assert algorithm_3.task.loss.regularizer.alpha == 0.8
|
Update the docblock of the example
|
Update the docblock of the example
|
Python
|
bsd-3-clause
|
ets-labs/dependency_injector,rmk135/dependency_injector,ets-labs/python-dependency-injector,rmk135/objects
|
- """`Factory` providers deep init injections example."""
+ """`Factory` providers - building a complex object graph with deep init injections example."""
from dependency_injector import providers
class Regularizer:
def __init__(self, alpha):
self.alpha = alpha
class Loss:
def __init__(self, regularizer):
self.regularizer = regularizer
class ClassificationTask:
def __init__(self, loss):
self.loss = loss
class Algorithm:
def __init__(self, task):
self.task = task
algorithm_factory = providers.Factory(
Algorithm,
task=providers.Factory(
ClassificationTask,
loss=providers.Factory(
Loss,
regularizer=providers.Factory(
Regularizer,
),
),
),
)
if __name__ == '__main__':
algorithm_1 = algorithm_factory(task__loss__regularizer__alpha=0.5)
assert algorithm_1.task.loss.regularizer.alpha == 0.5
algorithm_2 = algorithm_factory(task__loss__regularizer__alpha=0.7)
assert algorithm_2.task.loss.regularizer.alpha == 0.7
algorithm_3 = algorithm_factory(task__loss__regularizer=Regularizer(alpha=0.8))
assert algorithm_3.task.loss.regularizer.alpha == 0.8
|
Update the docblock of the example
|
## Code Before:
"""`Factory` providers deep init injections example."""
from dependency_injector import providers
class Regularizer:
def __init__(self, alpha):
self.alpha = alpha
class Loss:
def __init__(self, regularizer):
self.regularizer = regularizer
class ClassificationTask:
def __init__(self, loss):
self.loss = loss
class Algorithm:
def __init__(self, task):
self.task = task
algorithm_factory = providers.Factory(
Algorithm,
task=providers.Factory(
ClassificationTask,
loss=providers.Factory(
Loss,
regularizer=providers.Factory(
Regularizer,
),
),
),
)
if __name__ == '__main__':
algorithm_1 = algorithm_factory(task__loss__regularizer__alpha=0.5)
assert algorithm_1.task.loss.regularizer.alpha == 0.5
algorithm_2 = algorithm_factory(task__loss__regularizer__alpha=0.7)
assert algorithm_2.task.loss.regularizer.alpha == 0.7
algorithm_3 = algorithm_factory(task__loss__regularizer=Regularizer(alpha=0.8))
assert algorithm_3.task.loss.regularizer.alpha == 0.8
## Instruction:
Update the docblock of the example
## Code After:
"""`Factory` providers - building a complex object graph with deep init injections example."""
from dependency_injector import providers
class Regularizer:
def __init__(self, alpha):
self.alpha = alpha
class Loss:
def __init__(self, regularizer):
self.regularizer = regularizer
class ClassificationTask:
def __init__(self, loss):
self.loss = loss
class Algorithm:
def __init__(self, task):
self.task = task
algorithm_factory = providers.Factory(
Algorithm,
task=providers.Factory(
ClassificationTask,
loss=providers.Factory(
Loss,
regularizer=providers.Factory(
Regularizer,
),
),
),
)
if __name__ == '__main__':
algorithm_1 = algorithm_factory(task__loss__regularizer__alpha=0.5)
assert algorithm_1.task.loss.regularizer.alpha == 0.5
algorithm_2 = algorithm_factory(task__loss__regularizer__alpha=0.7)
assert algorithm_2.task.loss.regularizer.alpha == 0.7
algorithm_3 = algorithm_factory(task__loss__regularizer=Regularizer(alpha=0.8))
assert algorithm_3.task.loss.regularizer.alpha == 0.8
|
// ... existing code ...
"""`Factory` providers - building a complex object graph with deep init injections example."""
// ... rest of the code ...
|
725bfcc3484826083c3e6cdca71b4af41b37a9c9
|
runtests.py
|
runtests.py
|
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
if not settings.configured:
settings.configure(
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.admin',
'fluent_contents',
'fluent_contents.tests.testapp',
),
ROOT_URLCONF = 'fluent_contents.tests.testapp.urls',
FLUENT_CONTENTS_CACHE_OUTPUT = True,
SITE_ID = 3
)
def runtests():
argv = sys.argv[:1] + ['test', 'fluent_contents', '--traceback'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
|
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
if not settings.configured:
settings.configure(
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.admin',
'fluent_contents',
'fluent_contents.tests.testapp',
),
ROOT_URLCONF = 'fluent_contents.tests.testapp.urls',
TEST_RUNNER='django.test.simple.DjangoTestSuiteRunner', # for Django 1.6, see https://docs.djangoproject.com/en/dev/releases/1.6/#new-test-runner
SITE_ID = 3,
FLUENT_CONTENTS_CACHE_OUTPUT = True,
)
def runtests():
argv = sys.argv[:1] + ['test', 'fluent_contents', '--traceback'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
|
Make sure tests are found in Django 1.6
|
Make sure tests are found in Django 1.6
|
Python
|
apache-2.0
|
jpotterm/django-fluent-contents,ixc/django-fluent-contents,edoburu/django-fluent-contents,jpotterm/django-fluent-contents,ixc/django-fluent-contents,django-fluent/django-fluent-contents,jpotterm/django-fluent-contents,ixc/django-fluent-contents,django-fluent/django-fluent-contents,django-fluent/django-fluent-contents,edoburu/django-fluent-contents,edoburu/django-fluent-contents
|
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
if not settings.configured:
settings.configure(
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.admin',
'fluent_contents',
'fluent_contents.tests.testapp',
),
ROOT_URLCONF = 'fluent_contents.tests.testapp.urls',
+ TEST_RUNNER='django.test.simple.DjangoTestSuiteRunner', # for Django 1.6, see https://docs.djangoproject.com/en/dev/releases/1.6/#new-test-runner
+ SITE_ID = 3,
FLUENT_CONTENTS_CACHE_OUTPUT = True,
- SITE_ID = 3
)
def runtests():
argv = sys.argv[:1] + ['test', 'fluent_contents', '--traceback'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
|
Make sure tests are found in Django 1.6
|
## Code Before:
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
if not settings.configured:
settings.configure(
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.admin',
'fluent_contents',
'fluent_contents.tests.testapp',
),
ROOT_URLCONF = 'fluent_contents.tests.testapp.urls',
FLUENT_CONTENTS_CACHE_OUTPUT = True,
SITE_ID = 3
)
def runtests():
argv = sys.argv[:1] + ['test', 'fluent_contents', '--traceback'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
## Instruction:
Make sure tests are found in Django 1.6
## Code After:
import sys
from django.conf import settings
from django.core.management import execute_from_command_line
if not settings.configured:
settings.configure(
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.admin',
'fluent_contents',
'fluent_contents.tests.testapp',
),
ROOT_URLCONF = 'fluent_contents.tests.testapp.urls',
TEST_RUNNER='django.test.simple.DjangoTestSuiteRunner', # for Django 1.6, see https://docs.djangoproject.com/en/dev/releases/1.6/#new-test-runner
SITE_ID = 3,
FLUENT_CONTENTS_CACHE_OUTPUT = True,
)
def runtests():
argv = sys.argv[:1] + ['test', 'fluent_contents', '--traceback'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
|
// ... existing code ...
ROOT_URLCONF = 'fluent_contents.tests.testapp.urls',
TEST_RUNNER='django.test.simple.DjangoTestSuiteRunner', # for Django 1.6, see https://docs.djangoproject.com/en/dev/releases/1.6/#new-test-runner
SITE_ID = 3,
FLUENT_CONTENTS_CACHE_OUTPUT = True,
)
// ... rest of the code ...
|
f70bbbdadc044a76f7b90b2cac0191353a6a5048
|
depfinder.py
|
depfinder.py
|
import ast
def get_imported_libs(code):
tree = ast.parse(code)
# ast.Import represents lines like 'import foo' and 'import foo, bar'
# the extra for name in t.names is needed, because names is a list that
# would be ['foo'] for the first and ['foo', 'bar'] for the second
imports = [name.name.split('.')[0] for t in tree.body
if type(t) == ast.Import for name in t.names]
# ast.ImportFrom represents lines like 'from foo import bar'
import_froms = [t.module.split('.')[0] for t in tree.body if type(t) == ast.ImportFrom if t.module]
return imports + import_froms
|
import ast
import os
from collections import deque
import sys
from stdlib_list import stdlib_list
conf = {
'ignore_relative_imports': True,
'ignore_builtin_modules': True,
'pyver': None,
}
def get_imported_libs(code):
tree = ast.parse(code)
imports = deque()
for t in tree.body:
# ast.Import represents lines like 'import foo' and 'import foo, bar'
# the extra for name in t.names is needed, because names is a list that
# would be ['foo'] for the first and ['foo', 'bar'] for the second
if type(t) == ast.Import:
imports.extend([name.name.split('.')[0] for name in t.names])
# ast.ImportFrom represents lines like 'from foo import bar'
# t.level == 0 is to get rid of 'from .foo import bar' and higher levels
# of relative importing
if type(t) == ast.ImportFrom:
if t.level > 0:
if conf['ignore_relative_imports'] or not t.module:
continue
else:
imports.append(t.module.split('.')[0])
return list(imports)
def iterate_over_library(path_to_source_code):
libs = set()
for parent, folders, files in os.walk(path_to_source_code):
for file in files:
if file.endswith('.py'):
print('.', end='')
full_file_path = os.path.join(parent, file)
with open(full_file_path, 'r') as f:
code = f.read()
libs.update(set(get_imported_libs(code)))
if conf['ignore_builtin_modules']:
if not conf['pyver']:
pyver = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
std_libs = stdlib_list("3.4")
# print(std_libs)
libs = [lib for lib in libs if lib not in std_libs]
return libs
|
Rework the import finding logic
|
MNT: Rework the import finding logic
|
Python
|
bsd-3-clause
|
ericdill/depfinder
|
import ast
+ import os
+ from collections import deque
+ import sys
+ from stdlib_list import stdlib_list
+
+ conf = {
+ 'ignore_relative_imports': True,
+ 'ignore_builtin_modules': True,
+ 'pyver': None,
+ }
def get_imported_libs(code):
tree = ast.parse(code)
+ imports = deque()
+ for t in tree.body:
- # ast.Import represents lines like 'import foo' and 'import foo, bar'
+ # ast.Import represents lines like 'import foo' and 'import foo, bar'
- # the extra for name in t.names is needed, because names is a list that
+ # the extra for name in t.names is needed, because names is a list that
- # would be ['foo'] for the first and ['foo', 'bar'] for the second
+ # would be ['foo'] for the first and ['foo', 'bar'] for the second
- imports = [name.name.split('.')[0] for t in tree.body
- if type(t) == ast.Import for name in t.names]
+ if type(t) == ast.Import:
+ imports.extend([name.name.split('.')[0] for name in t.names])
- # ast.ImportFrom represents lines like 'from foo import bar'
+ # ast.ImportFrom represents lines like 'from foo import bar'
- import_froms = [t.module.split('.')[0] for t in tree.body if type(t) == ast.ImportFrom if t.module]
- return imports + import_froms
+ # t.level == 0 is to get rid of 'from .foo import bar' and higher levels
+ # of relative importing
+ if type(t) == ast.ImportFrom:
+ if t.level > 0:
+ if conf['ignore_relative_imports'] or not t.module:
+ continue
+ else:
+ imports.append(t.module.split('.')[0])
+ return list(imports)
+
+
+ def iterate_over_library(path_to_source_code):
+ libs = set()
+ for parent, folders, files in os.walk(path_to_source_code):
+ for file in files:
+ if file.endswith('.py'):
+ print('.', end='')
+ full_file_path = os.path.join(parent, file)
+ with open(full_file_path, 'r') as f:
+ code = f.read()
+ libs.update(set(get_imported_libs(code)))
+
+ if conf['ignore_builtin_modules']:
+ if not conf['pyver']:
+ pyver = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
+ std_libs = stdlib_list("3.4")
+ # print(std_libs)
+ libs = [lib for lib in libs if lib not in std_libs]
+
+ return libs
|
Rework the import finding logic
|
## Code Before:
import ast
def get_imported_libs(code):
tree = ast.parse(code)
# ast.Import represents lines like 'import foo' and 'import foo, bar'
# the extra for name in t.names is needed, because names is a list that
# would be ['foo'] for the first and ['foo', 'bar'] for the second
imports = [name.name.split('.')[0] for t in tree.body
if type(t) == ast.Import for name in t.names]
# ast.ImportFrom represents lines like 'from foo import bar'
import_froms = [t.module.split('.')[0] for t in tree.body if type(t) == ast.ImportFrom if t.module]
return imports + import_froms
## Instruction:
Rework the import finding logic
## Code After:
import ast
import os
from collections import deque
import sys
from stdlib_list import stdlib_list
conf = {
'ignore_relative_imports': True,
'ignore_builtin_modules': True,
'pyver': None,
}
def get_imported_libs(code):
tree = ast.parse(code)
imports = deque()
for t in tree.body:
# ast.Import represents lines like 'import foo' and 'import foo, bar'
# the extra for name in t.names is needed, because names is a list that
# would be ['foo'] for the first and ['foo', 'bar'] for the second
if type(t) == ast.Import:
imports.extend([name.name.split('.')[0] for name in t.names])
# ast.ImportFrom represents lines like 'from foo import bar'
# t.level == 0 is to get rid of 'from .foo import bar' and higher levels
# of relative importing
if type(t) == ast.ImportFrom:
if t.level > 0:
if conf['ignore_relative_imports'] or not t.module:
continue
else:
imports.append(t.module.split('.')[0])
return list(imports)
def iterate_over_library(path_to_source_code):
libs = set()
for parent, folders, files in os.walk(path_to_source_code):
for file in files:
if file.endswith('.py'):
print('.', end='')
full_file_path = os.path.join(parent, file)
with open(full_file_path, 'r') as f:
code = f.read()
libs.update(set(get_imported_libs(code)))
if conf['ignore_builtin_modules']:
if not conf['pyver']:
pyver = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
std_libs = stdlib_list("3.4")
# print(std_libs)
libs = [lib for lib in libs if lib not in std_libs]
return libs
|
// ... existing code ...
import ast
import os
from collections import deque
import sys
from stdlib_list import stdlib_list
conf = {
'ignore_relative_imports': True,
'ignore_builtin_modules': True,
'pyver': None,
}
// ... modified code ...
tree = ast.parse(code)
imports = deque()
for t in tree.body:
# ast.Import represents lines like 'import foo' and 'import foo, bar'
# the extra for name in t.names is needed, because names is a list that
# would be ['foo'] for the first and ['foo', 'bar'] for the second
if type(t) == ast.Import:
imports.extend([name.name.split('.')[0] for name in t.names])
# ast.ImportFrom represents lines like 'from foo import bar'
# t.level == 0 is to get rid of 'from .foo import bar' and higher levels
# of relative importing
if type(t) == ast.ImportFrom:
if t.level > 0:
if conf['ignore_relative_imports'] or not t.module:
continue
else:
imports.append(t.module.split('.')[0])
return list(imports)
def iterate_over_library(path_to_source_code):
libs = set()
for parent, folders, files in os.walk(path_to_source_code):
for file in files:
if file.endswith('.py'):
print('.', end='')
full_file_path = os.path.join(parent, file)
with open(full_file_path, 'r') as f:
code = f.read()
libs.update(set(get_imported_libs(code)))
if conf['ignore_builtin_modules']:
if not conf['pyver']:
pyver = '%s.%s' % (sys.version_info.major, sys.version_info.minor)
std_libs = stdlib_list("3.4")
# print(std_libs)
libs = [lib for lib in libs if lib not in std_libs]
return libs
// ... rest of the code ...
|
0039eefbfa546f24b3f10031e664341d60e4055c
|
ranger/commands.py
|
ranger/commands.py
|
from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="fd -t d --hidden | fzf +m"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
command="fd --hidden | fzf +m"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
|
from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="fd -t d --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
command="fd --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
|
Use previews in ranger fzf
|
Use previews in ranger fzf
|
Python
|
mit
|
darthdeus/dotfiles,darthdeus/dotfiles,darthdeus/dotfiles,darthdeus/dotfiles
|
from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
- command="fd -t d --hidden | fzf +m"
+ command="fd -t d --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
- command="fd --hidden | fzf +m"
+ command="fd --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
|
Use previews in ranger fzf
|
## Code Before:
from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="fd -t d --hidden | fzf +m"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
command="fd --hidden | fzf +m"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
## Instruction:
Use previews in ranger fzf
## Code After:
from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="fd -t d --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
command="fd --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
|
...
# match only directories
command="fd -t d --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
...
# match files and directories
command="fd --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
...
|
0d37a94593a7749dca4b2553334f1b67c946d3f8
|
ambassador/tests/t_lua_scripts.py
|
ambassador/tests/t_lua_scripts.py
|
from kat.harness import Query
from abstract_tests import AmbassadorTest, ServiceType, HTTP
class LuaTest(AmbassadorTest):
target: ServiceType
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return super().manifests() + self.format('''
---
apiVersion: getambassador.io/v1
kind: Module
metadata:
name: ambassador
spec:
ambassador_id: {self.ambassador_id}
config:
lua_scripts: |
function envoy_on_response(response_handle)
response_handle: headers():add("Lua-Scripts-Enabled", "Processed")
end
---
apiVersion: getambassador.io/v1
kind: Mapping
metadata:
name: lua-target-mapping
spec:
ambassador_id: {self.ambassador_id}
prefix: /target/
service: {self.target.path.fqdn}
''')
def queries(self):
yield Query(self.url("target/"))
def check(self):
for r in self.results:
assert r.headers.get('Lua-Scripts-Enabled', None) == ['Processed']
|
from kat.harness import Query
from abstract_tests import AmbassadorTest, ServiceType, HTTP
class LuaTest(AmbassadorTest):
target: ServiceType
def init(self):
self.target = HTTP()
self.env = ["LUA_SCRIPTS_ENABLED=Processed"]
def manifests(self) -> str:
return super().manifests() + self.format('''
---
apiVersion: getambassador.io/v1
kind: Module
metadata:
name: ambassador
spec:
ambassador_id: {self.ambassador_id}
config:
lua_scripts: |
function envoy_on_response(response_handle)
response_handle: headers():add("Lua-Scripts-Enabled", "${LUA_SCRIPTS_ENABLED}")
end
---
apiVersion: getambassador.io/v1
kind: Mapping
metadata:
name: lua-target-mapping
spec:
ambassador_id: {self.ambassador_id}
prefix: /target/
service: {self.target.path.fqdn}
''')
def queries(self):
yield Query(self.url("target/"))
def check(self):
for r in self.results:
assert r.headers.get('Lua-Scripts-Enabled', None) == ['Processed']
|
Update LUA test to perform interpolation
|
Update LUA test to perform interpolation
|
Python
|
apache-2.0
|
datawire/ambassador,datawire/ambassador,datawire/ambassador,datawire/ambassador,datawire/ambassador
|
from kat.harness import Query
from abstract_tests import AmbassadorTest, ServiceType, HTTP
class LuaTest(AmbassadorTest):
target: ServiceType
def init(self):
self.target = HTTP()
+ self.env = ["LUA_SCRIPTS_ENABLED=Processed"]
def manifests(self) -> str:
return super().manifests() + self.format('''
---
apiVersion: getambassador.io/v1
kind: Module
metadata:
name: ambassador
spec:
ambassador_id: {self.ambassador_id}
config:
lua_scripts: |
function envoy_on_response(response_handle)
- response_handle: headers():add("Lua-Scripts-Enabled", "Processed")
+ response_handle: headers():add("Lua-Scripts-Enabled", "${LUA_SCRIPTS_ENABLED}")
end
---
apiVersion: getambassador.io/v1
kind: Mapping
metadata:
name: lua-target-mapping
spec:
ambassador_id: {self.ambassador_id}
prefix: /target/
service: {self.target.path.fqdn}
''')
def queries(self):
yield Query(self.url("target/"))
def check(self):
for r in self.results:
assert r.headers.get('Lua-Scripts-Enabled', None) == ['Processed']
|
Update LUA test to perform interpolation
|
## Code Before:
from kat.harness import Query
from abstract_tests import AmbassadorTest, ServiceType, HTTP
class LuaTest(AmbassadorTest):
target: ServiceType
def init(self):
self.target = HTTP()
def manifests(self) -> str:
return super().manifests() + self.format('''
---
apiVersion: getambassador.io/v1
kind: Module
metadata:
name: ambassador
spec:
ambassador_id: {self.ambassador_id}
config:
lua_scripts: |
function envoy_on_response(response_handle)
response_handle: headers():add("Lua-Scripts-Enabled", "Processed")
end
---
apiVersion: getambassador.io/v1
kind: Mapping
metadata:
name: lua-target-mapping
spec:
ambassador_id: {self.ambassador_id}
prefix: /target/
service: {self.target.path.fqdn}
''')
def queries(self):
yield Query(self.url("target/"))
def check(self):
for r in self.results:
assert r.headers.get('Lua-Scripts-Enabled', None) == ['Processed']
## Instruction:
Update LUA test to perform interpolation
## Code After:
from kat.harness import Query
from abstract_tests import AmbassadorTest, ServiceType, HTTP
class LuaTest(AmbassadorTest):
target: ServiceType
def init(self):
self.target = HTTP()
self.env = ["LUA_SCRIPTS_ENABLED=Processed"]
def manifests(self) -> str:
return super().manifests() + self.format('''
---
apiVersion: getambassador.io/v1
kind: Module
metadata:
name: ambassador
spec:
ambassador_id: {self.ambassador_id}
config:
lua_scripts: |
function envoy_on_response(response_handle)
response_handle: headers():add("Lua-Scripts-Enabled", "${LUA_SCRIPTS_ENABLED}")
end
---
apiVersion: getambassador.io/v1
kind: Mapping
metadata:
name: lua-target-mapping
spec:
ambassador_id: {self.ambassador_id}
prefix: /target/
service: {self.target.path.fqdn}
''')
def queries(self):
yield Query(self.url("target/"))
def check(self):
for r in self.results:
assert r.headers.get('Lua-Scripts-Enabled', None) == ['Processed']
|
// ... existing code ...
self.target = HTTP()
self.env = ["LUA_SCRIPTS_ENABLED=Processed"]
// ... modified code ...
function envoy_on_response(response_handle)
response_handle: headers():add("Lua-Scripts-Enabled", "${LUA_SCRIPTS_ENABLED}")
end
// ... rest of the code ...
|
1843e34bba0343cd3600f3c8934ae29b4b365554
|
chstrings/chstrings_test.py
|
chstrings/chstrings_test.py
|
import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up
chstrings.get_localized_strings(cfg, cfg.lang_code)
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up. Use the fallback
# lang_tag across all tests.
lang_tag = cfg.lang_code
if cfg.accept_language:
lang_tag = cfg.accept_language[-1]
self.assertNotEqual({},
chstrings.get_localized_strings(cfg, lang_tag))
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
Extend chstrings smoke test a little more.
|
Extend chstrings smoke test a little more.
|
Python
|
mit
|
eggpi/citationhunt,guilherme-pg/citationhunt,eggpi/citationhunt,eggpi/citationhunt,guilherme-pg/citationhunt,guilherme-pg/citationhunt,eggpi/citationhunt,guilherme-pg/citationhunt
|
import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
- # We just want to see if this will blow up
+ # We just want to see if this will blow up. Use the fallback
+ # lang_tag across all tests.
+ lang_tag = cfg.lang_code
+ if cfg.accept_language:
+ lang_tag = cfg.accept_language[-1]
+ self.assertNotEqual({},
- chstrings.get_localized_strings(cfg, cfg.lang_code)
+ chstrings.get_localized_strings(cfg, lang_tag))
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
Extend chstrings smoke test a little more.
|
## Code Before:
import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up
chstrings.get_localized_strings(cfg, cfg.lang_code)
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
## Instruction:
Extend chstrings smoke test a little more.
## Code After:
import chstrings
import config
import unittest
class CHStringsTest(unittest.TestCase):
@classmethod
def add_smoke_test(cls, cfg):
def test(self):
# We just want to see if this will blow up. Use the fallback
# lang_tag across all tests.
lang_tag = cfg.lang_code
if cfg.accept_language:
lang_tag = cfg.accept_language[-1]
self.assertNotEqual({},
chstrings.get_localized_strings(cfg, lang_tag))
name = 'test_' + cfg.lang_code + '_smoke_test'
setattr(cls, name, test)
if __name__ == '__main__':
for lc in config.LANG_CODES_TO_LANG_NAMES:
cfg = config.get_localized_config(lc)
CHStringsTest.add_smoke_test(cfg)
unittest.main()
|
...
def test(self):
# We just want to see if this will blow up. Use the fallback
# lang_tag across all tests.
lang_tag = cfg.lang_code
if cfg.accept_language:
lang_tag = cfg.accept_language[-1]
self.assertNotEqual({},
chstrings.get_localized_strings(cfg, lang_tag))
name = 'test_' + cfg.lang_code + '_smoke_test'
...
|
7c68a78a81721ecbbda0f999576b91b803a34a3e
|
.circleci/get-commit-range.py
|
.circleci/get-commit-range.py
|
import os
import argparse
from github import Github
def from_pr(project, repo, pr_number):
gh = Github()
pr = gh.get_repo(f'{project}/{repo}').get_pull(pr_number)
base = pr.base.ref
head = pr.head.ref
return f'origin/{base}...{head}'
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
'project',
default=os.environ['CIRCLE_PROJECT_USERNAME'],
nargs='?'
)
argparser.add_argument(
'repo',
default=os.environ['CIRCLE_PROJECT_REPONAME'],
nargs='?'
)
argparser.add_argument(
'--pr-number',
type=int,
nargs='?'
)
args = argparser.parse_args()
if not args.pr_number:
pr_number = int(os.environ['CIRCLE_PR_NUMBER'])
else:
pr_number = args.pr_number
print(from_pr(args.project, args.repo, pr_number))
if __name__ == '__main__':
main()
|
import os
import argparse
from github import Github
def from_pr(project, repo, pr_number):
gh = Github()
pr = gh.get_repo(f'{project}/{repo}').get_pull(pr_number)
base = pr.base.sha
head = pr.base.sha
return f'{base}...{head}'
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
'project',
default=os.environ['CIRCLE_PROJECT_USERNAME'],
nargs='?'
)
argparser.add_argument(
'repo',
default=os.environ['CIRCLE_PROJECT_REPONAME'],
nargs='?'
)
argparser.add_argument(
'--pr-number',
type=int,
nargs='?'
)
args = argparser.parse_args()
if not args.pr_number:
pr_number = int(os.environ['CIRCLE_PR_NUMBER'])
else:
pr_number = args.pr_number
print(from_pr(args.project, args.repo, pr_number))
if __name__ == '__main__':
main()
|
Use SHAs for commit_range rather than refs
|
Use SHAs for commit_range rather than refs
Refs are local and might not always be present in the checkout.
|
Python
|
bsd-3-clause
|
ryanlovett/datahub,berkeley-dsep-infra/datahub,ryanlovett/datahub,ryanlovett/datahub,berkeley-dsep-infra/datahub,berkeley-dsep-infra/datahub
|
import os
import argparse
from github import Github
def from_pr(project, repo, pr_number):
gh = Github()
pr = gh.get_repo(f'{project}/{repo}').get_pull(pr_number)
- base = pr.base.ref
+ base = pr.base.sha
- head = pr.head.ref
+ head = pr.base.sha
- return f'origin/{base}...{head}'
+ return f'{base}...{head}'
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
'project',
default=os.environ['CIRCLE_PROJECT_USERNAME'],
nargs='?'
)
argparser.add_argument(
'repo',
default=os.environ['CIRCLE_PROJECT_REPONAME'],
nargs='?'
)
argparser.add_argument(
'--pr-number',
type=int,
nargs='?'
)
args = argparser.parse_args()
if not args.pr_number:
pr_number = int(os.environ['CIRCLE_PR_NUMBER'])
else:
pr_number = args.pr_number
print(from_pr(args.project, args.repo, pr_number))
if __name__ == '__main__':
main()
|
Use SHAs for commit_range rather than refs
|
## Code Before:
import os
import argparse
from github import Github
def from_pr(project, repo, pr_number):
gh = Github()
pr = gh.get_repo(f'{project}/{repo}').get_pull(pr_number)
base = pr.base.ref
head = pr.head.ref
return f'origin/{base}...{head}'
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
'project',
default=os.environ['CIRCLE_PROJECT_USERNAME'],
nargs='?'
)
argparser.add_argument(
'repo',
default=os.environ['CIRCLE_PROJECT_REPONAME'],
nargs='?'
)
argparser.add_argument(
'--pr-number',
type=int,
nargs='?'
)
args = argparser.parse_args()
if not args.pr_number:
pr_number = int(os.environ['CIRCLE_PR_NUMBER'])
else:
pr_number = args.pr_number
print(from_pr(args.project, args.repo, pr_number))
if __name__ == '__main__':
main()
## Instruction:
Use SHAs for commit_range rather than refs
## Code After:
import os
import argparse
from github import Github
def from_pr(project, repo, pr_number):
gh = Github()
pr = gh.get_repo(f'{project}/{repo}').get_pull(pr_number)
base = pr.base.sha
head = pr.base.sha
return f'{base}...{head}'
def main():
argparser = argparse.ArgumentParser()
argparser.add_argument(
'project',
default=os.environ['CIRCLE_PROJECT_USERNAME'],
nargs='?'
)
argparser.add_argument(
'repo',
default=os.environ['CIRCLE_PROJECT_REPONAME'],
nargs='?'
)
argparser.add_argument(
'--pr-number',
type=int,
nargs='?'
)
args = argparser.parse_args()
if not args.pr_number:
pr_number = int(os.environ['CIRCLE_PR_NUMBER'])
else:
pr_number = args.pr_number
print(from_pr(args.project, args.repo, pr_number))
if __name__ == '__main__':
main()
|
// ... existing code ...
pr = gh.get_repo(f'{project}/{repo}').get_pull(pr_number)
base = pr.base.sha
head = pr.base.sha
return f'{base}...{head}'
// ... rest of the code ...
|
76d9d1beaebf5b7d8e2ef051c8b8926724542387
|
regenesis/storage.py
|
regenesis/storage.py
|
import os
import json
from regenesis.core import app
def cube_path(cube_name, ext):
return os.path.join(
app.config.get('DATA_DIRECTORY'),
cube_name + '.' + ext
)
def store_cube_raw(cube_name, cube_data):
fh = open(cube_path(cube_name, 'raw'), 'wb')
fh.write(cube_data.encode('utf-8'))
fh.close()
def load_cube_raw(cube_name):
fh = open(cube_path(cube_name, 'raw'), 'rb')
data = fh.read().decode('utf-8')
fh.close()
return data
def dump_cube_json(cube):
fh = open(cube_path(cube.name, 'json'), 'wb')
json.dump(cube, fh, cls=JSONEncoder, indent=2)
fh.close()
|
import os
import json
from regenesis.core import app
def cube_path(cube_name, ext):
return os.path.join(
app.config.get('DATA_DIRECTORY'),
cube_name + '.' + ext
)
def exists_raw(cube_name):
return os.path.isfile(cube_path(cube_name, 'raw'))
def store_cube_raw(cube_name, cube_data):
fh = open(cube_path(cube_name, 'raw'), 'wb')
fh.write(cube_data.encode('utf-8'))
fh.close()
def load_cube_raw(cube_name):
fh = open(cube_path(cube_name, 'raw'), 'rb')
data = fh.read().decode('utf-8')
fh.close()
return data
def dump_cube_json(cube):
fh = open(cube_path(cube.name, 'json'), 'wb')
json.dump(cube, fh, cls=JSONEncoder, indent=2)
fh.close()
|
Check if a cube exists on disk.
|
Check if a cube exists on disk.
|
Python
|
mit
|
pudo/regenesis,pudo/regenesis
|
import os
import json
from regenesis.core import app
def cube_path(cube_name, ext):
return os.path.join(
app.config.get('DATA_DIRECTORY'),
cube_name + '.' + ext
)
+
+ def exists_raw(cube_name):
+ return os.path.isfile(cube_path(cube_name, 'raw'))
def store_cube_raw(cube_name, cube_data):
fh = open(cube_path(cube_name, 'raw'), 'wb')
fh.write(cube_data.encode('utf-8'))
fh.close()
def load_cube_raw(cube_name):
fh = open(cube_path(cube_name, 'raw'), 'rb')
data = fh.read().decode('utf-8')
fh.close()
return data
def dump_cube_json(cube):
fh = open(cube_path(cube.name, 'json'), 'wb')
json.dump(cube, fh, cls=JSONEncoder, indent=2)
fh.close()
+
|
Check if a cube exists on disk.
|
## Code Before:
import os
import json
from regenesis.core import app
def cube_path(cube_name, ext):
return os.path.join(
app.config.get('DATA_DIRECTORY'),
cube_name + '.' + ext
)
def store_cube_raw(cube_name, cube_data):
fh = open(cube_path(cube_name, 'raw'), 'wb')
fh.write(cube_data.encode('utf-8'))
fh.close()
def load_cube_raw(cube_name):
fh = open(cube_path(cube_name, 'raw'), 'rb')
data = fh.read().decode('utf-8')
fh.close()
return data
def dump_cube_json(cube):
fh = open(cube_path(cube.name, 'json'), 'wb')
json.dump(cube, fh, cls=JSONEncoder, indent=2)
fh.close()
## Instruction:
Check if a cube exists on disk.
## Code After:
import os
import json
from regenesis.core import app
def cube_path(cube_name, ext):
return os.path.join(
app.config.get('DATA_DIRECTORY'),
cube_name + '.' + ext
)
def exists_raw(cube_name):
return os.path.isfile(cube_path(cube_name, 'raw'))
def store_cube_raw(cube_name, cube_data):
fh = open(cube_path(cube_name, 'raw'), 'wb')
fh.write(cube_data.encode('utf-8'))
fh.close()
def load_cube_raw(cube_name):
fh = open(cube_path(cube_name, 'raw'), 'rb')
data = fh.read().decode('utf-8')
fh.close()
return data
def dump_cube_json(cube):
fh = open(cube_path(cube.name, 'json'), 'wb')
json.dump(cube, fh, cls=JSONEncoder, indent=2)
fh.close()
|
// ... existing code ...
)
def exists_raw(cube_name):
return os.path.isfile(cube_path(cube_name, 'raw'))
// ... modified code ...
fh.close()
// ... rest of the code ...
|
0cad0060afcc5936541b3739ff03c06b8a773ec8
|
tests/templatetags/test_tags.py
|
tests/templatetags/test_tags.py
|
from django import template
from lazy_tags.decorators import lazy_tag
register = template.Library()
@register.simple_tag
def test():
return '<p>hello world</p>'
@register.simple_tag
@lazy_tag
def test_decorator():
return 'Success!'
@register.simple_tag
@lazy_tag
def test_simple_dec_args(arg, kwarg=None):
return '{0} {1}'.format(arg, kwarg)
@register.inclusion_tag('tests/decorator_tag_with_args.html')
@lazy_tag
def test_decorator_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_with_sleep():
import time
time.sleep(2)
return '<ul style="text-align: left;"><li>Steve Jobs</li><li>Bill Gates</li><li>Elon Musk</li></ul>'
@register.inclusion_tag('tests/inclusion_tag_with_args.html')
def test_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_orm(user):
return '<p>{} | {}</p>'.format(user.username, user.email)
@register.inclusion_tag('tests/inclusion_tag.html')
def inclusion():
return {'test': 'hello world'}
|
from django import template
from django.utils.safestring import mark_safe
from lazy_tags.decorators import lazy_tag
register = template.Library()
@register.simple_tag
def test():
return mark_safe('<p>hello world</p>')
@register.simple_tag
@lazy_tag
def test_decorator():
return 'Success!'
@register.simple_tag
@lazy_tag
def test_simple_dec_args(arg, kwarg=None):
return '{0} {1}'.format(arg, kwarg)
@register.inclusion_tag('tests/decorator_tag_with_args.html')
@lazy_tag
def test_decorator_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_with_sleep():
import time
time.sleep(2)
return '<ul style="text-align: left;"><li>Steve Jobs</li><li>Bill Gates</li><li>Elon Musk</li></ul>'
@register.inclusion_tag('tests/inclusion_tag_with_args.html')
def test_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_orm(user):
return '<p>{} | {}</p>'.format(user.username, user.email)
@register.inclusion_tag('tests/inclusion_tag.html')
def inclusion():
return {'test': 'hello world'}
|
Fix test on Django 1.9
|
Fix test on Django 1.9
|
Python
|
mit
|
grantmcconnaughey/django-lazy-tags,grantmcconnaughey/django-lazy-tags
|
from django import template
+ from django.utils.safestring import mark_safe
from lazy_tags.decorators import lazy_tag
register = template.Library()
@register.simple_tag
def test():
- return '<p>hello world</p>'
+ return mark_safe('<p>hello world</p>')
@register.simple_tag
@lazy_tag
def test_decorator():
return 'Success!'
@register.simple_tag
@lazy_tag
def test_simple_dec_args(arg, kwarg=None):
return '{0} {1}'.format(arg, kwarg)
@register.inclusion_tag('tests/decorator_tag_with_args.html')
@lazy_tag
def test_decorator_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_with_sleep():
import time
time.sleep(2)
return '<ul style="text-align: left;"><li>Steve Jobs</li><li>Bill Gates</li><li>Elon Musk</li></ul>'
@register.inclusion_tag('tests/inclusion_tag_with_args.html')
def test_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_orm(user):
return '<p>{} | {}</p>'.format(user.username, user.email)
@register.inclusion_tag('tests/inclusion_tag.html')
def inclusion():
return {'test': 'hello world'}
|
Fix test on Django 1.9
|
## Code Before:
from django import template
from lazy_tags.decorators import lazy_tag
register = template.Library()
@register.simple_tag
def test():
return '<p>hello world</p>'
@register.simple_tag
@lazy_tag
def test_decorator():
return 'Success!'
@register.simple_tag
@lazy_tag
def test_simple_dec_args(arg, kwarg=None):
return '{0} {1}'.format(arg, kwarg)
@register.inclusion_tag('tests/decorator_tag_with_args.html')
@lazy_tag
def test_decorator_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_with_sleep():
import time
time.sleep(2)
return '<ul style="text-align: left;"><li>Steve Jobs</li><li>Bill Gates</li><li>Elon Musk</li></ul>'
@register.inclusion_tag('tests/inclusion_tag_with_args.html')
def test_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_orm(user):
return '<p>{} | {}</p>'.format(user.username, user.email)
@register.inclusion_tag('tests/inclusion_tag.html')
def inclusion():
return {'test': 'hello world'}
## Instruction:
Fix test on Django 1.9
## Code After:
from django import template
from django.utils.safestring import mark_safe
from lazy_tags.decorators import lazy_tag
register = template.Library()
@register.simple_tag
def test():
return mark_safe('<p>hello world</p>')
@register.simple_tag
@lazy_tag
def test_decorator():
return 'Success!'
@register.simple_tag
@lazy_tag
def test_simple_dec_args(arg, kwarg=None):
return '{0} {1}'.format(arg, kwarg)
@register.inclusion_tag('tests/decorator_tag_with_args.html')
@lazy_tag
def test_decorator_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_with_sleep():
import time
time.sleep(2)
return '<ul style="text-align: left;"><li>Steve Jobs</li><li>Bill Gates</li><li>Elon Musk</li></ul>'
@register.inclusion_tag('tests/inclusion_tag_with_args.html')
def test_with_args(arg, kwarg=None):
return {
'arg': arg,
'kwarg': kwarg
}
@register.simple_tag
def test_orm(user):
return '<p>{} | {}</p>'.format(user.username, user.email)
@register.inclusion_tag('tests/inclusion_tag.html')
def inclusion():
return {'test': 'hello world'}
|
// ... existing code ...
from django import template
from django.utils.safestring import mark_safe
// ... modified code ...
def test():
return mark_safe('<p>hello world</p>')
// ... rest of the code ...
|
b22b292ec2b839d611738928f41c79723146ea15
|
readthedocs/core/migrations/0005_migrate-old-passwords.py
|
readthedocs/core/migrations/0005_migrate-old-passwords.py
|
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.set_unusable_password()
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth.hashers import make_password
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.password = make_password(None)
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
Migrate old passwords without "set_unusable_password"
|
Migrate old passwords without "set_unusable_password"
|
Python
|
mit
|
rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org
|
from __future__ import unicode_literals
from django.db import migrations
+ from django.contrib.auth.hashers import make_password
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
- user.set_unusable_password()
+ user.password = make_password(None)
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
Migrate old passwords without "set_unusable_password"
|
## Code Before:
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.set_unusable_password()
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
## Instruction:
Migrate old passwords without "set_unusable_password"
## Code After:
from __future__ import unicode_literals
from django.db import migrations
from django.contrib.auth.hashers import make_password
def forwards_func(apps, schema_editor):
User = apps.get_model('auth', 'User')
old_password_patterns = (
'sha1$',
# RTD's production database doesn't have any of these
# but they are included for completeness
'md5$',
'crypt$',
)
for pattern in old_password_patterns:
users = User.objects.filter(password__startswith=pattern)
for user in users:
user.password = make_password(None)
user.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0004_ad-opt-out'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.RunPython(forwards_func),
]
|
// ... existing code ...
from django.db import migrations
from django.contrib.auth.hashers import make_password
// ... modified code ...
for user in users:
user.password = make_password(None)
user.save()
// ... rest of the code ...
|
ee1deb28a2c32b7e35a2132542edd69f3c785c9c
|
django/projects/mysite/run-gevent.py
|
django/projects/mysite/run-gevent.py
|
from gevent import monkey
monkey.patch_all(httplib=True)
# Import the rest
from django.core.handlers.wsgi import WSGIHandler as DjangoWSGIApp
from django.core.management import setup_environ
from gevent.wsgi import WSGIServer
import sys
import settings
setup_environ(settings)
# Configure host and port for the WSGI server
host = getattr(settings, 'WSGI_HOST', '127.0.0.1')
port = getattr(settings, 'WSGI_PORT', 8080)
def runserver():
# Create the server
application = DjangoWSGIApp()
address = host, port
server = WSGIServer( address, application )
# Run the server
try:
server.serve_forever()
except KeyboardInterrupt:
server.stop()
sys.exit(0)
if __name__ == '__main__':
runserver()
|
from gevent import monkey
monkey.patch_all()
# Import the rest
from django.core.handlers.wsgi import WSGIHandler as DjangoWSGIApp
from django.core.management import setup_environ
from gevent.wsgi import WSGIServer
import sys
import settings
setup_environ(settings)
# Configure host and port for the WSGI server
host = getattr(settings, 'WSGI_HOST', '127.0.0.1')
port = getattr(settings, 'WSGI_PORT', 8080)
def runserver():
# Create the server
application = DjangoWSGIApp()
address = host, port
server = WSGIServer( address, application )
# Run the server
try:
server.serve_forever()
except KeyboardInterrupt:
server.stop()
sys.exit(0)
if __name__ == '__main__':
runserver()
|
Fix httplib monkey patching problem with Gevent >= 1.0
|
Fix httplib monkey patching problem with Gevent >= 1.0
From v1.0 on, Gevent doesn't support monkey patching of httplib anymore.
CATMAID's example script to run a Gevent WSGI server, however, was still
expecting this to be possible. This commit fixes this.
Thanks to Mikhail Kandel for reporting.
|
Python
|
agpl-3.0
|
fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID
|
from gevent import monkey
- monkey.patch_all(httplib=True)
+ monkey.patch_all()
# Import the rest
from django.core.handlers.wsgi import WSGIHandler as DjangoWSGIApp
from django.core.management import setup_environ
from gevent.wsgi import WSGIServer
import sys
import settings
setup_environ(settings)
# Configure host and port for the WSGI server
host = getattr(settings, 'WSGI_HOST', '127.0.0.1')
port = getattr(settings, 'WSGI_PORT', 8080)
def runserver():
# Create the server
application = DjangoWSGIApp()
address = host, port
server = WSGIServer( address, application )
# Run the server
try:
server.serve_forever()
except KeyboardInterrupt:
server.stop()
sys.exit(0)
if __name__ == '__main__':
runserver()
|
Fix httplib monkey patching problem with Gevent >= 1.0
|
## Code Before:
from gevent import monkey
monkey.patch_all(httplib=True)
# Import the rest
from django.core.handlers.wsgi import WSGIHandler as DjangoWSGIApp
from django.core.management import setup_environ
from gevent.wsgi import WSGIServer
import sys
import settings
setup_environ(settings)
# Configure host and port for the WSGI server
host = getattr(settings, 'WSGI_HOST', '127.0.0.1')
port = getattr(settings, 'WSGI_PORT', 8080)
def runserver():
# Create the server
application = DjangoWSGIApp()
address = host, port
server = WSGIServer( address, application )
# Run the server
try:
server.serve_forever()
except KeyboardInterrupt:
server.stop()
sys.exit(0)
if __name__ == '__main__':
runserver()
## Instruction:
Fix httplib monkey patching problem with Gevent >= 1.0
## Code After:
from gevent import monkey
monkey.patch_all()
# Import the rest
from django.core.handlers.wsgi import WSGIHandler as DjangoWSGIApp
from django.core.management import setup_environ
from gevent.wsgi import WSGIServer
import sys
import settings
setup_environ(settings)
# Configure host and port for the WSGI server
host = getattr(settings, 'WSGI_HOST', '127.0.0.1')
port = getattr(settings, 'WSGI_PORT', 8080)
def runserver():
# Create the server
application = DjangoWSGIApp()
address = host, port
server = WSGIServer( address, application )
# Run the server
try:
server.serve_forever()
except KeyboardInterrupt:
server.stop()
sys.exit(0)
if __name__ == '__main__':
runserver()
|
# ... existing code ...
from gevent import monkey
monkey.patch_all()
# ... rest of the code ...
|
710a2a6d9c462041bae6c41f0578d99262c6a861
|
tests/test_execute.py
|
tests/test_execute.py
|
import asyncpg
from asyncpg import _testbase as tb
class TestExecuteScript(tb.ConnectedTestCase):
async def test_execute_script_1(self):
r = await self.con.execute('''
SELECT 1;
SELECT true FROM pg_type WHERE false = true;
SELECT 2;
''')
self.assertIsNone(r)
async def test_execute_script_check_transactionality(self):
with self.assertRaises(asyncpg.Error):
await self.con.execute('''
CREATE TABLE mytab (a int);
SELECT * FROM mytab WHERE 1 / 0 = 1;
''')
with self.assertRaisesRegex(asyncpg.Error, '"mytab" does not exist'):
await self.con.prepare('''
SELECT * FROM mytab
''')
|
import asyncpg
from asyncpg import _testbase as tb
class TestExecuteScript(tb.ConnectedTestCase):
async def test_execute_script_1(self):
r = await self.con.execute('''
SELECT 1;
SELECT true FROM pg_type WHERE false = true;
SELECT 2;
''')
self.assertIsNone(r)
async def test_execute_script_check_transactionality(self):
with self.assertRaises(asyncpg.Error):
await self.con.execute('''
CREATE TABLE mytab (a int);
SELECT * FROM mytab WHERE 1 / 0 = 1;
''')
with self.assertRaisesRegex(asyncpg.Error, '"mytab" does not exist'):
await self.con.prepare('''
SELECT * FROM mytab
''')
async def test_execute_exceptions_1(self):
with self.assertRaisesRegex(asyncpg.Error,
'relation "__dne__" does not exist'):
await self.con.execute('select * from __dne__')
|
Test that con.execute() propagate Postgres exceptions
|
Test that con.execute() propagate Postgres exceptions
|
Python
|
apache-2.0
|
MagicStack/asyncpg,MagicStack/asyncpg
|
import asyncpg
from asyncpg import _testbase as tb
class TestExecuteScript(tb.ConnectedTestCase):
async def test_execute_script_1(self):
r = await self.con.execute('''
SELECT 1;
SELECT true FROM pg_type WHERE false = true;
SELECT 2;
''')
self.assertIsNone(r)
async def test_execute_script_check_transactionality(self):
with self.assertRaises(asyncpg.Error):
await self.con.execute('''
CREATE TABLE mytab (a int);
SELECT * FROM mytab WHERE 1 / 0 = 1;
''')
with self.assertRaisesRegex(asyncpg.Error, '"mytab" does not exist'):
await self.con.prepare('''
SELECT * FROM mytab
''')
+ async def test_execute_exceptions_1(self):
+ with self.assertRaisesRegex(asyncpg.Error,
+ 'relation "__dne__" does not exist'):
+
+ await self.con.execute('select * from __dne__')
+
|
Test that con.execute() propagate Postgres exceptions
|
## Code Before:
import asyncpg
from asyncpg import _testbase as tb
class TestExecuteScript(tb.ConnectedTestCase):
async def test_execute_script_1(self):
r = await self.con.execute('''
SELECT 1;
SELECT true FROM pg_type WHERE false = true;
SELECT 2;
''')
self.assertIsNone(r)
async def test_execute_script_check_transactionality(self):
with self.assertRaises(asyncpg.Error):
await self.con.execute('''
CREATE TABLE mytab (a int);
SELECT * FROM mytab WHERE 1 / 0 = 1;
''')
with self.assertRaisesRegex(asyncpg.Error, '"mytab" does not exist'):
await self.con.prepare('''
SELECT * FROM mytab
''')
## Instruction:
Test that con.execute() propagate Postgres exceptions
## Code After:
import asyncpg
from asyncpg import _testbase as tb
class TestExecuteScript(tb.ConnectedTestCase):
async def test_execute_script_1(self):
r = await self.con.execute('''
SELECT 1;
SELECT true FROM pg_type WHERE false = true;
SELECT 2;
''')
self.assertIsNone(r)
async def test_execute_script_check_transactionality(self):
with self.assertRaises(asyncpg.Error):
await self.con.execute('''
CREATE TABLE mytab (a int);
SELECT * FROM mytab WHERE 1 / 0 = 1;
''')
with self.assertRaisesRegex(asyncpg.Error, '"mytab" does not exist'):
await self.con.prepare('''
SELECT * FROM mytab
''')
async def test_execute_exceptions_1(self):
with self.assertRaisesRegex(asyncpg.Error,
'relation "__dne__" does not exist'):
await self.con.execute('select * from __dne__')
|
...
''')
async def test_execute_exceptions_1(self):
with self.assertRaisesRegex(asyncpg.Error,
'relation "__dne__" does not exist'):
await self.con.execute('select * from __dne__')
...
|
4eb4a2eaa42cd71bf4427bdaaa1e853975432691
|
graphene/storage/intermediate/general_store_manager.py
|
graphene/storage/intermediate/general_store_manager.py
|
from graphene.storage.id_store import *
class GeneralStoreManager:
"""
Handles the creation/deletion of nodes to the NodeStore with ID recycling
"""
def __init__(self, store):
"""
Creates an instance of the GeneralStoreManager
:param store: Store to manage
:return: General store manager to handle index recycling
:rtype: GeneralStoreManager
"""
self.store = store
self.idStore = IdStore(store.FILE_NAME + ".id")
def create_item(self):
"""
Creates an item with the type of the store being managed
:return: New item with type STORE_TYPE
"""
# Check for an available ID from the IdStore
available_id = self.idStore.get_id()
# If no ID is available, get the last index of the file
if available_id == IdStore.NO_ID:
available_id = self.store.get_last_file_index()
# Create a type based on the type our store stores
return self.store.STORAGE_TYPE(available_id)
def delete_item(self, item):
"""
Deletes the given item from the store and adds the index to its IdStore
to be recycled
:return: Nothing
:rtype: None
"""
# Get index of item to be deleted
deleted_index = item.index
# Delete the item from the store
self.store.delete_item(item)
# Add the index to the IdStore, so it can be recycled
self.idStore.store_id(deleted_index)
|
from graphene.storage.id_store import *
class GeneralStoreManager:
"""
Handles the creation/deletion of nodes to the NodeStore with ID recycling
"""
def __init__(self, store):
"""
Creates an instance of the GeneralStoreManager
:param store: Store to manage
:return: General store manager to handle index recycling
:rtype: GeneralStoreManager
"""
self.store = store
self.idStore = IdStore(store.FILE_NAME + ".id")
def create_item(self, **kwargs):
"""
Creates an item with the type of the store being managed
:return: New item with type STORE_TYPE
"""
# Check for an available ID from the IdStore
available_id = self.idStore.get_id()
# If no ID is available, get the last index of the file
if available_id == IdStore.NO_ID:
available_id = self.store.get_last_file_index()
# Create a type based on the type our store stores
return self.store.STORAGE_TYPE(available_id, **kwargs)
def delete_item(self, item):
"""
Deletes the given item from the store and adds the index to its IdStore
to be recycled
:return: Nothing
:rtype: None
"""
# Get index of item to be deleted
deleted_index = item.index
# Delete the item from the store
self.store.delete_item(item)
# Add the index to the IdStore, so it can be recycled
self.idStore.store_id(deleted_index)
|
Allow keyword arguments in GeneralStoreManager.create_item method
|
Allow keyword arguments in GeneralStoreManager.create_item method
|
Python
|
apache-2.0
|
PHB-CS123/graphene,PHB-CS123/graphene,PHB-CS123/graphene
|
from graphene.storage.id_store import *
class GeneralStoreManager:
"""
Handles the creation/deletion of nodes to the NodeStore with ID recycling
"""
def __init__(self, store):
"""
Creates an instance of the GeneralStoreManager
:param store: Store to manage
:return: General store manager to handle index recycling
:rtype: GeneralStoreManager
"""
self.store = store
self.idStore = IdStore(store.FILE_NAME + ".id")
- def create_item(self):
+ def create_item(self, **kwargs):
"""
Creates an item with the type of the store being managed
:return: New item with type STORE_TYPE
"""
# Check for an available ID from the IdStore
available_id = self.idStore.get_id()
# If no ID is available, get the last index of the file
if available_id == IdStore.NO_ID:
available_id = self.store.get_last_file_index()
# Create a type based on the type our store stores
- return self.store.STORAGE_TYPE(available_id)
+ return self.store.STORAGE_TYPE(available_id, **kwargs)
def delete_item(self, item):
"""
Deletes the given item from the store and adds the index to its IdStore
to be recycled
:return: Nothing
:rtype: None
"""
# Get index of item to be deleted
deleted_index = item.index
# Delete the item from the store
self.store.delete_item(item)
# Add the index to the IdStore, so it can be recycled
self.idStore.store_id(deleted_index)
|
Allow keyword arguments in GeneralStoreManager.create_item method
|
## Code Before:
from graphene.storage.id_store import *
class GeneralStoreManager:
"""
Handles the creation/deletion of nodes to the NodeStore with ID recycling
"""
def __init__(self, store):
"""
Creates an instance of the GeneralStoreManager
:param store: Store to manage
:return: General store manager to handle index recycling
:rtype: GeneralStoreManager
"""
self.store = store
self.idStore = IdStore(store.FILE_NAME + ".id")
def create_item(self):
"""
Creates an item with the type of the store being managed
:return: New item with type STORE_TYPE
"""
# Check for an available ID from the IdStore
available_id = self.idStore.get_id()
# If no ID is available, get the last index of the file
if available_id == IdStore.NO_ID:
available_id = self.store.get_last_file_index()
# Create a type based on the type our store stores
return self.store.STORAGE_TYPE(available_id)
def delete_item(self, item):
"""
Deletes the given item from the store and adds the index to its IdStore
to be recycled
:return: Nothing
:rtype: None
"""
# Get index of item to be deleted
deleted_index = item.index
# Delete the item from the store
self.store.delete_item(item)
# Add the index to the IdStore, so it can be recycled
self.idStore.store_id(deleted_index)
## Instruction:
Allow keyword arguments in GeneralStoreManager.create_item method
## Code After:
from graphene.storage.id_store import *
class GeneralStoreManager:
"""
Handles the creation/deletion of nodes to the NodeStore with ID recycling
"""
def __init__(self, store):
"""
Creates an instance of the GeneralStoreManager
:param store: Store to manage
:return: General store manager to handle index recycling
:rtype: GeneralStoreManager
"""
self.store = store
self.idStore = IdStore(store.FILE_NAME + ".id")
def create_item(self, **kwargs):
"""
Creates an item with the type of the store being managed
:return: New item with type STORE_TYPE
"""
# Check for an available ID from the IdStore
available_id = self.idStore.get_id()
# If no ID is available, get the last index of the file
if available_id == IdStore.NO_ID:
available_id = self.store.get_last_file_index()
# Create a type based on the type our store stores
return self.store.STORAGE_TYPE(available_id, **kwargs)
def delete_item(self, item):
"""
Deletes the given item from the store and adds the index to its IdStore
to be recycled
:return: Nothing
:rtype: None
"""
# Get index of item to be deleted
deleted_index = item.index
# Delete the item from the store
self.store.delete_item(item)
# Add the index to the IdStore, so it can be recycled
self.idStore.store_id(deleted_index)
|
// ... existing code ...
def create_item(self, **kwargs):
"""
// ... modified code ...
# Create a type based on the type our store stores
return self.store.STORAGE_TYPE(available_id, **kwargs)
// ... rest of the code ...
|
582964f9da6029cd089117496babf9267c41ecd5
|
evewspace/core/utils.py
|
evewspace/core/utils.py
|
from core.models import ConfigEntry
def get_config(name, user):
"""
Gets the correct config value for the given key name.
Value with the given user has priority over any default value.
"""
if ConfigEntry.objects.filter(name=name, user=user).count() != 0:
return ConfigEntry.objects.get(name=name, user=user)
# No user value, look for global / default
if ConfigEntry.objects.filter(name=name, user=None).count() != 0:
return ConfigEntry.objects.get(name=name, user=None)
else:
raise KeyError("No configuration entry with key %s was found." % name)
|
from core.models import ConfigEntry
def get_config(name, user):
"""
Gets the correct config value for the given key name.
Value with the given user has priority over any default value.
"""
try:
return ConfigEntry.objects.get(name=name, user=user)
except ConfigEntry.DoesNotExist:
return ConfigEntry.objects.get(name=name, user=None)
|
Reduce queries used to lookup config
|
Reduce queries used to lookup config
|
Python
|
apache-2.0
|
evewspace/eve-wspace,nyrocron/eve-wspace,hybrid1969/eve-wspace,hybrid1969/eve-wspace,acdervis/eve-wspace,marbindrakon/eve-wspace,Unsettled/eve-wspace,proycon/eve-wspace,mmalyska/eve-wspace,evewspace/eve-wspace,gpapaz/eve-wspace,acdervis/eve-wspace,proycon/eve-wspace,marbindrakon/eve-wspace,Zumochi/eve-wspace,marbindrakon/eve-wspace,gpapaz/eve-wspace,Unsettled/eve-wspace,gpapaz/eve-wspace,hybrid1969/eve-wspace,Unsettled/eve-wspace,nyrocron/eve-wspace,proycon/eve-wspace,Zumochi/eve-wspace,nyrocron/eve-wspace,Maarten28/eve-wspace,Maarten28/eve-wspace,mmalyska/eve-wspace,acdervis/eve-wspace,marbindrakon/eve-wspace,proycon/eve-wspace,mmalyska/eve-wspace,Maarten28/eve-wspace,mmalyska/eve-wspace,Unsettled/eve-wspace,Zumochi/eve-wspace,evewspace/eve-wspace,Maarten28/eve-wspace,acdervis/eve-wspace,Zumochi/eve-wspace,gpapaz/eve-wspace,evewspace/eve-wspace,hybrid1969/eve-wspace,nyrocron/eve-wspace
|
from core.models import ConfigEntry
def get_config(name, user):
"""
Gets the correct config value for the given key name.
Value with the given user has priority over any default value.
"""
- if ConfigEntry.objects.filter(name=name, user=user).count() != 0:
+ try:
return ConfigEntry.objects.get(name=name, user=user)
+ except ConfigEntry.DoesNotExist:
+ return ConfigEntry.objects.get(name=name, user=None)
- # No user value, look for global / default
- if ConfigEntry.objects.filter(name=name, user=None).count() != 0:
- return ConfigEntry.objects.get(name=name, user=None)
- else:
- raise KeyError("No configuration entry with key %s was found." % name)
-
|
Reduce queries used to lookup config
|
## Code Before:
from core.models import ConfigEntry
def get_config(name, user):
"""
Gets the correct config value for the given key name.
Value with the given user has priority over any default value.
"""
if ConfigEntry.objects.filter(name=name, user=user).count() != 0:
return ConfigEntry.objects.get(name=name, user=user)
# No user value, look for global / default
if ConfigEntry.objects.filter(name=name, user=None).count() != 0:
return ConfigEntry.objects.get(name=name, user=None)
else:
raise KeyError("No configuration entry with key %s was found." % name)
## Instruction:
Reduce queries used to lookup config
## Code After:
from core.models import ConfigEntry
def get_config(name, user):
"""
Gets the correct config value for the given key name.
Value with the given user has priority over any default value.
"""
try:
return ConfigEntry.objects.get(name=name, user=user)
except ConfigEntry.DoesNotExist:
return ConfigEntry.objects.get(name=name, user=None)
|
# ... existing code ...
"""
try:
return ConfigEntry.objects.get(name=name, user=user)
except ConfigEntry.DoesNotExist:
return ConfigEntry.objects.get(name=name, user=None)
# ... rest of the code ...
|
127b90c88d1362e7b10e7bf36dff56b96a5c4f0b
|
simpegEM/FDEM/__init__.py
|
simpegEM/FDEM/__init__.py
|
from SurveyFDEM import *
from FDEM import ProblemFDEM_e, ProblemFDEM_b, ProblemFDEM_j, ProblemFDEM_h
|
from SurveyFDEM import *
from FDEM import BaseFDEMProblem, ProblemFDEM_e, ProblemFDEM_b, ProblemFDEM_j, ProblemFDEM_h, omega
|
Add more files to export on the init.
|
Add more files to export on the init.
|
Python
|
mit
|
simpeg/discretize,lheagy/simpegem,simpeg/discretize,simpeg/discretize,simpeg/simpeg,simpeg/simpegem
|
from SurveyFDEM import *
- from FDEM import ProblemFDEM_e, ProblemFDEM_b, ProblemFDEM_j, ProblemFDEM_h
+ from FDEM import BaseFDEMProblem, ProblemFDEM_e, ProblemFDEM_b, ProblemFDEM_j, ProblemFDEM_h, omega
|
Add more files to export on the init.
|
## Code Before:
from SurveyFDEM import *
from FDEM import ProblemFDEM_e, ProblemFDEM_b, ProblemFDEM_j, ProblemFDEM_h
## Instruction:
Add more files to export on the init.
## Code After:
from SurveyFDEM import *
from FDEM import BaseFDEMProblem, ProblemFDEM_e, ProblemFDEM_b, ProblemFDEM_j, ProblemFDEM_h, omega
|
// ... existing code ...
from SurveyFDEM import *
from FDEM import BaseFDEMProblem, ProblemFDEM_e, ProblemFDEM_b, ProblemFDEM_j, ProblemFDEM_h, omega
// ... rest of the code ...
|
6dcb33004c3775d707f362a6f2c8217c1d558f56
|
kobin/server_adapters.py
|
kobin/server_adapters.py
|
from typing import Dict, Any
class ServerAdapter:
quiet = False
def __init__(self, host: str='127.0.0.1', port: int=8080, **options) -> None:
self.options = options
self.host = host
self.port = int(port)
def run(self, handler):
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class WSGIRefServer(ServerAdapter):
def run(self, app):
from wsgiref.simple_server import make_server # type: ignore
self.httpd = make_server(self.host, self.port, app)
self.port = self.httpd.server_port
try:
self.httpd.serve_forever()
except KeyboardInterrupt:
self.httpd.server_close()
raise
servers = {
'wsgiref': WSGIRefServer,
} # type: Dict[str, Any]
|
from typing import Dict, Any
class ServerAdapter:
quiet = False
def __init__(self, host: str='127.0.0.1', port: int=8080, **options) -> None:
self.options = options
self.host = host
self.port = int(port)
def run(self, handler):
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class WSGIRefServer(ServerAdapter):
def run(self, app):
from wsgiref.simple_server import make_server # type: ignore
self.httpd = make_server(self.host, self.port, app)
self.port = self.httpd.server_port
try:
self.httpd.serve_forever()
except KeyboardInterrupt:
self.httpd.server_close()
raise
class GunicornServer(ServerAdapter):
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
servers = {
'wsgiref': WSGIRefServer,
'gunicorn': GunicornServer,
} # type: Dict[str, Any]
|
Add a gunicorn server adpter
|
Add a gunicorn server adpter
|
Python
|
mit
|
kobinpy/kobin,kobinpy/kobin,c-bata/kobin,c-bata/kobin
|
from typing import Dict, Any
class ServerAdapter:
quiet = False
def __init__(self, host: str='127.0.0.1', port: int=8080, **options) -> None:
self.options = options
self.host = host
self.port = int(port)
def run(self, handler):
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class WSGIRefServer(ServerAdapter):
def run(self, app):
from wsgiref.simple_server import make_server # type: ignore
self.httpd = make_server(self.host, self.port, app)
self.port = self.httpd.server_port
try:
self.httpd.serve_forever()
except KeyboardInterrupt:
self.httpd.server_close()
raise
+
+ class GunicornServer(ServerAdapter):
+ def run(self, handler):
+ from gunicorn.app.base import Application
+
+ config = {'bind': "%s:%d" % (self.host, int(self.port))}
+ config.update(self.options)
+
+ class GunicornApplication(Application):
+ def init(self, parser, opts, args):
+ return config
+
+ def load(self):
+ return handler
+
+ GunicornApplication().run()
+
servers = {
'wsgiref': WSGIRefServer,
+ 'gunicorn': GunicornServer,
} # type: Dict[str, Any]
|
Add a gunicorn server adpter
|
## Code Before:
from typing import Dict, Any
class ServerAdapter:
quiet = False
def __init__(self, host: str='127.0.0.1', port: int=8080, **options) -> None:
self.options = options
self.host = host
self.port = int(port)
def run(self, handler):
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class WSGIRefServer(ServerAdapter):
def run(self, app):
from wsgiref.simple_server import make_server # type: ignore
self.httpd = make_server(self.host, self.port, app)
self.port = self.httpd.server_port
try:
self.httpd.serve_forever()
except KeyboardInterrupt:
self.httpd.server_close()
raise
servers = {
'wsgiref': WSGIRefServer,
} # type: Dict[str, Any]
## Instruction:
Add a gunicorn server adpter
## Code After:
from typing import Dict, Any
class ServerAdapter:
quiet = False
def __init__(self, host: str='127.0.0.1', port: int=8080, **options) -> None:
self.options = options
self.host = host
self.port = int(port)
def run(self, handler):
pass
def __repr__(self):
args = ', '.join(['%s=%s' % (k, repr(v))
for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class WSGIRefServer(ServerAdapter):
def run(self, app):
from wsgiref.simple_server import make_server # type: ignore
self.httpd = make_server(self.host, self.port, app)
self.port = self.httpd.server_port
try:
self.httpd.serve_forever()
except KeyboardInterrupt:
self.httpd.server_close()
raise
class GunicornServer(ServerAdapter):
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
servers = {
'wsgiref': WSGIRefServer,
'gunicorn': GunicornServer,
} # type: Dict[str, Any]
|
...
class GunicornServer(ServerAdapter):
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
servers = {
...
'wsgiref': WSGIRefServer,
'gunicorn': GunicornServer,
} # type: Dict[str, Any]
...
|
69d18539fb4f394ca45d1116a521084c83ea21b5
|
icekit_events/migrations/0012_auto_20160706_1606.py
|
icekit_events/migrations/0012_auto_20160706_1606.py
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_events', '0011_event_show_in_calendar'),
]
operations = [
migrations.AlterModelTable(
name='event',
table=None,
),
migrations.AlterModelTable(
name='recurrencerule',
table=None,
),
migrations.RunSQL(
"UPDATE django_content_type SET app_label='icekit_events' WHERE app_label='eventkit'",
# No-op: I haven't yet found a way to make this reversible in the
# way you would expect without unique constraint DB errors, whereas
# it works (according to unit tests at least) with a no-op.
"",
),
]
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_events', '0011_event_show_in_calendar'),
]
operations = [
migrations.AlterModelTable(
name='event',
table=None,
),
migrations.AlterModelTable(
name='recurrencerule',
table=None,
),
migrations.RunSQL(
"UPDATE django_content_type SET app_label='icekit_events' WHERE app_label='eventkit'",
# No-op: I haven't yet found a way to make this reversible in the
# way you would expect without unique constraint DB errors, whereas
# it works (according to unit tests at least) with a no-op.
"UPDATE django_content_type SET app_label=app_label WHERE app_label='NONE!'",
),
]
|
Make migration reverse no-op a valid SQL query
|
Make migration reverse no-op a valid SQL query
When using a PostgreSQL database with Django 1.7 empty reverse query
statements in DB migrations cause an error, so we replace the empty
no-op statement with a valid query that still does nothing so the
reverse migration will work in this case.
This problem doesn't seem to affect Django 1.8, which must be smarter
about accepted but not actually running empty statements in DB
migrations.
|
Python
|
mit
|
ic-labs/icekit-events,ic-labs/icekit-events,ic-labs/icekit-events,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_events', '0011_event_show_in_calendar'),
]
operations = [
migrations.AlterModelTable(
name='event',
table=None,
),
migrations.AlterModelTable(
name='recurrencerule',
table=None,
),
migrations.RunSQL(
"UPDATE django_content_type SET app_label='icekit_events' WHERE app_label='eventkit'",
# No-op: I haven't yet found a way to make this reversible in the
# way you would expect without unique constraint DB errors, whereas
# it works (according to unit tests at least) with a no-op.
- "",
+ "UPDATE django_content_type SET app_label=app_label WHERE app_label='NONE!'",
),
]
|
Make migration reverse no-op a valid SQL query
|
## Code Before:
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_events', '0011_event_show_in_calendar'),
]
operations = [
migrations.AlterModelTable(
name='event',
table=None,
),
migrations.AlterModelTable(
name='recurrencerule',
table=None,
),
migrations.RunSQL(
"UPDATE django_content_type SET app_label='icekit_events' WHERE app_label='eventkit'",
# No-op: I haven't yet found a way to make this reversible in the
# way you would expect without unique constraint DB errors, whereas
# it works (according to unit tests at least) with a no-op.
"",
),
]
## Instruction:
Make migration reverse no-op a valid SQL query
## Code After:
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_events', '0011_event_show_in_calendar'),
]
operations = [
migrations.AlterModelTable(
name='event',
table=None,
),
migrations.AlterModelTable(
name='recurrencerule',
table=None,
),
migrations.RunSQL(
"UPDATE django_content_type SET app_label='icekit_events' WHERE app_label='eventkit'",
# No-op: I haven't yet found a way to make this reversible in the
# way you would expect without unique constraint DB errors, whereas
# it works (according to unit tests at least) with a no-op.
"UPDATE django_content_type SET app_label=app_label WHERE app_label='NONE!'",
),
]
|
# ... existing code ...
# it works (according to unit tests at least) with a no-op.
"UPDATE django_content_type SET app_label=app_label WHERE app_label='NONE!'",
),
# ... rest of the code ...
|
cfb0bda6096378de428a1460823626f3dc4c9059
|
spyder_terminal/__init__.py
|
spyder_terminal/__init__.py
|
"""Spyder Terminal Plugin."""
from .terminalplugin import TerminalPlugin as PLUGIN_CLASS
PLUGIN_CLASS
VERSION_INFO = (0, 2, 1)
__version__ = '.'.join(map(str, VERSION_INFO))
|
"""Spyder Terminal Plugin."""
from .terminalplugin import TerminalPlugin as PLUGIN_CLASS
PLUGIN_CLASS
VERSION_INFO = (0, 3, 0, 'dev0')
__version__ = '.'.join(map(str, VERSION_INFO))
|
Set package version info to 0.3.0.dev0
|
Set package version info to 0.3.0.dev0
|
Python
|
mit
|
spyder-ide/spyder-terminal,spyder-ide/spyder-terminal,andfoy/spyder-terminal,andfoy/spyder-terminal,andfoy/spyder-terminal,spyder-ide/spyder-terminal,spyder-ide/spyder-terminal
|
"""Spyder Terminal Plugin."""
from .terminalplugin import TerminalPlugin as PLUGIN_CLASS
PLUGIN_CLASS
- VERSION_INFO = (0, 2, 1)
+ VERSION_INFO = (0, 3, 0, 'dev0')
__version__ = '.'.join(map(str, VERSION_INFO))
|
Set package version info to 0.3.0.dev0
|
## Code Before:
"""Spyder Terminal Plugin."""
from .terminalplugin import TerminalPlugin as PLUGIN_CLASS
PLUGIN_CLASS
VERSION_INFO = (0, 2, 1)
__version__ = '.'.join(map(str, VERSION_INFO))
## Instruction:
Set package version info to 0.3.0.dev0
## Code After:
"""Spyder Terminal Plugin."""
from .terminalplugin import TerminalPlugin as PLUGIN_CLASS
PLUGIN_CLASS
VERSION_INFO = (0, 3, 0, 'dev0')
__version__ = '.'.join(map(str, VERSION_INFO))
|
// ... existing code ...
VERSION_INFO = (0, 3, 0, 'dev0')
__version__ = '.'.join(map(str, VERSION_INFO))
// ... rest of the code ...
|
985dd1ada1b2ad9ceaae111fa32b1d8e54b61786
|
mailqueue/tasks.py
|
mailqueue/tasks.py
|
from celery.task import task
from .models import MailerMessage
@task(name="tasks.send_mail")
def send_mail(pk):
message = MailerMessage.objects.get(pk=pk)
message._send()
@task()
def clear_sent_messages():
from mailqueue.models import MailerMessage
MailerMessage.objects.clear_sent_messages()
|
from celery.task import task
from .models import MailerMessage
@task(name="tasks.send_mail", default_retry_delay=5, max_retries=5)
def send_mail(pk):
message = MailerMessage.objects.get(pk=pk)
message._send()
# Retry when message is not sent
if not message.sent:
send_mail.retry([message.pk,])
@task()
def clear_sent_messages():
from mailqueue.models import MailerMessage
MailerMessage.objects.clear_sent_messages()
|
Add retry to celery task
|
Add retry to celery task
Messages do not always get delivered. Built in a retry when message is not sent.
Max retry count could also be a setting.
|
Python
|
mit
|
Goury/django-mail-queue,dstegelman/django-mail-queue,winfieldco/django-mail-queue,Goury/django-mail-queue,styrmis/django-mail-queue,dstegelman/django-mail-queue
|
from celery.task import task
from .models import MailerMessage
- @task(name="tasks.send_mail")
+ @task(name="tasks.send_mail", default_retry_delay=5, max_retries=5)
def send_mail(pk):
message = MailerMessage.objects.get(pk=pk)
message._send()
+
+ # Retry when message is not sent
+ if not message.sent:
+ send_mail.retry([message.pk,])
@task()
def clear_sent_messages():
from mailqueue.models import MailerMessage
MailerMessage.objects.clear_sent_messages()
|
Add retry to celery task
|
## Code Before:
from celery.task import task
from .models import MailerMessage
@task(name="tasks.send_mail")
def send_mail(pk):
message = MailerMessage.objects.get(pk=pk)
message._send()
@task()
def clear_sent_messages():
from mailqueue.models import MailerMessage
MailerMessage.objects.clear_sent_messages()
## Instruction:
Add retry to celery task
## Code After:
from celery.task import task
from .models import MailerMessage
@task(name="tasks.send_mail", default_retry_delay=5, max_retries=5)
def send_mail(pk):
message = MailerMessage.objects.get(pk=pk)
message._send()
# Retry when message is not sent
if not message.sent:
send_mail.retry([message.pk,])
@task()
def clear_sent_messages():
from mailqueue.models import MailerMessage
MailerMessage.objects.clear_sent_messages()
|
# ... existing code ...
@task(name="tasks.send_mail", default_retry_delay=5, max_retries=5)
def send_mail(pk):
# ... modified code ...
message._send()
# Retry when message is not sent
if not message.sent:
send_mail.retry([message.pk,])
# ... rest of the code ...
|
a31103d5001c7c6ebebddd25f9d1bb4ed0e0c2e9
|
polling_stations/apps/data_importers/management/commands/import_gosport.py
|
polling_stations/apps/data_importers/management/commands/import_gosport.py
|
from data_importers.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = "GOS"
addresses_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Districts v1 (07 03 2022).csv"
stations_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Stations v1 (07 03 2022).csv"
elections = ["2022-05-05"]
def address_record_to_dict(self, record):
if record.addressline6 in ["PO12 2EH"]:
return None
return super().address_record_to_dict(record)
|
from data_importers.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = "GOS"
addresses_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Districts v1 (07 03 2022).csv"
stations_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Stations v1 (07 03 2022).csv"
elections = ["2022-05-05"]
def address_record_to_dict(self, record):
if record.postcode in ["PO12 2EH"]:
return None
return super().address_record_to_dict(record)
|
Fix Gosport import script error
|
Fix Gosport import script error
|
Python
|
bsd-3-clause
|
DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations
|
from data_importers.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = "GOS"
addresses_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Districts v1 (07 03 2022).csv"
stations_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Stations v1 (07 03 2022).csv"
elections = ["2022-05-05"]
def address_record_to_dict(self, record):
- if record.addressline6 in ["PO12 2EH"]:
+ if record.postcode in ["PO12 2EH"]:
return None
return super().address_record_to_dict(record)
|
Fix Gosport import script error
|
## Code Before:
from data_importers.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = "GOS"
addresses_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Districts v1 (07 03 2022).csv"
stations_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Stations v1 (07 03 2022).csv"
elections = ["2022-05-05"]
def address_record_to_dict(self, record):
if record.addressline6 in ["PO12 2EH"]:
return None
return super().address_record_to_dict(record)
## Instruction:
Fix Gosport import script error
## Code After:
from data_importers.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
council_id = "GOS"
addresses_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Districts v1 (07 03 2022).csv"
stations_name = "2022-05-05/2022-03-07T15:47:28.644792/2022 Borough of Gosport - Democracy Club - Polling Stations v1 (07 03 2022).csv"
elections = ["2022-05-05"]
def address_record_to_dict(self, record):
if record.postcode in ["PO12 2EH"]:
return None
return super().address_record_to_dict(record)
|
# ... existing code ...
def address_record_to_dict(self, record):
if record.postcode in ["PO12 2EH"]:
return None
# ... rest of the code ...
|
a9796c68c24c3e8a059c54aad6eee2d0b61a9041
|
test/psyco.py
|
test/psyco.py
|
import _psyco
import sys
ticks = 0
depth = 10
funcs = {}
def f(frame, event, arg):
if event != 'call': return
c = frame.f_code.co_code
fn = frame.f_code.co_name
g = frame.f_globals
if not funcs.has_key(c):
funcs[c] = 1
if funcs[c] != None:
funcs[c] = funcs[c] + 1
if funcs[c] > ticks and g.has_key(fn):
g[fn] = _psyco.proxy(g[fn], depth)
funcs[c] = None
print 'psyco rebinding function:', fn
sys.setprofile(f)
|
import _psyco
_psyco.selective(1) # Argument is number of invocations before rebinding
# import sys
# ticks = 0
# depth = 10
# funcs = {}
# def f(frame, event, arg):
# if event != 'call': return
# print type(frame.f_globals)
# c = frame.f_code.co_code
# fn = frame.f_code.co_name
# g = frame.f_globals
# if not funcs.has_key(c):
# funcs[c] = 1
# if funcs[c] != None:
# funcs[c] = funcs[c] + 1
# if funcs[c] > ticks and g.has_key(fn):
# g[fn] = _psyco.proxy(g[fn], depth)
# funcs[c] = None
# print 'psyco rebinding function:', fn
# sys.setprofile(f)
|
Use c-version of the selective compilation
|
Use c-version of the selective compilation
|
Python
|
mit
|
tonysimpson/Ni,tonysimpson/Ni,tonysimpson/Ni,tonysimpson/Ni,tonysimpson/Ni
|
import _psyco
- import sys
+ _psyco.selective(1) # Argument is number of invocations before rebinding
+ # import sys
- ticks = 0
- depth = 10
- funcs = {}
+ # ticks = 0
+ # depth = 10
+ # funcs = {}
- def f(frame, event, arg):
- if event != 'call': return
- c = frame.f_code.co_code
- fn = frame.f_code.co_name
- g = frame.f_globals
- if not funcs.has_key(c):
- funcs[c] = 1
- if funcs[c] != None:
- funcs[c] = funcs[c] + 1
- if funcs[c] > ticks and g.has_key(fn):
- g[fn] = _psyco.proxy(g[fn], depth)
- funcs[c] = None
- print 'psyco rebinding function:', fn
- sys.setprofile(f)
+ # def f(frame, event, arg):
+ # if event != 'call': return
+ # print type(frame.f_globals)
+ # c = frame.f_code.co_code
+ # fn = frame.f_code.co_name
+ # g = frame.f_globals
+ # if not funcs.has_key(c):
+ # funcs[c] = 1
+ # if funcs[c] != None:
+ # funcs[c] = funcs[c] + 1
+ # if funcs[c] > ticks and g.has_key(fn):
+ # g[fn] = _psyco.proxy(g[fn], depth)
+ # funcs[c] = None
+ # print 'psyco rebinding function:', fn
+ # sys.setprofile(f)
+
+
|
Use c-version of the selective compilation
|
## Code Before:
import _psyco
import sys
ticks = 0
depth = 10
funcs = {}
def f(frame, event, arg):
if event != 'call': return
c = frame.f_code.co_code
fn = frame.f_code.co_name
g = frame.f_globals
if not funcs.has_key(c):
funcs[c] = 1
if funcs[c] != None:
funcs[c] = funcs[c] + 1
if funcs[c] > ticks and g.has_key(fn):
g[fn] = _psyco.proxy(g[fn], depth)
funcs[c] = None
print 'psyco rebinding function:', fn
sys.setprofile(f)
## Instruction:
Use c-version of the selective compilation
## Code After:
import _psyco
_psyco.selective(1) # Argument is number of invocations before rebinding
# import sys
# ticks = 0
# depth = 10
# funcs = {}
# def f(frame, event, arg):
# if event != 'call': return
# print type(frame.f_globals)
# c = frame.f_code.co_code
# fn = frame.f_code.co_name
# g = frame.f_globals
# if not funcs.has_key(c):
# funcs[c] = 1
# if funcs[c] != None:
# funcs[c] = funcs[c] + 1
# if funcs[c] > ticks and g.has_key(fn):
# g[fn] = _psyco.proxy(g[fn], depth)
# funcs[c] = None
# print 'psyco rebinding function:', fn
# sys.setprofile(f)
|
# ... existing code ...
import _psyco
_psyco.selective(1) # Argument is number of invocations before rebinding
# import sys
# ticks = 0
# depth = 10
# funcs = {}
# def f(frame, event, arg):
# if event != 'call': return
# print type(frame.f_globals)
# c = frame.f_code.co_code
# fn = frame.f_code.co_name
# g = frame.f_globals
# if not funcs.has_key(c):
# funcs[c] = 1
# if funcs[c] != None:
# funcs[c] = funcs[c] + 1
# if funcs[c] > ticks and g.has_key(fn):
# g[fn] = _psyco.proxy(g[fn], depth)
# funcs[c] = None
# print 'psyco rebinding function:', fn
# sys.setprofile(f)
# ... rest of the code ...
|
da097ed41010961cc0814d55d8784787f3ea8a63
|
skimage/util/arraypad.py
|
skimage/util/arraypad.py
|
from __future__ import division, absolute_import, print_function
from numpy import pad as numpy_pad
def pad(array, pad_width, mode, **kwargs):
return numpy_pad(array, pad_width, mode, **kwargs)
# Pull function info / docs from NumPy
pad.__doc__ = numpy_pad.__doc__
|
from __future__ import division, absolute_import, print_function
import numpy as np
def pad(array, pad_width, mode, **kwargs):
return np.pad(array, pad_width, mode, **kwargs)
# Pull function info / docs from NumPy
pad.__doc__ = np.pad.__doc__
|
Change import structure for doctests
|
Change import structure for doctests
|
Python
|
bsd-3-clause
|
rjeli/scikit-image,paalge/scikit-image,rjeli/scikit-image,vighneshbirodkar/scikit-image,vighneshbirodkar/scikit-image,vighneshbirodkar/scikit-image,paalge/scikit-image,rjeli/scikit-image,paalge/scikit-image
|
from __future__ import division, absolute_import, print_function
- from numpy import pad as numpy_pad
+ import numpy as np
def pad(array, pad_width, mode, **kwargs):
- return numpy_pad(array, pad_width, mode, **kwargs)
+ return np.pad(array, pad_width, mode, **kwargs)
# Pull function info / docs from NumPy
- pad.__doc__ = numpy_pad.__doc__
+ pad.__doc__ = np.pad.__doc__
|
Change import structure for doctests
|
## Code Before:
from __future__ import division, absolute_import, print_function
from numpy import pad as numpy_pad
def pad(array, pad_width, mode, **kwargs):
return numpy_pad(array, pad_width, mode, **kwargs)
# Pull function info / docs from NumPy
pad.__doc__ = numpy_pad.__doc__
## Instruction:
Change import structure for doctests
## Code After:
from __future__ import division, absolute_import, print_function
import numpy as np
def pad(array, pad_width, mode, **kwargs):
return np.pad(array, pad_width, mode, **kwargs)
# Pull function info / docs from NumPy
pad.__doc__ = np.pad.__doc__
|
...
import numpy as np
...
def pad(array, pad_width, mode, **kwargs):
return np.pad(array, pad_width, mode, **kwargs)
...
# Pull function info / docs from NumPy
pad.__doc__ = np.pad.__doc__
...
|
603f2204327c5cac8dbae0a567676465e1ab0f70
|
data/settings.py
|
data/settings.py
|
import os
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'operations.db'),
}
}
INSTALLED_APPS = (
'data',
)
SECRET_KEY = '63cFWu$$lhT3bVP9U1k1Iv@Jo02SuM'
LOG_FILE = os.path.join(PROJECT_ROOT, 'sorter.logs')
SORTER_IGNORE_FILENAME = '.signore' # Should start with a dot
SORTER_FOLDER_IDENTITY_FILENAME = '.sorter' # Should start with a dot
|
import os
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'operations.db'),
}
}
INSTALLED_APPS = (
'data',
)
SECRET_KEY = '63cFWu$$lhT3bVP9U1k1Iv@Jo02SuM'
LOG_FILE = os.path.join(PROJECT_ROOT, 'sorter.logs')
SORTER_IGNORE_FILENAME = '.signore' # Should start with a dot
SORTER_FOLDER_IDENTITY_FILENAME = '.sorter' # Should start with a dot
MIDDLEWARE_CLASSES = []
|
Set MIDDLEWARE_CLASSES to empty list
|
Set MIDDLEWARE_CLASSES to empty list
|
Python
|
bsd-3-clause
|
giantas/sorter,giantas/sorter
|
import os
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'operations.db'),
}
}
INSTALLED_APPS = (
'data',
)
SECRET_KEY = '63cFWu$$lhT3bVP9U1k1Iv@Jo02SuM'
LOG_FILE = os.path.join(PROJECT_ROOT, 'sorter.logs')
SORTER_IGNORE_FILENAME = '.signore' # Should start with a dot
SORTER_FOLDER_IDENTITY_FILENAME = '.sorter' # Should start with a dot
+ MIDDLEWARE_CLASSES = []
+
|
Set MIDDLEWARE_CLASSES to empty list
|
## Code Before:
import os
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'operations.db'),
}
}
INSTALLED_APPS = (
'data',
)
SECRET_KEY = '63cFWu$$lhT3bVP9U1k1Iv@Jo02SuM'
LOG_FILE = os.path.join(PROJECT_ROOT, 'sorter.logs')
SORTER_IGNORE_FILENAME = '.signore' # Should start with a dot
SORTER_FOLDER_IDENTITY_FILENAME = '.sorter' # Should start with a dot
## Instruction:
Set MIDDLEWARE_CLASSES to empty list
## Code After:
import os
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'operations.db'),
}
}
INSTALLED_APPS = (
'data',
)
SECRET_KEY = '63cFWu$$lhT3bVP9U1k1Iv@Jo02SuM'
LOG_FILE = os.path.join(PROJECT_ROOT, 'sorter.logs')
SORTER_IGNORE_FILENAME = '.signore' # Should start with a dot
SORTER_FOLDER_IDENTITY_FILENAME = '.sorter' # Should start with a dot
MIDDLEWARE_CLASSES = []
|
# ... existing code ...
SORTER_FOLDER_IDENTITY_FILENAME = '.sorter' # Should start with a dot
MIDDLEWARE_CLASSES = []
# ... rest of the code ...
|
1421dd89b74bf753cf0b52a5e6fe200d221922b5
|
pirx/utils.py
|
pirx/utils.py
|
import os
def setting(name):
return name.upper()
def path(subpath):
project_root = os.path.dirname(os.path.realpath(__file__))
return os.path.join(project_root, subpath)
|
import os
def setting(name):
return name.upper()
def path(subpath):
import __main__
project_root = os.path.dirname(os.path.realpath(__main__.__file__))
return os.path.join(project_root, subpath)
|
Fix 'path' function: use main's file as project root
|
Fix 'path' function: use main's file as project root
|
Python
|
mit
|
piotrekw/pirx
|
import os
def setting(name):
return name.upper()
def path(subpath):
+ import __main__
- project_root = os.path.dirname(os.path.realpath(__file__))
+ project_root = os.path.dirname(os.path.realpath(__main__.__file__))
return os.path.join(project_root, subpath)
|
Fix 'path' function: use main's file as project root
|
## Code Before:
import os
def setting(name):
return name.upper()
def path(subpath):
project_root = os.path.dirname(os.path.realpath(__file__))
return os.path.join(project_root, subpath)
## Instruction:
Fix 'path' function: use main's file as project root
## Code After:
import os
def setting(name):
return name.upper()
def path(subpath):
import __main__
project_root = os.path.dirname(os.path.realpath(__main__.__file__))
return os.path.join(project_root, subpath)
|
...
def path(subpath):
import __main__
project_root = os.path.dirname(os.path.realpath(__main__.__file__))
return os.path.join(project_root, subpath)
...
|
347681637c7c9d28ba1c787bb77da1296a02d13f
|
ckanext/archiver/default_settings.py
|
ckanext/archiver/default_settings.py
|
CKAN_CONFIG = '/home/okfn/pyenv/src/ckan/ckan.ini'
# directory to save downloaded files to
ARCHIVE_DIR = '/tmp/archive'
# Use this user name when requesting data from ckan
ARCHIVE_USER = u'okfn_maintenance'
# Max content-length of archived files, larger files will be ignored
MAX_CONTENT_LENGTH = 500000
|
CKAN_URL = 'http://127.0.0.1:5000'
# API key for the CKAN user that the archiver will authenticate as.
# This user must be a system administrator
API_KEY = ''
# directory to save downloaded files to
ARCHIVE_DIR = '/tmp/archive'
# Max content-length of archived files, larger files will be ignored
MAX_CONTENT_LENGTH = 500000
|
Change settings to use API key and CKAN URL
|
Change settings to use API key and CKAN URL
|
Python
|
mit
|
ckan/ckanext-archiver,ckan/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver,ckan/ckanext-archiver,datagovuk/ckanext-archiver,datagovuk/ckanext-archiver,datagovuk/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver,DanePubliczneGovPl/ckanext-archiver
|
- CKAN_CONFIG = '/home/okfn/pyenv/src/ckan/ckan.ini'
+ CKAN_URL = 'http://127.0.0.1:5000'
+
+ # API key for the CKAN user that the archiver will authenticate as.
+ # This user must be a system administrator
+ API_KEY = ''
# directory to save downloaded files to
ARCHIVE_DIR = '/tmp/archive'
- # Use this user name when requesting data from ckan
- ARCHIVE_USER = u'okfn_maintenance'
-
# Max content-length of archived files, larger files will be ignored
MAX_CONTENT_LENGTH = 500000
|
Change settings to use API key and CKAN URL
|
## Code Before:
CKAN_CONFIG = '/home/okfn/pyenv/src/ckan/ckan.ini'
# directory to save downloaded files to
ARCHIVE_DIR = '/tmp/archive'
# Use this user name when requesting data from ckan
ARCHIVE_USER = u'okfn_maintenance'
# Max content-length of archived files, larger files will be ignored
MAX_CONTENT_LENGTH = 500000
## Instruction:
Change settings to use API key and CKAN URL
## Code After:
CKAN_URL = 'http://127.0.0.1:5000'
# API key for the CKAN user that the archiver will authenticate as.
# This user must be a system administrator
API_KEY = ''
# directory to save downloaded files to
ARCHIVE_DIR = '/tmp/archive'
# Max content-length of archived files, larger files will be ignored
MAX_CONTENT_LENGTH = 500000
|
# ... existing code ...
CKAN_URL = 'http://127.0.0.1:5000'
# API key for the CKAN user that the archiver will authenticate as.
# This user must be a system administrator
API_KEY = ''
# ... modified code ...
# Max content-length of archived files, larger files will be ignored
# ... rest of the code ...
|
23c29c4964286fc2ca8fb3a957a6e7810edb9d17
|
alexia/template/context_processors.py
|
alexia/template/context_processors.py
|
from __future__ import unicode_literals
from alexia.apps.organization.models import Organization
def organization(request):
return {
'organizations': Organization.objects.all(),
'current_organization': request.organization,
}
def permissions(request):
if request.user.is_superuser:
return {'is_tender': True, 'is_planner': True, 'is_manager': True, 'is_foundation_manager': True}
try:
membership = request.user.membership_set.get(organization=request.organization)
return {
'is_tender': membership.is_tender,
'is_planner': membership.is_planner,
'is_manager': membership.is_manager,
'is_foundation_manager': request.user.profile.is_foundation_manager,
}
except Organization.DoesNotExist:
return {
'is_tender': False,
'is_planner': False,
'is_manager': False,
'is_foundation_manager': False,
}
|
from __future__ import unicode_literals
from alexia.apps.organization.models import Organization
def organization(request):
return {
'organizations': Organization.objects.all(),
'current_organization': request.organization,
}
def permissions(request):
if request.user.is_superuser:
return {'is_tender': True, 'is_planner': True, 'is_manager': True, 'is_foundation_manager': True}
try:
if hasattr(request.user, "membership_set"):
membership = request.user.membership_set.get(organization=request.organization)
return {
'is_tender': membership.is_tender,
'is_planner': membership.is_planner,
'is_manager': membership.is_manager,
'is_foundation_manager': request.user.profile.is_foundation_manager,
}
else:
return {
'is_tender': False,
'is_planner': False,
'is_manager': False,
'is_foundation_manager': False,
}
except Organization.DoesNotExist:
return {
'is_tender': False,
'is_planner': False,
'is_manager': False,
'is_foundation_manager': False,
}
|
Fix AttributeError: 'AnonymousUser' object has no attribute 'membership_set'
|
Fix AttributeError: 'AnonymousUser' object has no attribute 'membership_set'
|
Python
|
bsd-3-clause
|
Inter-Actief/alexia,Inter-Actief/alexia,Inter-Actief/alexia,Inter-Actief/alexia
|
from __future__ import unicode_literals
from alexia.apps.organization.models import Organization
def organization(request):
return {
'organizations': Organization.objects.all(),
'current_organization': request.organization,
}
def permissions(request):
if request.user.is_superuser:
return {'is_tender': True, 'is_planner': True, 'is_manager': True, 'is_foundation_manager': True}
try:
+ if hasattr(request.user, "membership_set"):
- membership = request.user.membership_set.get(organization=request.organization)
+ membership = request.user.membership_set.get(organization=request.organization)
- return {
+ return {
- 'is_tender': membership.is_tender,
+ 'is_tender': membership.is_tender,
- 'is_planner': membership.is_planner,
+ 'is_planner': membership.is_planner,
- 'is_manager': membership.is_manager,
+ 'is_manager': membership.is_manager,
- 'is_foundation_manager': request.user.profile.is_foundation_manager,
+ 'is_foundation_manager': request.user.profile.is_foundation_manager,
- }
+ }
+ else:
+ return {
+ 'is_tender': False,
+ 'is_planner': False,
+ 'is_manager': False,
+ 'is_foundation_manager': False,
+ }
except Organization.DoesNotExist:
return {
'is_tender': False,
'is_planner': False,
'is_manager': False,
'is_foundation_manager': False,
}
|
Fix AttributeError: 'AnonymousUser' object has no attribute 'membership_set'
|
## Code Before:
from __future__ import unicode_literals
from alexia.apps.organization.models import Organization
def organization(request):
return {
'organizations': Organization.objects.all(),
'current_organization': request.organization,
}
def permissions(request):
if request.user.is_superuser:
return {'is_tender': True, 'is_planner': True, 'is_manager': True, 'is_foundation_manager': True}
try:
membership = request.user.membership_set.get(organization=request.organization)
return {
'is_tender': membership.is_tender,
'is_planner': membership.is_planner,
'is_manager': membership.is_manager,
'is_foundation_manager': request.user.profile.is_foundation_manager,
}
except Organization.DoesNotExist:
return {
'is_tender': False,
'is_planner': False,
'is_manager': False,
'is_foundation_manager': False,
}
## Instruction:
Fix AttributeError: 'AnonymousUser' object has no attribute 'membership_set'
## Code After:
from __future__ import unicode_literals
from alexia.apps.organization.models import Organization
def organization(request):
return {
'organizations': Organization.objects.all(),
'current_organization': request.organization,
}
def permissions(request):
if request.user.is_superuser:
return {'is_tender': True, 'is_planner': True, 'is_manager': True, 'is_foundation_manager': True}
try:
if hasattr(request.user, "membership_set"):
membership = request.user.membership_set.get(organization=request.organization)
return {
'is_tender': membership.is_tender,
'is_planner': membership.is_planner,
'is_manager': membership.is_manager,
'is_foundation_manager': request.user.profile.is_foundation_manager,
}
else:
return {
'is_tender': False,
'is_planner': False,
'is_manager': False,
'is_foundation_manager': False,
}
except Organization.DoesNotExist:
return {
'is_tender': False,
'is_planner': False,
'is_manager': False,
'is_foundation_manager': False,
}
|
...
try:
if hasattr(request.user, "membership_set"):
membership = request.user.membership_set.get(organization=request.organization)
return {
'is_tender': membership.is_tender,
'is_planner': membership.is_planner,
'is_manager': membership.is_manager,
'is_foundation_manager': request.user.profile.is_foundation_manager,
}
else:
return {
'is_tender': False,
'is_planner': False,
'is_manager': False,
'is_foundation_manager': False,
}
except Organization.DoesNotExist:
...
|
d07e1c020185fb118b628674234c4a1ebcc11836
|
binder/config.py
|
binder/config.py
|
c.ServerProxy.servers = {
'lab-dev': {
'command': [
'jupyter',
'lab',
'--no-browser',
'--dev-mode',
'--port={port}',
'--NotebookApp.token=""',
]
}
}
c.NotebookApp.default_url = '/lab-dev'
|
c.ServerProxy.servers = {
'lab-dev': {
'command': [
'jupyter',
'lab',
'--no-browser',
'--dev-mode',
'--port={port}',
'--NotebookApp.token=""',
'--NotebookApp.base_url={base_url}/lab-dev'
]
}
}
c.NotebookApp.default_url = '/lab-dev'
|
Set base_url for dev version of lab
|
Set base_url for dev version of lab
|
Python
|
bsd-3-clause
|
jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab,jupyter/jupyterlab
|
c.ServerProxy.servers = {
'lab-dev': {
'command': [
'jupyter',
'lab',
'--no-browser',
'--dev-mode',
'--port={port}',
'--NotebookApp.token=""',
+ '--NotebookApp.base_url={base_url}/lab-dev'
]
}
}
c.NotebookApp.default_url = '/lab-dev'
|
Set base_url for dev version of lab
|
## Code Before:
c.ServerProxy.servers = {
'lab-dev': {
'command': [
'jupyter',
'lab',
'--no-browser',
'--dev-mode',
'--port={port}',
'--NotebookApp.token=""',
]
}
}
c.NotebookApp.default_url = '/lab-dev'
## Instruction:
Set base_url for dev version of lab
## Code After:
c.ServerProxy.servers = {
'lab-dev': {
'command': [
'jupyter',
'lab',
'--no-browser',
'--dev-mode',
'--port={port}',
'--NotebookApp.token=""',
'--NotebookApp.base_url={base_url}/lab-dev'
]
}
}
c.NotebookApp.default_url = '/lab-dev'
|
# ... existing code ...
'--NotebookApp.token=""',
'--NotebookApp.base_url={base_url}/lab-dev'
]
# ... rest of the code ...
|
55e316a45256d054d19425015ef13868a84c5ff1
|
src/pip/_internal/resolution/resolvelib/reporter.py
|
src/pip/_internal/resolution/resolvelib/reporter.py
|
from collections import defaultdict
from logging import getLogger
from pip._vendor.resolvelib.reporters import BaseReporter
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import DefaultDict
from .base import Candidate
logger = getLogger(__name__)
class PipReporter(BaseReporter):
def __init__(self):
# type: () -> None
self.backtracks_by_package = defaultdict(int) # type: DefaultDict[str, int]
self._messages_at_backtrack = {
1: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
8: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
13: (
"This is taking longer than usual. You might need to provide the "
"dependency resolver with stricter constraints to reduce runtime."
"If you want to abort this run, you can press Ctrl + C to do so."
)
}
def backtracking(self, candidate):
# type: (Candidate) -> None
self.backtracks_by_package[candidate.name] += 1
count = self.backtracks_by_package[candidate.name]
if count not in self._messages_at_backtrack:
return
message = self._messages_at_backtrack[count]
logger.info("INFO: %s", message)
|
from collections import defaultdict
from logging import getLogger
from pip._vendor.resolvelib.reporters import BaseReporter
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import DefaultDict
from .base import Candidate
logger = getLogger(__name__)
class PipReporter(BaseReporter):
def __init__(self):
# type: () -> None
self.backtracks_by_package = defaultdict(int) # type: DefaultDict[str, int]
self._messages_at_backtrack = {
1: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
8: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
13: (
"This is taking longer than usual. You might need to provide the "
"dependency resolver with stricter constraints to reduce runtime."
"If you want to abort this run, you can press Ctrl + C to do so."
"To improve how pip performs, tell us that this happened here: "
"https://pip.pypa.io/surveys/backtracking"
)
}
def backtracking(self, candidate):
# type: (Candidate) -> None
self.backtracks_by_package[candidate.name] += 1
count = self.backtracks_by_package[candidate.name]
if count not in self._messages_at_backtrack:
return
message = self._messages_at_backtrack[count]
logger.info("INFO: %s", message)
|
Add the last line to the info message
|
Add the last line to the info message
|
Python
|
mit
|
sbidoul/pip,pradyunsg/pip,pypa/pip,pypa/pip,sbidoul/pip,pfmoore/pip,pfmoore/pip,pradyunsg/pip
|
from collections import defaultdict
from logging import getLogger
from pip._vendor.resolvelib.reporters import BaseReporter
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import DefaultDict
from .base import Candidate
logger = getLogger(__name__)
class PipReporter(BaseReporter):
def __init__(self):
# type: () -> None
self.backtracks_by_package = defaultdict(int) # type: DefaultDict[str, int]
self._messages_at_backtrack = {
1: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
8: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
13: (
"This is taking longer than usual. You might need to provide the "
"dependency resolver with stricter constraints to reduce runtime."
"If you want to abort this run, you can press Ctrl + C to do so."
+ "To improve how pip performs, tell us that this happened here: "
+ "https://pip.pypa.io/surveys/backtracking"
)
}
def backtracking(self, candidate):
# type: (Candidate) -> None
self.backtracks_by_package[candidate.name] += 1
count = self.backtracks_by_package[candidate.name]
if count not in self._messages_at_backtrack:
return
message = self._messages_at_backtrack[count]
logger.info("INFO: %s", message)
|
Add the last line to the info message
|
## Code Before:
from collections import defaultdict
from logging import getLogger
from pip._vendor.resolvelib.reporters import BaseReporter
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import DefaultDict
from .base import Candidate
logger = getLogger(__name__)
class PipReporter(BaseReporter):
def __init__(self):
# type: () -> None
self.backtracks_by_package = defaultdict(int) # type: DefaultDict[str, int]
self._messages_at_backtrack = {
1: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
8: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
13: (
"This is taking longer than usual. You might need to provide the "
"dependency resolver with stricter constraints to reduce runtime."
"If you want to abort this run, you can press Ctrl + C to do so."
)
}
def backtracking(self, candidate):
# type: (Candidate) -> None
self.backtracks_by_package[candidate.name] += 1
count = self.backtracks_by_package[candidate.name]
if count not in self._messages_at_backtrack:
return
message = self._messages_at_backtrack[count]
logger.info("INFO: %s", message)
## Instruction:
Add the last line to the info message
## Code After:
from collections import defaultdict
from logging import getLogger
from pip._vendor.resolvelib.reporters import BaseReporter
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import DefaultDict
from .base import Candidate
logger = getLogger(__name__)
class PipReporter(BaseReporter):
def __init__(self):
# type: () -> None
self.backtracks_by_package = defaultdict(int) # type: DefaultDict[str, int]
self._messages_at_backtrack = {
1: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
8: (
"pip is looking at multiple versions of this package to determine "
"which version is compatible with other requirements. "
"This could take a while."
),
13: (
"This is taking longer than usual. You might need to provide the "
"dependency resolver with stricter constraints to reduce runtime."
"If you want to abort this run, you can press Ctrl + C to do so."
"To improve how pip performs, tell us that this happened here: "
"https://pip.pypa.io/surveys/backtracking"
)
}
def backtracking(self, candidate):
# type: (Candidate) -> None
self.backtracks_by_package[candidate.name] += 1
count = self.backtracks_by_package[candidate.name]
if count not in self._messages_at_backtrack:
return
message = self._messages_at_backtrack[count]
logger.info("INFO: %s", message)
|
// ... existing code ...
"If you want to abort this run, you can press Ctrl + C to do so."
"To improve how pip performs, tell us that this happened here: "
"https://pip.pypa.io/surveys/backtracking"
)
// ... rest of the code ...
|
6765cefc1a5a928b3cff16c0f1014096f82c3d3b
|
test/test_services.py
|
test/test_services.py
|
import pytest
@pytest.mark.parametrize("name, enabled, running", [
("cron", "enabled", "running"),
("docker", "enabled", "running"),
("firewalld", "enabled", "running"),
("haveged", "enabled", "running"),
("ssh", "enabled", "running"),
])
def test_services(Service, name, enabled, running):
is_enabled = Service(name).is_enabled
print(is_enabled)
if enabled == "enabled":
assert is_enabled
else:
assert not is_enabled
is_running = Service(name).is_running
print(is_running)
if running == "running":
assert is_running
else:
assert not is_running
|
import pytest
@pytest.mark.parametrize("name, enabled, running", [
("cron", "enabled", "running"),
("docker", "enabled", "running"),
("firewalld", "enabled", "running"),
("haveged", "enabled", "running"),
("ssh", "enabled", "running"),
])
def test_services(host, name, enabled, running):
svc = host.service(name)
is_enabled = svc.is_enabled
print(is_enabled)
if enabled == "enabled":
assert is_enabled
else:
assert not is_enabled
is_running = svc.is_running
print(is_running)
if running == "running":
assert is_running
else:
assert not is_running
|
Change test function as existing method deprecated
|
Change test function as existing method deprecated
|
Python
|
mit
|
wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build
|
import pytest
@pytest.mark.parametrize("name, enabled, running", [
("cron", "enabled", "running"),
("docker", "enabled", "running"),
("firewalld", "enabled", "running"),
("haveged", "enabled", "running"),
("ssh", "enabled", "running"),
])
- def test_services(Service, name, enabled, running):
+ def test_services(host, name, enabled, running):
+
+ svc = host.service(name)
+
- is_enabled = Service(name).is_enabled
+ is_enabled = svc.is_enabled
print(is_enabled)
if enabled == "enabled":
assert is_enabled
else:
assert not is_enabled
- is_running = Service(name).is_running
+ is_running = svc.is_running
print(is_running)
if running == "running":
assert is_running
else:
assert not is_running
-
|
Change test function as existing method deprecated
|
## Code Before:
import pytest
@pytest.mark.parametrize("name, enabled, running", [
("cron", "enabled", "running"),
("docker", "enabled", "running"),
("firewalld", "enabled", "running"),
("haveged", "enabled", "running"),
("ssh", "enabled", "running"),
])
def test_services(Service, name, enabled, running):
is_enabled = Service(name).is_enabled
print(is_enabled)
if enabled == "enabled":
assert is_enabled
else:
assert not is_enabled
is_running = Service(name).is_running
print(is_running)
if running == "running":
assert is_running
else:
assert not is_running
## Instruction:
Change test function as existing method deprecated
## Code After:
import pytest
@pytest.mark.parametrize("name, enabled, running", [
("cron", "enabled", "running"),
("docker", "enabled", "running"),
("firewalld", "enabled", "running"),
("haveged", "enabled", "running"),
("ssh", "enabled", "running"),
])
def test_services(host, name, enabled, running):
svc = host.service(name)
is_enabled = svc.is_enabled
print(is_enabled)
if enabled == "enabled":
assert is_enabled
else:
assert not is_enabled
is_running = svc.is_running
print(is_running)
if running == "running":
assert is_running
else:
assert not is_running
|
...
def test_services(host, name, enabled, running):
svc = host.service(name)
is_enabled = svc.is_enabled
print(is_enabled)
...
is_running = svc.is_running
print(is_running)
...
assert not is_running
...
|
8e75605e0511b85dfd500b644613739f29705da6
|
cfnf.py
|
cfnf.py
|
import sublime, sublime_plugin
import time
class cfnewfile(sublime_plugin.TextCommand):
def run(self, edit):
localtime = time.asctime( time.localtime(time.time()) )
self.view.insert(edit,0,"<!---\r\n Name:\r\n Description:\r\n Written By:\r\n Date Created: "+localtime+"\r\n History:\r\n--->\r\n")
|
import sublime, sublime_plugin
import time
class cfnfCommand(sublime_plugin.WindowCommand):
def run(self):
a = self.window.new_file()
a.run_command("addheader")
class addheaderCommand(sublime_plugin.TextCommand):
def run(self, edit):
localtime = time.asctime( time.localtime(time.time()) )
self.view.insert(edit,0,"<!---\n Name:\n Description:\n Written By:\n Date Created: "+localtime+"\n History:\n--->\n")
|
Send text to new file
|
Send text to new file
|
Python
|
bsd-2-clause
|
dwkd/SublimeCFNewFile
|
+
import sublime, sublime_plugin
import time
- class cfnewfile(sublime_plugin.TextCommand):
+ class cfnfCommand(sublime_plugin.WindowCommand):
+ def run(self):
+ a = self.window.new_file()
+ a.run_command("addheader")
+
+ class addheaderCommand(sublime_plugin.TextCommand):
- def run(self, edit):
+ def run(self, edit):
localtime = time.asctime( time.localtime(time.time()) )
- self.view.insert(edit,0,"<!---\r\n Name:\r\n Description:\r\n Written By:\r\n Date Created: "+localtime+"\r\n History:\r\n--->\r\n")
+ self.view.insert(edit,0,"<!---\n Name:\n Description:\n Written By:\n Date Created: "+localtime+"\n History:\n--->\n")
|
Send text to new file
|
## Code Before:
import sublime, sublime_plugin
import time
class cfnewfile(sublime_plugin.TextCommand):
def run(self, edit):
localtime = time.asctime( time.localtime(time.time()) )
self.view.insert(edit,0,"<!---\r\n Name:\r\n Description:\r\n Written By:\r\n Date Created: "+localtime+"\r\n History:\r\n--->\r\n")
## Instruction:
Send text to new file
## Code After:
import sublime, sublime_plugin
import time
class cfnfCommand(sublime_plugin.WindowCommand):
def run(self):
a = self.window.new_file()
a.run_command("addheader")
class addheaderCommand(sublime_plugin.TextCommand):
def run(self, edit):
localtime = time.asctime( time.localtime(time.time()) )
self.view.insert(edit,0,"<!---\n Name:\n Description:\n Written By:\n Date Created: "+localtime+"\n History:\n--->\n")
|
# ... existing code ...
import sublime, sublime_plugin
# ... modified code ...
class cfnfCommand(sublime_plugin.WindowCommand):
def run(self):
a = self.window.new_file()
a.run_command("addheader")
class addheaderCommand(sublime_plugin.TextCommand):
def run(self, edit):
localtime = time.asctime( time.localtime(time.time()) )
self.view.insert(edit,0,"<!---\n Name:\n Description:\n Written By:\n Date Created: "+localtime+"\n History:\n--->\n")
# ... rest of the code ...
|
351c05b6e474b266a7594a775cb48cd7cfe0b833
|
shapely/linref.py
|
shapely/linref.py
|
from shapely.topology import Delegating
class LinearRefBase(Delegating):
def _validate_line(self, ob):
super(LinearRefBase, self)._validate(ob)
try:
assert ob.geom_type in ['LineString', 'MultiLineString']
except AssertionError:
raise TypeError("Only linear types support this operation")
class ProjectOp(LinearRefBase):
def __call__(self, this, other):
self._validate_line(this)
self._validate(other)
return self.fn(this._geom, other._geom)
class InterpolateOp(LinearRefBase):
def __call__(self, this, distance):
self._validate_line(this)
return self.fn(this._geom, distance)
|
from shapely.topology import Delegating
class LinearRefBase(Delegating):
def _validate_line(self, ob):
super(LinearRefBase, self)._validate(ob)
if not ob.geom_type in ['LinearRing', 'LineString', 'MultiLineString']:
raise TypeError("Only linear types support this operation")
class ProjectOp(LinearRefBase):
def __call__(self, this, other):
self._validate_line(this)
self._validate(other)
return self.fn(this._geom, other._geom)
class InterpolateOp(LinearRefBase):
def __call__(self, this, distance):
self._validate_line(this)
return self.fn(this._geom, distance)
|
Allow linear referencing on rings.
|
Allow linear referencing on rings.
Closes #286.
Eliminating the assert is good for optimization reasons, too.
|
Python
|
bsd-3-clause
|
abali96/Shapely,mouadino/Shapely,mindw/shapely,abali96/Shapely,jdmcbr/Shapely,jdmcbr/Shapely,mindw/shapely,mouadino/Shapely
|
from shapely.topology import Delegating
class LinearRefBase(Delegating):
def _validate_line(self, ob):
super(LinearRefBase, self)._validate(ob)
- try:
- assert ob.geom_type in ['LineString', 'MultiLineString']
+ if not ob.geom_type in ['LinearRing', 'LineString', 'MultiLineString']:
- except AssertionError:
raise TypeError("Only linear types support this operation")
class ProjectOp(LinearRefBase):
def __call__(self, this, other):
self._validate_line(this)
self._validate(other)
return self.fn(this._geom, other._geom)
class InterpolateOp(LinearRefBase):
def __call__(self, this, distance):
self._validate_line(this)
return self.fn(this._geom, distance)
|
Allow linear referencing on rings.
|
## Code Before:
from shapely.topology import Delegating
class LinearRefBase(Delegating):
def _validate_line(self, ob):
super(LinearRefBase, self)._validate(ob)
try:
assert ob.geom_type in ['LineString', 'MultiLineString']
except AssertionError:
raise TypeError("Only linear types support this operation")
class ProjectOp(LinearRefBase):
def __call__(self, this, other):
self._validate_line(this)
self._validate(other)
return self.fn(this._geom, other._geom)
class InterpolateOp(LinearRefBase):
def __call__(self, this, distance):
self._validate_line(this)
return self.fn(this._geom, distance)
## Instruction:
Allow linear referencing on rings.
## Code After:
from shapely.topology import Delegating
class LinearRefBase(Delegating):
def _validate_line(self, ob):
super(LinearRefBase, self)._validate(ob)
if not ob.geom_type in ['LinearRing', 'LineString', 'MultiLineString']:
raise TypeError("Only linear types support this operation")
class ProjectOp(LinearRefBase):
def __call__(self, this, other):
self._validate_line(this)
self._validate(other)
return self.fn(this._geom, other._geom)
class InterpolateOp(LinearRefBase):
def __call__(self, this, distance):
self._validate_line(this)
return self.fn(this._geom, distance)
|
...
super(LinearRefBase, self)._validate(ob)
if not ob.geom_type in ['LinearRing', 'LineString', 'MultiLineString']:
raise TypeError("Only linear types support this operation")
...
|
9c2951d794bb27952606cae77da1ebcd0d651e72
|
aiodownload/api.py
|
aiodownload/api.py
|
from aiodownload import AioDownloadBundle, AioDownload
import asyncio
def one(url, download=None):
return [s for s in swarm([url], download=download)][0]
def swarm(urls, download=None):
return [e for e in each(urls, download=download)]
def each(iterable, url_map=None, download=None):
url_map = url_map or _url_map
download = download or AioDownload()
tasks = []
for i in iterable:
url = url_map(i)
info = None if i == url else i
tasks.append(
download._loop.create_task(
AioDownload(url, info=info)
)
)
for task_set in download._loop.run_until_complete(asyncio.wait(tasks)):
for task in task_set:
yield task.result()
def _url_map(x):
return str(x)
|
from aiodownload import AioDownloadBundle, AioDownload
import asyncio
def one(url, download=None):
return [s for s in swarm([url], download=download)][0]
def swarm(urls, download=None):
return [e for e in each(urls, download=download)]
def each(iterable, url_map=None, download=None):
url_map = url_map or _url_map
download = download or AioDownload()
tasks = []
for i in iterable:
url = url_map(i)
info = None if i == url else i
tasks.append(
download._loop.create_task(
download.main(AioDownloadBundle(url, info=info))
)
)
for task_set in download._loop.run_until_complete(asyncio.wait(tasks)):
for task in task_set:
yield task.result()
def _url_map(x):
return str(x)
|
Fix - needed to provide create_task a function, not a class
|
Fix - needed to provide create_task a function, not a class
|
Python
|
mit
|
jelloslinger/aiodownload
|
from aiodownload import AioDownloadBundle, AioDownload
import asyncio
def one(url, download=None):
return [s for s in swarm([url], download=download)][0]
def swarm(urls, download=None):
return [e for e in each(urls, download=download)]
def each(iterable, url_map=None, download=None):
url_map = url_map or _url_map
download = download or AioDownload()
tasks = []
for i in iterable:
url = url_map(i)
info = None if i == url else i
tasks.append(
download._loop.create_task(
- AioDownload(url, info=info)
+ download.main(AioDownloadBundle(url, info=info))
)
)
for task_set in download._loop.run_until_complete(asyncio.wait(tasks)):
for task in task_set:
yield task.result()
def _url_map(x):
return str(x)
|
Fix - needed to provide create_task a function, not a class
|
## Code Before:
from aiodownload import AioDownloadBundle, AioDownload
import asyncio
def one(url, download=None):
return [s for s in swarm([url], download=download)][0]
def swarm(urls, download=None):
return [e for e in each(urls, download=download)]
def each(iterable, url_map=None, download=None):
url_map = url_map or _url_map
download = download or AioDownload()
tasks = []
for i in iterable:
url = url_map(i)
info = None if i == url else i
tasks.append(
download._loop.create_task(
AioDownload(url, info=info)
)
)
for task_set in download._loop.run_until_complete(asyncio.wait(tasks)):
for task in task_set:
yield task.result()
def _url_map(x):
return str(x)
## Instruction:
Fix - needed to provide create_task a function, not a class
## Code After:
from aiodownload import AioDownloadBundle, AioDownload
import asyncio
def one(url, download=None):
return [s for s in swarm([url], download=download)][0]
def swarm(urls, download=None):
return [e for e in each(urls, download=download)]
def each(iterable, url_map=None, download=None):
url_map = url_map or _url_map
download = download or AioDownload()
tasks = []
for i in iterable:
url = url_map(i)
info = None if i == url else i
tasks.append(
download._loop.create_task(
download.main(AioDownloadBundle(url, info=info))
)
)
for task_set in download._loop.run_until_complete(asyncio.wait(tasks)):
for task in task_set:
yield task.result()
def _url_map(x):
return str(x)
|
...
download._loop.create_task(
download.main(AioDownloadBundle(url, info=info))
)
...
|
70f568a97f87f039fe06d74e1cf46040e0b6b817
|
tests/test_etcd3.py
|
tests/test_etcd3.py
|
import pytest
import etcd3
class TestEtcd3(object):
@classmethod
def setup_class(cls):
pass
def test_client_stub(self):
etcd = etcd3.client()
assert etcd is not None
def test_get_unknown_key(self):
etcd = etcd3.client()
with pytest.raises(etcd3.exceptions.KeyNotFoundError):
etcd.get('probably-invalid-key')
def test_get_key(self):
etcd = etcd3.client()
etcd.get('doot')
def test_put_key(self):
etcd = etcd3.client()
etcd.put('doot', 'this is a doot')
@classmethod
def teardown_class(cls):
pass
|
import os
import pytest
import etcd3
class TestEtcd3(object):
@classmethod
def setup_class(cls):
pass
def test_client_stub(self):
etcd = etcd3.client()
assert etcd is not None
def test_get_unknown_key(self):
etcd = etcd3.client()
with pytest.raises(etcd3.exceptions.KeyNotFoundError):
etcd.get('probably-invalid-key')
def test_get_key(self):
os.system("etcdctl put /doot/a_key some_value")
etcd = etcd3.client()
etcd.get('/doot/a_key')
def test_put_key(self):
etcd = etcd3.client()
etcd.put('/doot', 'this is a doot')
@classmethod
def teardown_class(cls):
os.system("etcdctl -w json del --prefix /doot")
|
Clean out testing keys with etcdctl
|
Clean out testing keys with etcdctl
|
Python
|
apache-2.0
|
kragniz/python-etcd3
|
+
+ import os
import pytest
import etcd3
class TestEtcd3(object):
@classmethod
def setup_class(cls):
pass
def test_client_stub(self):
etcd = etcd3.client()
assert etcd is not None
def test_get_unknown_key(self):
etcd = etcd3.client()
with pytest.raises(etcd3.exceptions.KeyNotFoundError):
etcd.get('probably-invalid-key')
def test_get_key(self):
+ os.system("etcdctl put /doot/a_key some_value")
etcd = etcd3.client()
- etcd.get('doot')
+ etcd.get('/doot/a_key')
def test_put_key(self):
etcd = etcd3.client()
- etcd.put('doot', 'this is a doot')
+ etcd.put('/doot', 'this is a doot')
@classmethod
def teardown_class(cls):
- pass
+ os.system("etcdctl -w json del --prefix /doot")
|
Clean out testing keys with etcdctl
|
## Code Before:
import pytest
import etcd3
class TestEtcd3(object):
@classmethod
def setup_class(cls):
pass
def test_client_stub(self):
etcd = etcd3.client()
assert etcd is not None
def test_get_unknown_key(self):
etcd = etcd3.client()
with pytest.raises(etcd3.exceptions.KeyNotFoundError):
etcd.get('probably-invalid-key')
def test_get_key(self):
etcd = etcd3.client()
etcd.get('doot')
def test_put_key(self):
etcd = etcd3.client()
etcd.put('doot', 'this is a doot')
@classmethod
def teardown_class(cls):
pass
## Instruction:
Clean out testing keys with etcdctl
## Code After:
import os
import pytest
import etcd3
class TestEtcd3(object):
@classmethod
def setup_class(cls):
pass
def test_client_stub(self):
etcd = etcd3.client()
assert etcd is not None
def test_get_unknown_key(self):
etcd = etcd3.client()
with pytest.raises(etcd3.exceptions.KeyNotFoundError):
etcd.get('probably-invalid-key')
def test_get_key(self):
os.system("etcdctl put /doot/a_key some_value")
etcd = etcd3.client()
etcd.get('/doot/a_key')
def test_put_key(self):
etcd = etcd3.client()
etcd.put('/doot', 'this is a doot')
@classmethod
def teardown_class(cls):
os.system("etcdctl -w json del --prefix /doot")
|
# ... existing code ...
import os
# ... modified code ...
def test_get_key(self):
os.system("etcdctl put /doot/a_key some_value")
etcd = etcd3.client()
etcd.get('/doot/a_key')
...
etcd = etcd3.client()
etcd.put('/doot', 'this is a doot')
...
def teardown_class(cls):
os.system("etcdctl -w json del --prefix /doot")
# ... rest of the code ...
|
7b90d75f260e76baf8b57840d96bb36b62e2c56c
|
__init__.py
|
__init__.py
|
from __future__ import division, unicode_literals
import bikeshed
import os
import subprocess
def main():
scriptPath = os.path.dirname(os.path.realpath(__file__))
dataPath = os.path.join(scriptPath, "data")
bikeshed.config.quiet = False
#bikeshed.update.update(path=dataPath)
#bikeshed.update.createManifest(path=dataPath)
os.chdir(scriptPath)
print subprocess.check_output("git add .", shell=True)
print subprocess.check_output("git push", shell=True)
if __name__ == "__main__":
main()
|
from __future__ import division, unicode_literals
import bikeshed
import os
import subprocess
def main():
scriptPath = os.path.dirname(os.path.realpath(__file__))
dataPath = os.path.join(scriptPath, "data")
bikeshed.config.quiet = False
bikeshed.update.update(path=dataPath)
bikeshed.update.createManifest(path=dataPath)
os.chdir(scriptPath)
subprocess.check_call("git add data", shell=True)
subprocess.check_call("git commit -m 'update data'", shell=True)
subprocess.check_call("git push", shell=True)
if __name__ == "__main__":
main()
|
Update script with proper git-ing.
|
Update script with proper git-ing.
|
Python
|
mit
|
tabatkins/bikeshed-data
|
from __future__ import division, unicode_literals
import bikeshed
import os
import subprocess
def main():
scriptPath = os.path.dirname(os.path.realpath(__file__))
dataPath = os.path.join(scriptPath, "data")
bikeshed.config.quiet = False
- #bikeshed.update.update(path=dataPath)
+ bikeshed.update.update(path=dataPath)
- #bikeshed.update.createManifest(path=dataPath)
+ bikeshed.update.createManifest(path=dataPath)
os.chdir(scriptPath)
- print subprocess.check_output("git add .", shell=True)
+ subprocess.check_call("git add data", shell=True)
+ subprocess.check_call("git commit -m 'update data'", shell=True)
- print subprocess.check_output("git push", shell=True)
+ subprocess.check_call("git push", shell=True)
if __name__ == "__main__":
- main()
+ main()
+
|
Update script with proper git-ing.
|
## Code Before:
from __future__ import division, unicode_literals
import bikeshed
import os
import subprocess
def main():
scriptPath = os.path.dirname(os.path.realpath(__file__))
dataPath = os.path.join(scriptPath, "data")
bikeshed.config.quiet = False
#bikeshed.update.update(path=dataPath)
#bikeshed.update.createManifest(path=dataPath)
os.chdir(scriptPath)
print subprocess.check_output("git add .", shell=True)
print subprocess.check_output("git push", shell=True)
if __name__ == "__main__":
main()
## Instruction:
Update script with proper git-ing.
## Code After:
from __future__ import division, unicode_literals
import bikeshed
import os
import subprocess
def main():
scriptPath = os.path.dirname(os.path.realpath(__file__))
dataPath = os.path.join(scriptPath, "data")
bikeshed.config.quiet = False
bikeshed.update.update(path=dataPath)
bikeshed.update.createManifest(path=dataPath)
os.chdir(scriptPath)
subprocess.check_call("git add data", shell=True)
subprocess.check_call("git commit -m 'update data'", shell=True)
subprocess.check_call("git push", shell=True)
if __name__ == "__main__":
main()
|
// ... existing code ...
bikeshed.config.quiet = False
bikeshed.update.update(path=dataPath)
bikeshed.update.createManifest(path=dataPath)
os.chdir(scriptPath)
subprocess.check_call("git add data", shell=True)
subprocess.check_call("git commit -m 'update data'", shell=True)
subprocess.check_call("git push", shell=True)
// ... modified code ...
if __name__ == "__main__":
main()
// ... rest of the code ...
|
b33222fd9d16efa88864d0c1f28cce9d0a8c3f68
|
fastentrypoints.py
|
fastentrypoints.py
|
'''
Monkey patch setuptools to write faster console_scripts with this format:
from mymodule import entry_function
entry_function()
This is better.
'''
from setuptools.command import easy_install
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
template = 'import sys\nfrom {0} import {1}\nsys.exit({1}())'
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = template.format(
ep.module_name, ep.attrs[0])
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
easy_install.ScriptWriter.get_args = get_args
def main():
import shutil
import sys
dests = sys.argv[1:] or ['.']
print(__name__)
for dst in dests:
shutil.copy(__file__, dst)
with open(dst + '/MANIFEST.in', 'a') as manifest:
manifest.write('\ninclude fastentrypoints.py')
|
'''
Monkey patch setuptools to write faster console_scripts with this format:
from mymodule import entry_function
entry_function()
This is better.
'''
from setuptools.command import easy_install
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
template = 'import sys\nfrom {0} import {1}\nsys.exit({1}())'
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = template.format(
ep.module_name, ep.attrs[0])
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
easy_install.ScriptWriter.get_args = get_args
def main():
import re
import shutil
import sys
dests = sys.argv[1:] or ['.']
filename = re.sub('\.pyc$', '.py', __file__)
for dst in dests:
shutil.copy(filename, dst)
with open(dst + '/MANIFEST.in', 'a') as manifest:
manifest.write('\ninclude fastentrypoints.py')
|
Make sure that .py file is used, even if .pyc got executed
|
Make sure that .py file is used, even if .pyc got executed
If python already byte-compiled the source code to .pyc file,
the __file__ points to .pyc, rather than to .py, which breaks the
copying mechanism.
Use regex substitution to make sure we're always copying the original
source file.
|
Python
|
bsd-2-clause
|
ninjaaron/fast-entry_points
|
'''
Monkey patch setuptools to write faster console_scripts with this format:
from mymodule import entry_function
entry_function()
This is better.
'''
from setuptools.command import easy_install
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
template = 'import sys\nfrom {0} import {1}\nsys.exit({1}())'
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = template.format(
ep.module_name, ep.attrs[0])
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
easy_install.ScriptWriter.get_args = get_args
def main():
+ import re
import shutil
import sys
dests = sys.argv[1:] or ['.']
- print(__name__)
+ filename = re.sub('\.pyc$', '.py', __file__)
for dst in dests:
- shutil.copy(__file__, dst)
+ shutil.copy(filename, dst)
with open(dst + '/MANIFEST.in', 'a') as manifest:
manifest.write('\ninclude fastentrypoints.py')
|
Make sure that .py file is used, even if .pyc got executed
|
## Code Before:
'''
Monkey patch setuptools to write faster console_scripts with this format:
from mymodule import entry_function
entry_function()
This is better.
'''
from setuptools.command import easy_install
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
template = 'import sys\nfrom {0} import {1}\nsys.exit({1}())'
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = template.format(
ep.module_name, ep.attrs[0])
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
easy_install.ScriptWriter.get_args = get_args
def main():
import shutil
import sys
dests = sys.argv[1:] or ['.']
print(__name__)
for dst in dests:
shutil.copy(__file__, dst)
with open(dst + '/MANIFEST.in', 'a') as manifest:
manifest.write('\ninclude fastentrypoints.py')
## Instruction:
Make sure that .py file is used, even if .pyc got executed
## Code After:
'''
Monkey patch setuptools to write faster console_scripts with this format:
from mymodule import entry_function
entry_function()
This is better.
'''
from setuptools.command import easy_install
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
template = 'import sys\nfrom {0} import {1}\nsys.exit({1}())'
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = template.format(
ep.module_name, ep.attrs[0])
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
easy_install.ScriptWriter.get_args = get_args
def main():
import re
import shutil
import sys
dests = sys.argv[1:] or ['.']
filename = re.sub('\.pyc$', '.py', __file__)
for dst in dests:
shutil.copy(filename, dst)
with open(dst + '/MANIFEST.in', 'a') as manifest:
manifest.write('\ninclude fastentrypoints.py')
|
// ... existing code ...
def main():
import re
import shutil
// ... modified code ...
dests = sys.argv[1:] or ['.']
filename = re.sub('\.pyc$', '.py', __file__)
for dst in dests:
shutil.copy(filename, dst)
with open(dst + '/MANIFEST.in', 'a') as manifest:
// ... rest of the code ...
|
31f1c65d6505bc443fdb1d6ccd4849b175788f04
|
gargbot_3000/config.py
|
gargbot_3000/config.py
|
import os
import datetime as dt
from pathlib import Path
import pytz
from dotenv import load_dotenv
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
slack_verification_token = os.environ["slack_verification_token"]
slack_bot_user_token = os.environ["slack_bot_user_token"]
bot_id = os.environ["bot_id"]
bot_name = os.environ["bot_name"]
home = Path(os.getenv("home_folder", os.getcwd()))
db_host = os.environ["db_host"]
db_user = os.environ["db_user"]
db_passwd = os.environ["db_passwd"]
db_name = os.environ["db_name"]
dropbox_token = os.environ["dropbox_token"]
tz = pytz.timezone(os.environ["tz"])
app_id = os.environ["app_id"]
test_channel = os.environ["test_channel"]
main_channel = os.environ["main_channel"]
countdown_message = os.environ["countdown_message"]
ongoing_message = os.environ["ongoing_message"]
finished_message = os.environ["finished_message"]
countdown_date = dt.datetime.fromtimestamp(int(os.environ["countdown_date"]), tz=tz)
countdown_args = os.environ["countdown_args"].split(", ")
|
import os
import datetime as dt
from pathlib import Path
import pytz
from dotenv import load_dotenv
load_dotenv()
slack_verification_token = os.environ["slack_verification_token"]
slack_bot_user_token = os.environ["slack_bot_user_token"]
bot_id = os.environ["bot_id"]
bot_name = os.environ["bot_name"]
home = Path(os.getenv("home_folder", os.getcwd()))
print(home)
db_host = os.environ["db_host"]
db_user = os.environ["db_user"]
db_passwd = os.environ["db_passwd"]
db_name = os.environ["db_name"]
dropbox_token = os.environ["dropbox_token"]
tz = pytz.timezone(os.environ["tz"])
app_id = os.environ["app_id"]
test_channel = os.environ["test_channel"]
main_channel = os.environ["main_channel"]
countdown_message = os.environ["countdown_message"]
ongoing_message = os.environ["ongoing_message"]
finished_message = os.environ["finished_message"]
countdown_date = dt.datetime.fromtimestamp(int(os.environ["countdown_date"]), tz=tz)
countdown_args = os.environ["countdown_args"].split(", ")
|
Remove explicit path from load_dotenv call
|
Remove explicit path from load_dotenv call
|
Python
|
mit
|
eirki/gargbot_3000,eirki/gargbot_3000,eirki/gargbot_3000,eirki/gargbot_3000
|
import os
import datetime as dt
from pathlib import Path
import pytz
from dotenv import load_dotenv
+ load_dotenv()
- env_path = Path('.') / '.env'
- load_dotenv(dotenv_path=env_path)
slack_verification_token = os.environ["slack_verification_token"]
slack_bot_user_token = os.environ["slack_bot_user_token"]
bot_id = os.environ["bot_id"]
bot_name = os.environ["bot_name"]
home = Path(os.getenv("home_folder", os.getcwd()))
+ print(home)
db_host = os.environ["db_host"]
db_user = os.environ["db_user"]
db_passwd = os.environ["db_passwd"]
db_name = os.environ["db_name"]
dropbox_token = os.environ["dropbox_token"]
tz = pytz.timezone(os.environ["tz"])
app_id = os.environ["app_id"]
test_channel = os.environ["test_channel"]
main_channel = os.environ["main_channel"]
countdown_message = os.environ["countdown_message"]
ongoing_message = os.environ["ongoing_message"]
finished_message = os.environ["finished_message"]
countdown_date = dt.datetime.fromtimestamp(int(os.environ["countdown_date"]), tz=tz)
countdown_args = os.environ["countdown_args"].split(", ")
|
Remove explicit path from load_dotenv call
|
## Code Before:
import os
import datetime as dt
from pathlib import Path
import pytz
from dotenv import load_dotenv
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
slack_verification_token = os.environ["slack_verification_token"]
slack_bot_user_token = os.environ["slack_bot_user_token"]
bot_id = os.environ["bot_id"]
bot_name = os.environ["bot_name"]
home = Path(os.getenv("home_folder", os.getcwd()))
db_host = os.environ["db_host"]
db_user = os.environ["db_user"]
db_passwd = os.environ["db_passwd"]
db_name = os.environ["db_name"]
dropbox_token = os.environ["dropbox_token"]
tz = pytz.timezone(os.environ["tz"])
app_id = os.environ["app_id"]
test_channel = os.environ["test_channel"]
main_channel = os.environ["main_channel"]
countdown_message = os.environ["countdown_message"]
ongoing_message = os.environ["ongoing_message"]
finished_message = os.environ["finished_message"]
countdown_date = dt.datetime.fromtimestamp(int(os.environ["countdown_date"]), tz=tz)
countdown_args = os.environ["countdown_args"].split(", ")
## Instruction:
Remove explicit path from load_dotenv call
## Code After:
import os
import datetime as dt
from pathlib import Path
import pytz
from dotenv import load_dotenv
load_dotenv()
slack_verification_token = os.environ["slack_verification_token"]
slack_bot_user_token = os.environ["slack_bot_user_token"]
bot_id = os.environ["bot_id"]
bot_name = os.environ["bot_name"]
home = Path(os.getenv("home_folder", os.getcwd()))
print(home)
db_host = os.environ["db_host"]
db_user = os.environ["db_user"]
db_passwd = os.environ["db_passwd"]
db_name = os.environ["db_name"]
dropbox_token = os.environ["dropbox_token"]
tz = pytz.timezone(os.environ["tz"])
app_id = os.environ["app_id"]
test_channel = os.environ["test_channel"]
main_channel = os.environ["main_channel"]
countdown_message = os.environ["countdown_message"]
ongoing_message = os.environ["ongoing_message"]
finished_message = os.environ["finished_message"]
countdown_date = dt.datetime.fromtimestamp(int(os.environ["countdown_date"]), tz=tz)
countdown_args = os.environ["countdown_args"].split(", ")
|
// ... existing code ...
load_dotenv()
// ... modified code ...
home = Path(os.getenv("home_folder", os.getcwd()))
print(home)
// ... rest of the code ...
|
8f98b52ec670ecfe89f243348f7815b0ae71eed7
|
gog_utils/gol_connection.py
|
gog_utils/gol_connection.py
|
"""Module hosting class representing connection to GoL."""
import json
import requests
import os
import stat
WEBSITE_URL = "http://www.gogonlinux.com"
AVAILABLE_GAMES = "/available"
BETA_GAMES = "/available-beta"
def obtain_available_games():
"""Returns JSON list of all available games."""
resp = requests.get(url=(WEBSITE_URL + AVAILABLE_GAMES))
return json.loads(resp.text)
def obtain_beta_available_games():
"""Obtains JSON list of all available beta games."""
resp = requests.get(url=(WEBSITE_URL + BETA_GAMES))
return json.loads(resp.text)
def download_script(target, url):
"""Function to download data from url to target."""
reqs = requests.get(url)
with open(target, "w+") as file_handle:
file_handle.write(reqs.content)
os.chmod(target, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
"""Module hosting class representing connection to GoL."""
import json
import requests
import os
import stat
WEBSITE_URL = "http://www.gogonlinux.com"
AVAILABLE_GAMES = "/available"
BETA_GAMES = "/available-beta"
def obtain_available_games():
"""Returns JSON list of all available games."""
resp = requests.get(url=(WEBSITE_URL + AVAILABLE_GAMES))
return json.loads(resp.text) #pylint: disable=E1103
def obtain_beta_available_games():
"""Obtains JSON list of all available beta games."""
resp = requests.get(url=(WEBSITE_URL + BETA_GAMES))
return json.loads(resp.text) #pylint: disable=E1103
def download_script(target, url):
"""Function to download data from url to target."""
reqs = requests.get(url)
with open(target, "w+") as file_handle:
file_handle.write(reqs.content) #pylint: disable=E1103
os.chmod(target, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
Disable falsely reported pylint errors due to unresolved library type
|
Disable falsely reported pylint errors due to unresolved library type
Signed-off-by: Morgawr <[email protected]>
|
Python
|
bsd-3-clause
|
Morgawr/gogonlinux,Morgawr/gogonlinux
|
"""Module hosting class representing connection to GoL."""
import json
import requests
import os
import stat
WEBSITE_URL = "http://www.gogonlinux.com"
AVAILABLE_GAMES = "/available"
BETA_GAMES = "/available-beta"
def obtain_available_games():
"""Returns JSON list of all available games."""
resp = requests.get(url=(WEBSITE_URL + AVAILABLE_GAMES))
- return json.loads(resp.text)
+ return json.loads(resp.text) #pylint: disable=E1103
def obtain_beta_available_games():
"""Obtains JSON list of all available beta games."""
resp = requests.get(url=(WEBSITE_URL + BETA_GAMES))
- return json.loads(resp.text)
+ return json.loads(resp.text) #pylint: disable=E1103
def download_script(target, url):
"""Function to download data from url to target."""
reqs = requests.get(url)
with open(target, "w+") as file_handle:
- file_handle.write(reqs.content)
+ file_handle.write(reqs.content) #pylint: disable=E1103
os.chmod(target, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
Disable falsely reported pylint errors due to unresolved library type
|
## Code Before:
"""Module hosting class representing connection to GoL."""
import json
import requests
import os
import stat
WEBSITE_URL = "http://www.gogonlinux.com"
AVAILABLE_GAMES = "/available"
BETA_GAMES = "/available-beta"
def obtain_available_games():
"""Returns JSON list of all available games."""
resp = requests.get(url=(WEBSITE_URL + AVAILABLE_GAMES))
return json.loads(resp.text)
def obtain_beta_available_games():
"""Obtains JSON list of all available beta games."""
resp = requests.get(url=(WEBSITE_URL + BETA_GAMES))
return json.loads(resp.text)
def download_script(target, url):
"""Function to download data from url to target."""
reqs = requests.get(url)
with open(target, "w+") as file_handle:
file_handle.write(reqs.content)
os.chmod(target, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
## Instruction:
Disable falsely reported pylint errors due to unresolved library type
## Code After:
"""Module hosting class representing connection to GoL."""
import json
import requests
import os
import stat
WEBSITE_URL = "http://www.gogonlinux.com"
AVAILABLE_GAMES = "/available"
BETA_GAMES = "/available-beta"
def obtain_available_games():
"""Returns JSON list of all available games."""
resp = requests.get(url=(WEBSITE_URL + AVAILABLE_GAMES))
return json.loads(resp.text) #pylint: disable=E1103
def obtain_beta_available_games():
"""Obtains JSON list of all available beta games."""
resp = requests.get(url=(WEBSITE_URL + BETA_GAMES))
return json.loads(resp.text) #pylint: disable=E1103
def download_script(target, url):
"""Function to download data from url to target."""
reqs = requests.get(url)
with open(target, "w+") as file_handle:
file_handle.write(reqs.content) #pylint: disable=E1103
os.chmod(target, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
...
resp = requests.get(url=(WEBSITE_URL + AVAILABLE_GAMES))
return json.loads(resp.text) #pylint: disable=E1103
...
resp = requests.get(url=(WEBSITE_URL + BETA_GAMES))
return json.loads(resp.text) #pylint: disable=E1103
...
with open(target, "w+") as file_handle:
file_handle.write(reqs.content) #pylint: disable=E1103
os.chmod(target, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
...
|
a79801b4f67f3372231b1df2c73e52a7cc6b35a3
|
perfiles_usuario/models.py
|
perfiles_usuario/models.py
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group
from .utils import CAPTURISTA_GROUP
class Capturista(models.Model):
""" Extension of Django's User Model for Capturistas.
We extend the Django User Model to identify Capturistas since they have relations with
other models and close interaction with the API.
Attributes:
----------
user : django.contrib.auth.models.User
A mock user to use across all tests.
activo : BooleanField
Indicates whether the profile is active or not.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
activo = models.BooleanField(default=True)
def save(self, *args, **kwargs):
""" Override the save method to add the capturista group.
"""
user_group = Group.objects.get_or_create(name=CAPTURISTA_GROUP)[0]
self.user.groups.add(user_group)
return super(Capturista, self).save(*args, **kwargs)
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group
from .utils import CAPTURISTA_GROUP
class Capturista(models.Model):
""" Extension of Django's User Model for Capturistas.
We extend the Django User Model to identify Capturistas since they have relations with
other models and close interaction with the API.
Attributes:
----------
user : django.contrib.auth.models.User
The django User related to Capturista (i.e. contains the actual user information).
activo : BooleanField
Indicates whether the profile is active or not.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
activo = models.BooleanField(default=True)
def save(self, *args, **kwargs):
""" Override the save method to add the capturista group.
"""
user_group = Group.objects.get_or_create(name=CAPTURISTA_GROUP)[0]
self.user.groups.add(user_group)
return super(Capturista, self).save(*args, **kwargs)
|
Fix user documentation in Capturista.
|
Fix user documentation in Capturista.
|
Python
|
mit
|
erikiado/jp2_online,erikiado/jp2_online,erikiado/jp2_online
|
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group
from .utils import CAPTURISTA_GROUP
class Capturista(models.Model):
""" Extension of Django's User Model for Capturistas.
We extend the Django User Model to identify Capturistas since they have relations with
other models and close interaction with the API.
Attributes:
----------
user : django.contrib.auth.models.User
- A mock user to use across all tests.
+ The django User related to Capturista (i.e. contains the actual user information).
activo : BooleanField
Indicates whether the profile is active or not.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
activo = models.BooleanField(default=True)
def save(self, *args, **kwargs):
""" Override the save method to add the capturista group.
"""
user_group = Group.objects.get_or_create(name=CAPTURISTA_GROUP)[0]
self.user.groups.add(user_group)
return super(Capturista, self).save(*args, **kwargs)
|
Fix user documentation in Capturista.
|
## Code Before:
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group
from .utils import CAPTURISTA_GROUP
class Capturista(models.Model):
""" Extension of Django's User Model for Capturistas.
We extend the Django User Model to identify Capturistas since they have relations with
other models and close interaction with the API.
Attributes:
----------
user : django.contrib.auth.models.User
A mock user to use across all tests.
activo : BooleanField
Indicates whether the profile is active or not.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
activo = models.BooleanField(default=True)
def save(self, *args, **kwargs):
""" Override the save method to add the capturista group.
"""
user_group = Group.objects.get_or_create(name=CAPTURISTA_GROUP)[0]
self.user.groups.add(user_group)
return super(Capturista, self).save(*args, **kwargs)
## Instruction:
Fix user documentation in Capturista.
## Code After:
from django.db import models
from django.conf import settings
from django.contrib.auth.models import Group
from .utils import CAPTURISTA_GROUP
class Capturista(models.Model):
""" Extension of Django's User Model for Capturistas.
We extend the Django User Model to identify Capturistas since they have relations with
other models and close interaction with the API.
Attributes:
----------
user : django.contrib.auth.models.User
The django User related to Capturista (i.e. contains the actual user information).
activo : BooleanField
Indicates whether the profile is active or not.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL)
activo = models.BooleanField(default=True)
def save(self, *args, **kwargs):
""" Override the save method to add the capturista group.
"""
user_group = Group.objects.get_or_create(name=CAPTURISTA_GROUP)[0]
self.user.groups.add(user_group)
return super(Capturista, self).save(*args, **kwargs)
|
...
user : django.contrib.auth.models.User
The django User related to Capturista (i.e. contains the actual user information).
activo : BooleanField
...
|
562a0868b3648e3ba40c29289ba7f4ebd4c75800
|
pyinfra/api/__init__.py
|
pyinfra/api/__init__.py
|
from .config import Config # noqa: F401
from .deploy import deploy # noqa: F401
from .exceptions import ( # noqa: F401
DeployError,
InventoryError,
OperationError,
)
from .facts import FactBase # noqa: F401
from .inventory import Inventory # noqa: F401
from .operation import operation # noqa: F401
from .state import State # noqa: F401
|
from .config import Config # noqa: F401
from .deploy import deploy # noqa: F401
from .exceptions import ( # noqa: F401
DeployError,
InventoryError,
OperationError,
)
from .facts import FactBase, ShortFactBase # noqa: F401
from .inventory import Inventory # noqa: F401
from .operation import operation # noqa: F401
from .state import State # noqa: F401
|
Add `ShortFactBase` import to `pyinfra.api`.
|
Add `ShortFactBase` import to `pyinfra.api`.
|
Python
|
mit
|
Fizzadar/pyinfra,Fizzadar/pyinfra
|
from .config import Config # noqa: F401
from .deploy import deploy # noqa: F401
from .exceptions import ( # noqa: F401
DeployError,
InventoryError,
OperationError,
)
- from .facts import FactBase # noqa: F401
+ from .facts import FactBase, ShortFactBase # noqa: F401
from .inventory import Inventory # noqa: F401
from .operation import operation # noqa: F401
from .state import State # noqa: F401
|
Add `ShortFactBase` import to `pyinfra.api`.
|
## Code Before:
from .config import Config # noqa: F401
from .deploy import deploy # noqa: F401
from .exceptions import ( # noqa: F401
DeployError,
InventoryError,
OperationError,
)
from .facts import FactBase # noqa: F401
from .inventory import Inventory # noqa: F401
from .operation import operation # noqa: F401
from .state import State # noqa: F401
## Instruction:
Add `ShortFactBase` import to `pyinfra.api`.
## Code After:
from .config import Config # noqa: F401
from .deploy import deploy # noqa: F401
from .exceptions import ( # noqa: F401
DeployError,
InventoryError,
OperationError,
)
from .facts import FactBase, ShortFactBase # noqa: F401
from .inventory import Inventory # noqa: F401
from .operation import operation # noqa: F401
from .state import State # noqa: F401
|
# ... existing code ...
)
from .facts import FactBase, ShortFactBase # noqa: F401
from .inventory import Inventory # noqa: F401
# ... rest of the code ...
|
65cd819b73c4a28b67a30b46b264b330d9967582
|
flicks/users/forms.py
|
flicks/users/forms.py
|
from django import forms
from tower import ugettext_lazy as _lazy
from flicks.base.util import country_choices
from flicks.users.models import UserProfile
class UserProfileForm(forms.ModelForm):
# L10n: Used in a choice field where users can choose between receiving
# L10n: HTML-based or Text-only newsletter emails.
NEWSLETTER_FORMATS = (('html', 'HTML'), ('text', _lazy('Text')))
privacy_policy_agree = forms.BooleanField(required=True)
mailing_list_signup = forms.BooleanField(required=False)
mailing_list_format = forms.ChoiceField(required=False,
choices=NEWSLETTER_FORMATS,
initial='html')
class Meta:
model = UserProfile
fields = ('full_name', 'nickname', 'country', 'address1', 'address2',
'city', 'mailing_country', 'state', 'postal_code')
widgets = {
'full_name': forms.TextInput(attrs={'required': 'required'}),
'privacy_policy_agree': forms.CheckboxInput(
attrs={'required': 'required'}),
}
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
# Localize countries list
self.fields['country'].choices = country_choices(allow_empty=False)
self.fields['mailing_country'].choices = country_choices()
|
from django import forms
from tower import ugettext_lazy as _lazy
from flicks.base.util import country_choices
from flicks.users.models import UserProfile
class UserProfileForm(forms.ModelForm):
# L10n: Used in a choice field where users can choose between receiving
# L10n: HTML-based or Text-only newsletter emails.
NEWSLETTER_FORMATS = (('html', 'HTML'), ('text', _lazy('Text')))
privacy_policy_agree = forms.BooleanField(
required=True,
widget=forms.CheckboxInput(attrs={'required': 'required'}))
mailing_list_signup = forms.BooleanField(required=False)
mailing_list_format = forms.ChoiceField(required=False,
choices=NEWSLETTER_FORMATS,
initial='html')
class Meta:
model = UserProfile
fields = ('full_name', 'nickname', 'country', 'address1', 'address2',
'city', 'mailing_country', 'state', 'postal_code')
widgets = {
'full_name': forms.TextInput(attrs={'required': 'required'}),
}
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
# Localize countries list
self.fields['country'].choices = country_choices(allow_empty=False)
self.fields['mailing_country'].choices = country_choices()
|
Make privacy checkbox on user form required via required attribute.
|
Make privacy checkbox on user form required via required attribute.
|
Python
|
bsd-3-clause
|
mozilla/firefox-flicks,mozilla/firefox-flicks,mozilla/firefox-flicks,mozilla/firefox-flicks
|
from django import forms
from tower import ugettext_lazy as _lazy
from flicks.base.util import country_choices
from flicks.users.models import UserProfile
class UserProfileForm(forms.ModelForm):
# L10n: Used in a choice field where users can choose between receiving
# L10n: HTML-based or Text-only newsletter emails.
NEWSLETTER_FORMATS = (('html', 'HTML'), ('text', _lazy('Text')))
- privacy_policy_agree = forms.BooleanField(required=True)
+ privacy_policy_agree = forms.BooleanField(
+ required=True,
+ widget=forms.CheckboxInput(attrs={'required': 'required'}))
+
mailing_list_signup = forms.BooleanField(required=False)
mailing_list_format = forms.ChoiceField(required=False,
choices=NEWSLETTER_FORMATS,
initial='html')
class Meta:
model = UserProfile
fields = ('full_name', 'nickname', 'country', 'address1', 'address2',
'city', 'mailing_country', 'state', 'postal_code')
widgets = {
'full_name': forms.TextInput(attrs={'required': 'required'}),
- 'privacy_policy_agree': forms.CheckboxInput(
- attrs={'required': 'required'}),
}
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
# Localize countries list
self.fields['country'].choices = country_choices(allow_empty=False)
self.fields['mailing_country'].choices = country_choices()
|
Make privacy checkbox on user form required via required attribute.
|
## Code Before:
from django import forms
from tower import ugettext_lazy as _lazy
from flicks.base.util import country_choices
from flicks.users.models import UserProfile
class UserProfileForm(forms.ModelForm):
# L10n: Used in a choice field where users can choose between receiving
# L10n: HTML-based or Text-only newsletter emails.
NEWSLETTER_FORMATS = (('html', 'HTML'), ('text', _lazy('Text')))
privacy_policy_agree = forms.BooleanField(required=True)
mailing_list_signup = forms.BooleanField(required=False)
mailing_list_format = forms.ChoiceField(required=False,
choices=NEWSLETTER_FORMATS,
initial='html')
class Meta:
model = UserProfile
fields = ('full_name', 'nickname', 'country', 'address1', 'address2',
'city', 'mailing_country', 'state', 'postal_code')
widgets = {
'full_name': forms.TextInput(attrs={'required': 'required'}),
'privacy_policy_agree': forms.CheckboxInput(
attrs={'required': 'required'}),
}
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
# Localize countries list
self.fields['country'].choices = country_choices(allow_empty=False)
self.fields['mailing_country'].choices = country_choices()
## Instruction:
Make privacy checkbox on user form required via required attribute.
## Code After:
from django import forms
from tower import ugettext_lazy as _lazy
from flicks.base.util import country_choices
from flicks.users.models import UserProfile
class UserProfileForm(forms.ModelForm):
# L10n: Used in a choice field where users can choose between receiving
# L10n: HTML-based or Text-only newsletter emails.
NEWSLETTER_FORMATS = (('html', 'HTML'), ('text', _lazy('Text')))
privacy_policy_agree = forms.BooleanField(
required=True,
widget=forms.CheckboxInput(attrs={'required': 'required'}))
mailing_list_signup = forms.BooleanField(required=False)
mailing_list_format = forms.ChoiceField(required=False,
choices=NEWSLETTER_FORMATS,
initial='html')
class Meta:
model = UserProfile
fields = ('full_name', 'nickname', 'country', 'address1', 'address2',
'city', 'mailing_country', 'state', 'postal_code')
widgets = {
'full_name': forms.TextInput(attrs={'required': 'required'}),
}
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
# Localize countries list
self.fields['country'].choices = country_choices(allow_empty=False)
self.fields['mailing_country'].choices = country_choices()
|
...
privacy_policy_agree = forms.BooleanField(
required=True,
widget=forms.CheckboxInput(attrs={'required': 'required'}))
mailing_list_signup = forms.BooleanField(required=False)
...
'full_name': forms.TextInput(attrs={'required': 'required'}),
}
...
|
3b5b3afbc66f60df45f0458ffdd0d37b9a7c50d0
|
ptoolbox/tags.py
|
ptoolbox/tags.py
|
from datetime import datetime
TAG_WIDTH = 'EXIF ExifImageWidth'
TAG_HEIGHT = 'EXIF ExifImageLength'
TAG_DATETIME = 'Image DateTime'
def parse_time(tags):
tag = tags.get(TAG_DATETIME, None)
if not tag:
raise KeyError(TAG_DATETIME)
return datetime.strptime(str(tag), "%Y:%m:%d %H:%M:%S")
def parse_width(tags):
tag = tags.get(TAG_WIDTH, None)
if not tag:
raise KeyError(TAG_WIDTH)
return int(str(tag), 10)
def parse_height(tags):
tag = tags.get(TAG_HEIGHT, None)
if not tag:
raise KeyError(TAG_HEIGHT)
return int(str(tag), 10)
|
import struct
from datetime import datetime
TAG_WIDTH = 'EXIF ExifImageWidth'
TAG_HEIGHT = 'EXIF ExifImageLength'
TAG_DATETIME = 'Image DateTime'
def jpeg_size(path):
"""Get image size.
Structure of JPEG file is:
ffd8 [ffXX SSSS DD DD ...] [ffYY SSSS DDDD ...] (S is 16bit size, D the data)
We look for the SOF0 header 0xffc0; its structure is
[ffc0 SSSS PPHH HHWW ...] where PP is 8bit precision, HHHH 16bit height, WWWW width
"""
with open(path, 'rb') as f:
_, header_type, size = struct.unpack('>HHH', f.read(6))
while header_type != 0xffc0:
f.seek(size - 2, 1)
header_type, size = struct.unpack('>HH', f.read(4))
bpi, height, width = struct.unpack('>BHH', f.read(5))
return width, height
def parse_time(tags):
tag = tags.get(TAG_DATETIME, None)
if not tag:
raise KeyError(TAG_DATETIME)
return datetime.strptime(str(tag), "%Y:%m:%d %H:%M:%S")
def parse_width(tags):
tag = tags.get(TAG_WIDTH, None)
if not tag:
raise KeyError(TAG_WIDTH)
return int(str(tag), 10)
def parse_height(tags):
tag = tags.get(TAG_HEIGHT, None)
if not tag:
raise KeyError(TAG_HEIGHT)
return int(str(tag), 10)
|
Add homemade fast width/height reader for JPEG files
|
Add homemade fast width/height reader for JPEG files
|
Python
|
mit
|
vperron/picasa-toolbox
|
+ import struct
from datetime import datetime
TAG_WIDTH = 'EXIF ExifImageWidth'
TAG_HEIGHT = 'EXIF ExifImageLength'
TAG_DATETIME = 'Image DateTime'
+
+
+ def jpeg_size(path):
+ """Get image size.
+ Structure of JPEG file is:
+ ffd8 [ffXX SSSS DD DD ...] [ffYY SSSS DDDD ...] (S is 16bit size, D the data)
+ We look for the SOF0 header 0xffc0; its structure is
+ [ffc0 SSSS PPHH HHWW ...] where PP is 8bit precision, HHHH 16bit height, WWWW width
+ """
+ with open(path, 'rb') as f:
+ _, header_type, size = struct.unpack('>HHH', f.read(6))
+ while header_type != 0xffc0:
+ f.seek(size - 2, 1)
+ header_type, size = struct.unpack('>HH', f.read(4))
+ bpi, height, width = struct.unpack('>BHH', f.read(5))
+ return width, height
+
def parse_time(tags):
tag = tags.get(TAG_DATETIME, None)
if not tag:
raise KeyError(TAG_DATETIME)
return datetime.strptime(str(tag), "%Y:%m:%d %H:%M:%S")
def parse_width(tags):
tag = tags.get(TAG_WIDTH, None)
if not tag:
raise KeyError(TAG_WIDTH)
return int(str(tag), 10)
def parse_height(tags):
tag = tags.get(TAG_HEIGHT, None)
if not tag:
raise KeyError(TAG_HEIGHT)
return int(str(tag), 10)
|
Add homemade fast width/height reader for JPEG files
|
## Code Before:
from datetime import datetime
TAG_WIDTH = 'EXIF ExifImageWidth'
TAG_HEIGHT = 'EXIF ExifImageLength'
TAG_DATETIME = 'Image DateTime'
def parse_time(tags):
tag = tags.get(TAG_DATETIME, None)
if not tag:
raise KeyError(TAG_DATETIME)
return datetime.strptime(str(tag), "%Y:%m:%d %H:%M:%S")
def parse_width(tags):
tag = tags.get(TAG_WIDTH, None)
if not tag:
raise KeyError(TAG_WIDTH)
return int(str(tag), 10)
def parse_height(tags):
tag = tags.get(TAG_HEIGHT, None)
if not tag:
raise KeyError(TAG_HEIGHT)
return int(str(tag), 10)
## Instruction:
Add homemade fast width/height reader for JPEG files
## Code After:
import struct
from datetime import datetime
TAG_WIDTH = 'EXIF ExifImageWidth'
TAG_HEIGHT = 'EXIF ExifImageLength'
TAG_DATETIME = 'Image DateTime'
def jpeg_size(path):
"""Get image size.
Structure of JPEG file is:
ffd8 [ffXX SSSS DD DD ...] [ffYY SSSS DDDD ...] (S is 16bit size, D the data)
We look for the SOF0 header 0xffc0; its structure is
[ffc0 SSSS PPHH HHWW ...] where PP is 8bit precision, HHHH 16bit height, WWWW width
"""
with open(path, 'rb') as f:
_, header_type, size = struct.unpack('>HHH', f.read(6))
while header_type != 0xffc0:
f.seek(size - 2, 1)
header_type, size = struct.unpack('>HH', f.read(4))
bpi, height, width = struct.unpack('>BHH', f.read(5))
return width, height
def parse_time(tags):
tag = tags.get(TAG_DATETIME, None)
if not tag:
raise KeyError(TAG_DATETIME)
return datetime.strptime(str(tag), "%Y:%m:%d %H:%M:%S")
def parse_width(tags):
tag = tags.get(TAG_WIDTH, None)
if not tag:
raise KeyError(TAG_WIDTH)
return int(str(tag), 10)
def parse_height(tags):
tag = tags.get(TAG_HEIGHT, None)
if not tag:
raise KeyError(TAG_HEIGHT)
return int(str(tag), 10)
|
// ... existing code ...
import struct
from datetime import datetime
// ... modified code ...
TAG_DATETIME = 'Image DateTime'
def jpeg_size(path):
"""Get image size.
Structure of JPEG file is:
ffd8 [ffXX SSSS DD DD ...] [ffYY SSSS DDDD ...] (S is 16bit size, D the data)
We look for the SOF0 header 0xffc0; its structure is
[ffc0 SSSS PPHH HHWW ...] where PP is 8bit precision, HHHH 16bit height, WWWW width
"""
with open(path, 'rb') as f:
_, header_type, size = struct.unpack('>HHH', f.read(6))
while header_type != 0xffc0:
f.seek(size - 2, 1)
header_type, size = struct.unpack('>HH', f.read(4))
bpi, height, width = struct.unpack('>BHH', f.read(5))
return width, height
// ... rest of the code ...
|
6e80bcef30b6b4485fa5e3f269f13fc62380c422
|
tests/test_evaluate.py
|
tests/test_evaluate.py
|
import numpy as np
from numpy.testing import assert_equal
from gala import evaluate as ev
def test_contingency_table():
seg = np.array([0, 1, 1, 1, 2, 2, 2, 3])
gt = np.array([1, 1, 1, 2, 2, 2, 2, 0])
ct = ev.contingency_table(seg, gt, ignore_seg=[], ignore_gt=[])
ct0 = ev.contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0])
ctd = ct.todense()
assert_equal(ctd, np.array([[0. , 0.125, 0. ],
[0. , 0.25 , 0.125],
[0. , 0. , 0.375],
[0.125, 0. , 0. ]]))
assert ct.shape == ct0.shape
def test_vi():
seg = np.array([1, 2, 3, 4])
gt = np.array([1, 1, 8, 8])
assert_equal(ev.vi(seg, gt), 1)
def test_are():
seg = np.eye(3)
gt = np.eye(3)
seg[1][1] = 0
assert seg.shape == gt.shape
|
import numpy as np
from numpy.testing import assert_equal
from gala import evaluate as ev
def test_contingency_table():
seg = np.array([0, 1, 1, 1, 2, 2, 2, 3])
gt = np.array([1, 1, 1, 2, 2, 2, 2, 0])
ct = ev.contingency_table(seg, gt, ignore_seg=[], ignore_gt=[])
ct0 = ev.contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0])
ctd = ct.todense()
assert_equal(ctd, np.array([[0. , 0.125, 0. ],
[0. , 0.25 , 0.125],
[0. , 0. , 0.375],
[0.125, 0. , 0. ]]))
assert ct.shape == ct0.shape
def test_vi():
seg = np.array([1, 2, 3, 4])
gt = np.array([1, 1, 8, 8])
assert_equal(ev.vi(seg, gt), 1)
def test_are():
seg = np.array([[0,1], [1,0]])
gt = np.array([[1,2],[0,1]])
assert_almost_equal(ev.adapted_rand_error(seg,gt),0.081)
assert seg.shape == gt.shape
|
Add in test for ARE
|
Add in test for ARE
|
Python
|
bsd-3-clause
|
jni/gala,janelia-flyem/gala
|
import numpy as np
from numpy.testing import assert_equal
from gala import evaluate as ev
def test_contingency_table():
seg = np.array([0, 1, 1, 1, 2, 2, 2, 3])
gt = np.array([1, 1, 1, 2, 2, 2, 2, 0])
ct = ev.contingency_table(seg, gt, ignore_seg=[], ignore_gt=[])
ct0 = ev.contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0])
ctd = ct.todense()
assert_equal(ctd, np.array([[0. , 0.125, 0. ],
[0. , 0.25 , 0.125],
[0. , 0. , 0.375],
[0.125, 0. , 0. ]]))
assert ct.shape == ct0.shape
def test_vi():
seg = np.array([1, 2, 3, 4])
gt = np.array([1, 1, 8, 8])
assert_equal(ev.vi(seg, gt), 1)
def test_are():
- seg = np.eye(3)
- gt = np.eye(3)
- seg[1][1] = 0
+ seg = np.array([[0,1], [1,0]])
+ gt = np.array([[1,2],[0,1]])
+ assert_almost_equal(ev.adapted_rand_error(seg,gt),0.081)
assert seg.shape == gt.shape
|
Add in test for ARE
|
## Code Before:
import numpy as np
from numpy.testing import assert_equal
from gala import evaluate as ev
def test_contingency_table():
seg = np.array([0, 1, 1, 1, 2, 2, 2, 3])
gt = np.array([1, 1, 1, 2, 2, 2, 2, 0])
ct = ev.contingency_table(seg, gt, ignore_seg=[], ignore_gt=[])
ct0 = ev.contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0])
ctd = ct.todense()
assert_equal(ctd, np.array([[0. , 0.125, 0. ],
[0. , 0.25 , 0.125],
[0. , 0. , 0.375],
[0.125, 0. , 0. ]]))
assert ct.shape == ct0.shape
def test_vi():
seg = np.array([1, 2, 3, 4])
gt = np.array([1, 1, 8, 8])
assert_equal(ev.vi(seg, gt), 1)
def test_are():
seg = np.eye(3)
gt = np.eye(3)
seg[1][1] = 0
assert seg.shape == gt.shape
## Instruction:
Add in test for ARE
## Code After:
import numpy as np
from numpy.testing import assert_equal
from gala import evaluate as ev
def test_contingency_table():
seg = np.array([0, 1, 1, 1, 2, 2, 2, 3])
gt = np.array([1, 1, 1, 2, 2, 2, 2, 0])
ct = ev.contingency_table(seg, gt, ignore_seg=[], ignore_gt=[])
ct0 = ev.contingency_table(seg, gt, ignore_seg=[0], ignore_gt=[0])
ctd = ct.todense()
assert_equal(ctd, np.array([[0. , 0.125, 0. ],
[0. , 0.25 , 0.125],
[0. , 0. , 0.375],
[0.125, 0. , 0. ]]))
assert ct.shape == ct0.shape
def test_vi():
seg = np.array([1, 2, 3, 4])
gt = np.array([1, 1, 8, 8])
assert_equal(ev.vi(seg, gt), 1)
def test_are():
seg = np.array([[0,1], [1,0]])
gt = np.array([[1,2],[0,1]])
assert_almost_equal(ev.adapted_rand_error(seg,gt),0.081)
assert seg.shape == gt.shape
|
// ... existing code ...
def test_are():
seg = np.array([[0,1], [1,0]])
gt = np.array([[1,2],[0,1]])
assert_almost_equal(ev.adapted_rand_error(seg,gt),0.081)
assert seg.shape == gt.shape
// ... rest of the code ...
|
43c1f230382a3b7ad7776d28840c5305bb919ab9
|
jujugui/__init__.py
|
jujugui/__init__.py
|
from pyramid.config import Configurator
def main(global_config, **settings):
"""Return a Pyramid WSGI application."""
config = Configurator(settings=settings)
return make_application(config)
def make_application(config):
"""Set up the routes and return the WSGI application."""
# We use two separate included app/routes so that we can
# have the gui parts behind a separate route from the
# assets when we embed it in e.g. the storefront.
config.include('jujugui.gui')
config.include('jujugui.assets')
return config.make_wsgi_app()
|
from pyramid.config import Configurator
def main(global_config, **settings):
"""Return a Pyramid WSGI application."""
config = Configurator(settings=settings)
return make_application(config)
def make_application(config):
"""Set up the routes and return the WSGI application."""
# We use two separate included app/routes so that we can
# have the gui parts behind a separate route from the
# assets when we embed it in e.g. the storefront.
# NOTE: kadams54, 2015-08-04: It's very important that assets be listed
# first; if it isn't, then the jujugui.gui routes override those specified
# in assets and any asset requests will go to the main app.
config.include('jujugui.assets')
config.include('jujugui.gui')
return config.make_wsgi_app()
|
Fix load order to fix routes.
|
Fix load order to fix routes.
|
Python
|
agpl-3.0
|
bac/juju-gui,bac/juju-gui,mitechie/juju-gui,mitechie/juju-gui,mitechie/juju-gui,mitechie/juju-gui,bac/juju-gui,bac/juju-gui
|
from pyramid.config import Configurator
def main(global_config, **settings):
"""Return a Pyramid WSGI application."""
config = Configurator(settings=settings)
return make_application(config)
def make_application(config):
"""Set up the routes and return the WSGI application."""
# We use two separate included app/routes so that we can
# have the gui parts behind a separate route from the
# assets when we embed it in e.g. the storefront.
+ # NOTE: kadams54, 2015-08-04: It's very important that assets be listed
+ # first; if it isn't, then the jujugui.gui routes override those specified
+ # in assets and any asset requests will go to the main app.
+ config.include('jujugui.assets')
config.include('jujugui.gui')
- config.include('jujugui.assets')
return config.make_wsgi_app()
|
Fix load order to fix routes.
|
## Code Before:
from pyramid.config import Configurator
def main(global_config, **settings):
"""Return a Pyramid WSGI application."""
config = Configurator(settings=settings)
return make_application(config)
def make_application(config):
"""Set up the routes and return the WSGI application."""
# We use two separate included app/routes so that we can
# have the gui parts behind a separate route from the
# assets when we embed it in e.g. the storefront.
config.include('jujugui.gui')
config.include('jujugui.assets')
return config.make_wsgi_app()
## Instruction:
Fix load order to fix routes.
## Code After:
from pyramid.config import Configurator
def main(global_config, **settings):
"""Return a Pyramid WSGI application."""
config = Configurator(settings=settings)
return make_application(config)
def make_application(config):
"""Set up the routes and return the WSGI application."""
# We use two separate included app/routes so that we can
# have the gui parts behind a separate route from the
# assets when we embed it in e.g. the storefront.
# NOTE: kadams54, 2015-08-04: It's very important that assets be listed
# first; if it isn't, then the jujugui.gui routes override those specified
# in assets and any asset requests will go to the main app.
config.include('jujugui.assets')
config.include('jujugui.gui')
return config.make_wsgi_app()
|
# ... existing code ...
# assets when we embed it in e.g. the storefront.
# NOTE: kadams54, 2015-08-04: It's very important that assets be listed
# first; if it isn't, then the jujugui.gui routes override those specified
# in assets and any asset requests will go to the main app.
config.include('jujugui.assets')
config.include('jujugui.gui')
return config.make_wsgi_app()
# ... rest of the code ...
|
d47e3b7216effab8aa067d0a214b071ca77393fd
|
stories/serializers.py
|
stories/serializers.py
|
from rest_framework import serializers
from users.serializers import AuthorSerializer
from .models import Story, StoryLine
class StoryLineSerializer(serializers.ModelSerializer):
class Meta:
model = StoryLine
fields = ('id', 'content', 'posted_on')
class StorySerializer(serializers.ModelSerializer):
title = serializers.CharField(min_length=3, max_length=100)
author = AuthorSerializer(read_only=True)
storyline_set = StoryLineSerializer(many=True)
class Meta:
model = Story
fields = ('id', 'title', 'author', 'posted_on', 'storyline_set')
def create(self, validated_data):
request = self.context['request']
author = request.user.author
return Story.objects.create(author=author, **validated_data)
|
from rest_framework import serializers
from users.serializers import AuthorSerializer
from .models import Story, StoryLine
class StoryLineSerializer(serializers.ModelSerializer):
class Meta:
model = StoryLine
fields = ('id', 'content', 'posted_on')
class StorySerializer(serializers.ModelSerializer):
title = serializers.CharField(min_length=3, max_length=100)
author = AuthorSerializer(read_only=True)
storyline_set = StoryLineSerializer(read_only=True, many=True)
class Meta:
model = Story
fields = ('id', 'title', 'author', 'posted_on', 'storyline_set')
def create(self, validated_data):
request = self.context['request']
author = request.user.author
return Story.objects.create(author=author, **validated_data)
|
Set stories_set to read only field
|
Set stories_set to read only field
|
Python
|
mit
|
pu6ki/tarina,pu6ki/tarina,pu6ki/tarina
|
from rest_framework import serializers
from users.serializers import AuthorSerializer
from .models import Story, StoryLine
class StoryLineSerializer(serializers.ModelSerializer):
class Meta:
model = StoryLine
fields = ('id', 'content', 'posted_on')
class StorySerializer(serializers.ModelSerializer):
title = serializers.CharField(min_length=3, max_length=100)
author = AuthorSerializer(read_only=True)
- storyline_set = StoryLineSerializer(many=True)
+ storyline_set = StoryLineSerializer(read_only=True, many=True)
class Meta:
model = Story
fields = ('id', 'title', 'author', 'posted_on', 'storyline_set')
def create(self, validated_data):
request = self.context['request']
author = request.user.author
return Story.objects.create(author=author, **validated_data)
|
Set stories_set to read only field
|
## Code Before:
from rest_framework import serializers
from users.serializers import AuthorSerializer
from .models import Story, StoryLine
class StoryLineSerializer(serializers.ModelSerializer):
class Meta:
model = StoryLine
fields = ('id', 'content', 'posted_on')
class StorySerializer(serializers.ModelSerializer):
title = serializers.CharField(min_length=3, max_length=100)
author = AuthorSerializer(read_only=True)
storyline_set = StoryLineSerializer(many=True)
class Meta:
model = Story
fields = ('id', 'title', 'author', 'posted_on', 'storyline_set')
def create(self, validated_data):
request = self.context['request']
author = request.user.author
return Story.objects.create(author=author, **validated_data)
## Instruction:
Set stories_set to read only field
## Code After:
from rest_framework import serializers
from users.serializers import AuthorSerializer
from .models import Story, StoryLine
class StoryLineSerializer(serializers.ModelSerializer):
class Meta:
model = StoryLine
fields = ('id', 'content', 'posted_on')
class StorySerializer(serializers.ModelSerializer):
title = serializers.CharField(min_length=3, max_length=100)
author = AuthorSerializer(read_only=True)
storyline_set = StoryLineSerializer(read_only=True, many=True)
class Meta:
model = Story
fields = ('id', 'title', 'author', 'posted_on', 'storyline_set')
def create(self, validated_data):
request = self.context['request']
author = request.user.author
return Story.objects.create(author=author, **validated_data)
|
# ... existing code ...
author = AuthorSerializer(read_only=True)
storyline_set = StoryLineSerializer(read_only=True, many=True)
# ... rest of the code ...
|
2e3b38d102c7e15ed121651c1eac26acd9c7f399
|
grapdashboard.py
|
grapdashboard.py
|
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
class UQAMDashboard(Dashboard):
def __init__(self, **kwargs):
Dashboard.__init__(self, **kwargs)
self.children.append(modules.AppList(
title=_('Catalogue'),
column=1,
collapsible=False,
models=('cat.models.MuseumObject', 'parties.*', 'location.*',
'loans.models.LoanAgreement', 'condition.*',
'uqamcollections.*'),
exclude=('django.contrib.*', 'djcelery.*', 'reports.*'),
))
self.children.append(modules.AppList(
title='Data dictionary',
column=1,
models=('cat.models.*', 'loans.models.LoanPurpose'),
exclude=('cat.models.MuseumObject',)
))
# append an app list module for "Administration"
self.children.append(modules.AppList(
title=_('Administration'),
column=1,
collapsible=True,
models=('django.contrib.*', 'djcelergy.*', 'reports.*',
'dataimport.*', 'mediaman.*'),
))
# append a recent actions module
self.children.append(modules.RecentActions(
title=_('Recent Actions'),
column=3,
collapsible=False,
limit=5,
))
|
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
class UQAMDashboard(Dashboard):
def __init__(self, **kwargs):
Dashboard.__init__(self, **kwargs)
self.children.append(modules.AppList(
title=_('Catalogue'),
column=1,
collapsible=False,
models=('cat.models.MuseumObject', 'parties.*', 'location.*',
'loans.models.LoanAgreement', 'condition.*',
'uqamcollections.*'),
exclude=('django.contrib.*', 'djcelery.*', 'reports.*'),
))
self.children.append(modules.AppList(
title='Data dictionary',
column=1,
models=('cat.models.*', 'loans.models.LoanPurpose'),
exclude=('cat.models.MuseumObject',)
))
# append an app list module for "Administration"
self.children.append(modules.AppList(
title=_('Administration'),
column=1,
collapsible=True,
models=('django.contrib.*', 'djcelergy.*', 'reports.*',
'dataimport.*', 'mediaman.*'),
))
# append a recent actions module
self.children.append(modules.RecentActions(
title=_('Recent Actions'),
column=2,
collapsible=False,
limit=5,
))
self.children.append(modules.LinkList(
layout='inline',
title=_('Admin tools'),
column=2,
children=(
['Upload media', '/mediaman/bulk_upload/'],
['Filter/query items', '/admin/cat/museumobject/search'],
)
))
|
Add links to admin dashboard
|
Add links to admin dashboard
|
Python
|
bsd-3-clause
|
uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam
|
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
class UQAMDashboard(Dashboard):
def __init__(self, **kwargs):
Dashboard.__init__(self, **kwargs)
self.children.append(modules.AppList(
title=_('Catalogue'),
column=1,
collapsible=False,
models=('cat.models.MuseumObject', 'parties.*', 'location.*',
'loans.models.LoanAgreement', 'condition.*',
'uqamcollections.*'),
exclude=('django.contrib.*', 'djcelery.*', 'reports.*'),
))
self.children.append(modules.AppList(
title='Data dictionary',
column=1,
models=('cat.models.*', 'loans.models.LoanPurpose'),
exclude=('cat.models.MuseumObject',)
))
# append an app list module for "Administration"
self.children.append(modules.AppList(
title=_('Administration'),
column=1,
collapsible=True,
models=('django.contrib.*', 'djcelergy.*', 'reports.*',
'dataimport.*', 'mediaman.*'),
))
# append a recent actions module
self.children.append(modules.RecentActions(
title=_('Recent Actions'),
- column=3,
+ column=2,
collapsible=False,
limit=5,
))
+ self.children.append(modules.LinkList(
+ layout='inline',
+ title=_('Admin tools'),
+ column=2,
+ children=(
+ ['Upload media', '/mediaman/bulk_upload/'],
+ ['Filter/query items', '/admin/cat/museumobject/search'],
+ )
+ ))
+
|
Add links to admin dashboard
|
## Code Before:
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
class UQAMDashboard(Dashboard):
def __init__(self, **kwargs):
Dashboard.__init__(self, **kwargs)
self.children.append(modules.AppList(
title=_('Catalogue'),
column=1,
collapsible=False,
models=('cat.models.MuseumObject', 'parties.*', 'location.*',
'loans.models.LoanAgreement', 'condition.*',
'uqamcollections.*'),
exclude=('django.contrib.*', 'djcelery.*', 'reports.*'),
))
self.children.append(modules.AppList(
title='Data dictionary',
column=1,
models=('cat.models.*', 'loans.models.LoanPurpose'),
exclude=('cat.models.MuseumObject',)
))
# append an app list module for "Administration"
self.children.append(modules.AppList(
title=_('Administration'),
column=1,
collapsible=True,
models=('django.contrib.*', 'djcelergy.*', 'reports.*',
'dataimport.*', 'mediaman.*'),
))
# append a recent actions module
self.children.append(modules.RecentActions(
title=_('Recent Actions'),
column=3,
collapsible=False,
limit=5,
))
## Instruction:
Add links to admin dashboard
## Code After:
from django.utils.translation import ugettext_lazy as _
from grappelli.dashboard import modules, Dashboard
class UQAMDashboard(Dashboard):
def __init__(self, **kwargs):
Dashboard.__init__(self, **kwargs)
self.children.append(modules.AppList(
title=_('Catalogue'),
column=1,
collapsible=False,
models=('cat.models.MuseumObject', 'parties.*', 'location.*',
'loans.models.LoanAgreement', 'condition.*',
'uqamcollections.*'),
exclude=('django.contrib.*', 'djcelery.*', 'reports.*'),
))
self.children.append(modules.AppList(
title='Data dictionary',
column=1,
models=('cat.models.*', 'loans.models.LoanPurpose'),
exclude=('cat.models.MuseumObject',)
))
# append an app list module for "Administration"
self.children.append(modules.AppList(
title=_('Administration'),
column=1,
collapsible=True,
models=('django.contrib.*', 'djcelergy.*', 'reports.*',
'dataimport.*', 'mediaman.*'),
))
# append a recent actions module
self.children.append(modules.RecentActions(
title=_('Recent Actions'),
column=2,
collapsible=False,
limit=5,
))
self.children.append(modules.LinkList(
layout='inline',
title=_('Admin tools'),
column=2,
children=(
['Upload media', '/mediaman/bulk_upload/'],
['Filter/query items', '/admin/cat/museumobject/search'],
)
))
|
# ... existing code ...
title=_('Recent Actions'),
column=2,
collapsible=False,
# ... modified code ...
))
self.children.append(modules.LinkList(
layout='inline',
title=_('Admin tools'),
column=2,
children=(
['Upload media', '/mediaman/bulk_upload/'],
['Filter/query items', '/admin/cat/museumobject/search'],
)
))
# ... rest of the code ...
|
e1b2b35e36566e92bf789c4d5ab7c668d520f492
|
taca/illumina/NextSeq_Runs.py
|
taca/illumina/NextSeq_Runs.py
|
import os
import re
import csv
import glob
import shutil
import gzip
import operator
import subprocess
from datetime import datetime
from taca.utils.filesystem import chdir, control_fastq_filename
from taca.illumina.Runs import Run
from taca.illumina.HiSeqX_Runs import HiSeqX_Run
from taca.utils import misc
from flowcell_parser.classes import RunParametersParser, SampleSheetParser, RunParser, LaneBarcodeParser, DemuxSummaryParser
import logging
logger = logging.getLogger(__name__)
class NextSeq_Run(HiSeqX_Run):
def __init__(self, run_dir, samplesheet_folders):
super(NextSeq_Run, self).__init__(run_dir, samplesheet_folders)
self._set_sequencer_type()
self._set_run_type()
def _set_sequencer_type(self):
self.sequencer_type = "NextSeq"
def _set_run_type(self):
self.run_type = "NGI-RUN"
|
import os
import re
import csv
import glob
import shutil
import gzip
import operator
import subprocess
from datetime import datetime
from taca.utils.filesystem import chdir, control_fastq_filename
from taca.illumina.Runs import Run
from taca.illumina.HiSeqX_Runs import HiSeqX_Run
from taca.utils import misc
import logging
logger = logging.getLogger(__name__)
class NextSeq_Run(HiSeqX_Run):
def __init__(self, run_dir, samplesheet_folders):
super(NextSeq_Run, self).__init__(run_dir, samplesheet_folders)
self._set_sequencer_type()
self._set_run_type()
def _set_sequencer_type(self):
self.sequencer_type = "NextSeq"
def _set_run_type(self):
self.run_type = "NGI-RUN"
|
Clear samplesheet parser from header
|
Clear samplesheet parser from header
|
Python
|
mit
|
SciLifeLab/TACA,SciLifeLab/TACA,SciLifeLab/TACA
|
import os
import re
import csv
import glob
import shutil
import gzip
import operator
import subprocess
from datetime import datetime
from taca.utils.filesystem import chdir, control_fastq_filename
from taca.illumina.Runs import Run
from taca.illumina.HiSeqX_Runs import HiSeqX_Run
from taca.utils import misc
- from flowcell_parser.classes import RunParametersParser, SampleSheetParser, RunParser, LaneBarcodeParser, DemuxSummaryParser
-
import logging
logger = logging.getLogger(__name__)
class NextSeq_Run(HiSeqX_Run):
def __init__(self, run_dir, samplesheet_folders):
super(NextSeq_Run, self).__init__(run_dir, samplesheet_folders)
self._set_sequencer_type()
self._set_run_type()
def _set_sequencer_type(self):
self.sequencer_type = "NextSeq"
def _set_run_type(self):
self.run_type = "NGI-RUN"
|
Clear samplesheet parser from header
|
## Code Before:
import os
import re
import csv
import glob
import shutil
import gzip
import operator
import subprocess
from datetime import datetime
from taca.utils.filesystem import chdir, control_fastq_filename
from taca.illumina.Runs import Run
from taca.illumina.HiSeqX_Runs import HiSeqX_Run
from taca.utils import misc
from flowcell_parser.classes import RunParametersParser, SampleSheetParser, RunParser, LaneBarcodeParser, DemuxSummaryParser
import logging
logger = logging.getLogger(__name__)
class NextSeq_Run(HiSeqX_Run):
def __init__(self, run_dir, samplesheet_folders):
super(NextSeq_Run, self).__init__(run_dir, samplesheet_folders)
self._set_sequencer_type()
self._set_run_type()
def _set_sequencer_type(self):
self.sequencer_type = "NextSeq"
def _set_run_type(self):
self.run_type = "NGI-RUN"
## Instruction:
Clear samplesheet parser from header
## Code After:
import os
import re
import csv
import glob
import shutil
import gzip
import operator
import subprocess
from datetime import datetime
from taca.utils.filesystem import chdir, control_fastq_filename
from taca.illumina.Runs import Run
from taca.illumina.HiSeqX_Runs import HiSeqX_Run
from taca.utils import misc
import logging
logger = logging.getLogger(__name__)
class NextSeq_Run(HiSeqX_Run):
def __init__(self, run_dir, samplesheet_folders):
super(NextSeq_Run, self).__init__(run_dir, samplesheet_folders)
self._set_sequencer_type()
self._set_run_type()
def _set_sequencer_type(self):
self.sequencer_type = "NextSeq"
def _set_run_type(self):
self.run_type = "NGI-RUN"
|
# ... existing code ...
from taca.utils import misc
# ... rest of the code ...
|
43b46f1e3ded3972dede7226cf0255b904d028bd
|
django/notejam/pads/tests.py
|
django/notejam/pads/tests.py
|
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
class PadTest(TestCase):
def setUp(self):
user_data = {
'email': '[email protected]',
'password': 'secure_password'
}
user = User.objects.create(username=user_data['email'], **user_data)
user.set_password(user_data['password'])
user.save()
self.client.login(**user_data)
def _get_pad_data(self):
pass
def test_create_pad_success(self):
pass
|
Test improvementes. Empty Pad test class added.
|
Django: Test improvementes. Empty Pad test class added.
|
Python
|
mit
|
hstaugaard/notejam,nadavge/notejam,lefloh/notejam,lefloh/notejam,williamn/notejam,hstaugaard/notejam,nadavge/notejam,williamn/notejam,hstaugaard/notejam,hstaugaard/notejam,lefloh/notejam,lefloh/notejam,williamn/notejam,nadavge/notejam,lefloh/notejam,hstaugaard/notejam,williamn/notejam,shikhardb/notejam,williamn/notejam,williamn/notejam,hstaugaard/notejam,shikhardb/notejam,lefloh/notejam,hstaugaard/notejam,lefloh/notejam,williamn/notejam,shikhardb/notejam,shikhardb/notejam,nadavge/notejam,hstaugaard/notejam,nadavge/notejam,shikhardb/notejam
|
-
+ from django.contrib.auth.models import User
+ from django.core.urlresolvers import reverse
from django.test import TestCase
- class SimpleTest(TestCase):
+ class PadTest(TestCase):
- def test_basic_addition(self):
+ def setUp(self):
+ user_data = {
+ 'email': '[email protected]',
+ 'password': 'secure_password'
- """
+ }
- Tests that 1 + 1 always equals 2.
- """
- self.assertEqual(1 + 1, 2)
+ user = User.objects.create(username=user_data['email'], **user_data)
+ user.set_password(user_data['password'])
+ user.save()
+ self.client.login(**user_data)
+
+ def _get_pad_data(self):
+ pass
+
+ def test_create_pad_success(self):
+ pass
+
|
Test improvementes. Empty Pad test class added.
|
## Code Before:
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
## Instruction:
Test improvementes. Empty Pad test class added.
## Code After:
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
class PadTest(TestCase):
def setUp(self):
user_data = {
'email': '[email protected]',
'password': 'secure_password'
}
user = User.objects.create(username=user_data['email'], **user_data)
user.set_password(user_data['password'])
user.save()
self.client.login(**user_data)
def _get_pad_data(self):
pass
def test_create_pad_success(self):
pass
|
# ... existing code ...
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
# ... modified code ...
class PadTest(TestCase):
def setUp(self):
user_data = {
'email': '[email protected]',
'password': 'secure_password'
}
user = User.objects.create(username=user_data['email'], **user_data)
user.set_password(user_data['password'])
user.save()
self.client.login(**user_data)
def _get_pad_data(self):
pass
def test_create_pad_success(self):
pass
# ... rest of the code ...
|
1726a73b81c8a7dfc3610690fe9272776e930f0f
|
aero/adapters/bower.py
|
aero/adapters/bower.py
|
__author__ = 'oliveiraev'
__all__ = ['Bower']
from re import sub
from re import split
from aero.__version__ import enc
from .base import BaseAdapter
class Bower(BaseAdapter):
"""
Twitter Bower - Browser package manager - Adapter
"""
def search(self, query):
return {}
response = self.command('search', query, ['--no-color'])[0].decode(*enc)
lst = dict([(self.package_name(k), v) for k, v in [
line.lstrip(' -').split(' ') for line in response.splitlines()
if line.startswith(' - ')]
])
if lst:
return lst
def install(self, query):
return self.shell('install', query)
def info(self, query):
response = self.command('info', query, ['--no-color'])[0].decode(*enc)
return response or ['Aborted: No info available']
|
__author__ = 'oliveiraev'
__all__ = ['Bower']
from re import sub
from re import split
from aero.__version__ import enc
from .base import BaseAdapter
class Bower(BaseAdapter):
"""
Twitter Bower - Browser package manager - Adapter
"""
def search(self, query):
response = self.command('search', query, ['--no-color'])[0].decode(*enc)
lst = dict([(self.package_name(k), v) for k, v in [
line.lstrip(' -').split(' ') for line in response.splitlines()
if line.startswith(' - ')]
])
return lst or {}
def install(self, query):
return self.shell('install', query)
def info(self, query):
response = self.command('info', query, ['--no-color'])[0].decode(*enc)
return response or ['Aborted: No info available']
|
Simplify return while we're at it
|
Simplify return while we're at it
|
Python
|
bsd-3-clause
|
Aeronautics/aero
|
__author__ = 'oliveiraev'
__all__ = ['Bower']
from re import sub
from re import split
from aero.__version__ import enc
from .base import BaseAdapter
class Bower(BaseAdapter):
"""
Twitter Bower - Browser package manager - Adapter
"""
def search(self, query):
- return {}
-
response = self.command('search', query, ['--no-color'])[0].decode(*enc)
lst = dict([(self.package_name(k), v) for k, v in [
line.lstrip(' -').split(' ') for line in response.splitlines()
if line.startswith(' - ')]
])
- if lst:
- return lst
+ return lst or {}
def install(self, query):
return self.shell('install', query)
def info(self, query):
response = self.command('info', query, ['--no-color'])[0].decode(*enc)
return response or ['Aborted: No info available']
|
Simplify return while we're at it
|
## Code Before:
__author__ = 'oliveiraev'
__all__ = ['Bower']
from re import sub
from re import split
from aero.__version__ import enc
from .base import BaseAdapter
class Bower(BaseAdapter):
"""
Twitter Bower - Browser package manager - Adapter
"""
def search(self, query):
return {}
response = self.command('search', query, ['--no-color'])[0].decode(*enc)
lst = dict([(self.package_name(k), v) for k, v in [
line.lstrip(' -').split(' ') for line in response.splitlines()
if line.startswith(' - ')]
])
if lst:
return lst
def install(self, query):
return self.shell('install', query)
def info(self, query):
response = self.command('info', query, ['--no-color'])[0].decode(*enc)
return response or ['Aborted: No info available']
## Instruction:
Simplify return while we're at it
## Code After:
__author__ = 'oliveiraev'
__all__ = ['Bower']
from re import sub
from re import split
from aero.__version__ import enc
from .base import BaseAdapter
class Bower(BaseAdapter):
"""
Twitter Bower - Browser package manager - Adapter
"""
def search(self, query):
response = self.command('search', query, ['--no-color'])[0].decode(*enc)
lst = dict([(self.package_name(k), v) for k, v in [
line.lstrip(' -').split(' ') for line in response.splitlines()
if line.startswith(' - ')]
])
return lst or {}
def install(self, query):
return self.shell('install', query)
def info(self, query):
response = self.command('info', query, ['--no-color'])[0].decode(*enc)
return response or ['Aborted: No info available']
|
# ... existing code ...
def search(self, query):
response = self.command('search', query, ['--no-color'])[0].decode(*enc)
# ... modified code ...
])
return lst or {}
# ... rest of the code ...
|
5490939f5b94b15c154e027abcd295f14ac17a45
|
src/config/site_utils.py
|
src/config/site_utils.py
|
from django.contrib.sites.models import Site
def set_site_info(domain='datahub-local.mit.edu', name='MIT DataHub'):
site = Site.objects.get_current()
if site.domain != domain:
site.domain = domain
site.name = name
site.save()
|
from django.contrib.sites.models import Site
from django.db.utils import ProgrammingError
def set_site_info(domain='datahub-local.mit.edu', name='MIT DataHub'):
try:
site = Site.objects.get_current()
if site.domain != domain:
site.domain = domain
site.name = name
site.save()
except ProgrammingError:
pass
|
Make sure initial migration works for new installs.
|
Make sure initial migration works for new installs.
Bootstrapping the Site model entirely in settings isn't great.
|
Python
|
mit
|
datahuborg/datahub,datahuborg/datahub,anantb/datahub,RogerTangos/datahub-stub,anantb/datahub,RogerTangos/datahub-stub,datahuborg/datahub,anantb/datahub,RogerTangos/datahub-stub,RogerTangos/datahub-stub,RogerTangos/datahub-stub,datahuborg/datahub,datahuborg/datahub,anantb/datahub,anantb/datahub,RogerTangos/datahub-stub,datahuborg/datahub,anantb/datahub,RogerTangos/datahub-stub,datahuborg/datahub,anantb/datahub
|
from django.contrib.sites.models import Site
+ from django.db.utils import ProgrammingError
def set_site_info(domain='datahub-local.mit.edu', name='MIT DataHub'):
+ try:
- site = Site.objects.get_current()
+ site = Site.objects.get_current()
- if site.domain != domain:
+ if site.domain != domain:
- site.domain = domain
+ site.domain = domain
- site.name = name
+ site.name = name
- site.save()
+ site.save()
+ except ProgrammingError:
+ pass
|
Make sure initial migration works for new installs.
|
## Code Before:
from django.contrib.sites.models import Site
def set_site_info(domain='datahub-local.mit.edu', name='MIT DataHub'):
site = Site.objects.get_current()
if site.domain != domain:
site.domain = domain
site.name = name
site.save()
## Instruction:
Make sure initial migration works for new installs.
## Code After:
from django.contrib.sites.models import Site
from django.db.utils import ProgrammingError
def set_site_info(domain='datahub-local.mit.edu', name='MIT DataHub'):
try:
site = Site.objects.get_current()
if site.domain != domain:
site.domain = domain
site.name = name
site.save()
except ProgrammingError:
pass
|
// ... existing code ...
from django.contrib.sites.models import Site
from django.db.utils import ProgrammingError
// ... modified code ...
def set_site_info(domain='datahub-local.mit.edu', name='MIT DataHub'):
try:
site = Site.objects.get_current()
if site.domain != domain:
site.domain = domain
site.name = name
site.save()
except ProgrammingError:
pass
// ... rest of the code ...
|
92759e9df89664ae515e51825982141750921ce3
|
src/sample_xblocks/basic/test/test_view_counter.py
|
src/sample_xblocks/basic/test/test_view_counter.py
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xblock.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from sample_xblocks.basic.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
Use the correct location of view_counter in test
|
Use the correct location of view_counter in test
|
Python
|
apache-2.0
|
stvstnfrd/xblock-sdk,dcadams/xblock-sdk,edx/xblock-sdk,jamiefolsom/xblock-sdk,edx/xblock-sdk,stvstnfrd/xblock-sdk,nagyistoce/edx-xblock-sdk,lovehhf/xblock-sdk,edx-solutions/xblock-sdk,Pilou81715/hackathon_edX,Pilou81715/hackathon_edX,edx-solutions/xblock-sdk,lovehhf/xblock-sdk,lovehhf/xblock-sdk,nagyistoce/edx-xblock-sdk,dcadams/xblock-sdk,jamiefolsom/xblock-sdk,jamiefolsom/xblock-sdk,Lyla-Fischer/xblock-sdk,Lyla-Fischer/xblock-sdk,stvstnfrd/xblock-sdk,lovehhf/xblock-sdk,Lyla-Fischer/xblock-sdk,edx/xblock-sdk,Pilou81715/hackathon_edX,nagyistoce/edx-xblock-sdk,edx-solutions/xblock-sdk,Pilou81715/hackathon_edX,dcadams/xblock-sdk,nagyistoce/edx-xblock-sdk,edx-solutions/xblock-sdk,jamiefolsom/xblock-sdk
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
- from xblock.view_counter import ViewCounter
+ from sample_xblocks.basic.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
Use the correct location of view_counter in test
|
## Code Before:
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xblock.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
## Instruction:
Use the correct location of view_counter in test
## Code After:
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from sample_xblocks.basic.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
// ... existing code ...
from xblock.runtime import KvsFieldData, DictKeyValueStore
from sample_xblocks.basic.view_counter import ViewCounter
// ... rest of the code ...
|
aecc14ea11cae2bb27ee2534a229e7af8453053e
|
readthedocs/rtd_tests/tests/test_hacks.py
|
readthedocs/rtd_tests/tests/test_hacks.py
|
from django.test import TestCase
from readthedocs.core import hacks
class TestHacks(TestCase):
fixtures = ['eric.json', 'test_data.json']
def setUp(self):
hacks.patch_meta_path()
def tearDown(self):
hacks.unpatch_meta_path()
def test_hack_failed_import(self):
import boogy
self.assertTrue(str(boogy), "<Silly Human, I'm not real>")
def test_hack_correct_import(self):
import itertools
self.assertFalse(str(itertools), "<Silly Human, I'm not real>")
|
from django.test import TestCase
from core import hacks
class TestHacks(TestCase):
fixtures = ['eric.json', 'test_data.json']
def setUp(self):
hacks.patch_meta_path()
def tearDown(self):
hacks.unpatch_meta_path()
def test_hack_failed_import(self):
import boogy
self.assertTrue(str(boogy), "<Silly Human, I'm not real>")
def test_hack_correct_import(self):
import itertools
self.assertFalse(str(itertools), "<Silly Human, I'm not real>")
|
Fix import to not include project.
|
Fix import to not include project.
|
Python
|
mit
|
agjohnson/readthedocs.org,pombredanne/readthedocs.org,wijerasa/readthedocs.org,atsuyim/readthedocs.org,CedarLogic/readthedocs.org,safwanrahman/readthedocs.org,nyergler/pythonslides,sunnyzwh/readthedocs.org,raven47git/readthedocs.org,CedarLogic/readthedocs.org,michaelmcandrew/readthedocs.org,fujita-shintaro/readthedocs.org,GovReady/readthedocs.org,singingwolfboy/readthedocs.org,kenwang76/readthedocs.org,agjohnson/readthedocs.org,clarkperkins/readthedocs.org,titiushko/readthedocs.org,kenshinthebattosai/readthedocs.org,VishvajitP/readthedocs.org,SteveViss/readthedocs.org,royalwang/readthedocs.org,KamranMackey/readthedocs.org,stevepiercy/readthedocs.org,CedarLogic/readthedocs.org,kdkeyser/readthedocs.org,kenwang76/readthedocs.org,gjtorikian/readthedocs.org,stevepiercy/readthedocs.org,KamranMackey/readthedocs.org,mhils/readthedocs.org,michaelmcandrew/readthedocs.org,kenshinthebattosai/readthedocs.org,laplaceliu/readthedocs.org,sils1297/readthedocs.org,rtfd/readthedocs.org,kenwang76/readthedocs.org,takluyver/readthedocs.org,cgourlay/readthedocs.org,wanghaven/readthedocs.org,sunnyzwh/readthedocs.org,sid-kap/readthedocs.org,laplaceliu/readthedocs.org,rtfd/readthedocs.org,espdev/readthedocs.org,sils1297/readthedocs.org,ojii/readthedocs.org,tddv/readthedocs.org,cgourlay/readthedocs.org,hach-que/readthedocs.org,Tazer/readthedocs.org,istresearch/readthedocs.org,cgourlay/readthedocs.org,techtonik/readthedocs.org,royalwang/readthedocs.org,Tazer/readthedocs.org,laplaceliu/readthedocs.org,kenshinthebattosai/readthedocs.org,davidfischer/readthedocs.org,davidfischer/readthedocs.org,sid-kap/readthedocs.org,soulshake/readthedocs.org,laplaceliu/readthedocs.org,nyergler/pythonslides,takluyver/readthedocs.org,kenwang76/readthedocs.org,mhils/readthedocs.org,clarkperkins/readthedocs.org,d0ugal/readthedocs.org,GovReady/readthedocs.org,tddv/readthedocs.org,wijerasa/readthedocs.org,stevepiercy/readthedocs.org,KamranMackey/readthedocs.org,alex/readthedocs.org,singingwolfboy/readthedocs.org,jere
l/readthedocs.org,fujita-shintaro/readthedocs.org,pombredanne/readthedocs.org,royalwang/readthedocs.org,kenshinthebattosai/readthedocs.org,royalwang/readthedocs.org,dirn/readthedocs.org,espdev/readthedocs.org,rtfd/readthedocs.org,istresearch/readthedocs.org,emawind84/readthedocs.org,techtonik/readthedocs.org,emawind84/readthedocs.org,fujita-shintaro/readthedocs.org,SteveViss/readthedocs.org,sunnyzwh/readthedocs.org,espdev/readthedocs.org,LukasBoersma/readthedocs.org,hach-que/readthedocs.org,raven47git/readthedocs.org,Carreau/readthedocs.org,sunnyzwh/readthedocs.org,asampat3090/readthedocs.org,sils1297/readthedocs.org,dirn/readthedocs.org,titiushko/readthedocs.org,titiushko/readthedocs.org,wanghaven/readthedocs.org,alex/readthedocs.org,mhils/readthedocs.org,LukasBoersma/readthedocs.org,kdkeyser/readthedocs.org,stevepiercy/readthedocs.org,asampat3090/readthedocs.org,soulshake/readthedocs.org,attakei/readthedocs-oauth,espdev/readthedocs.org,dirn/readthedocs.org,atsuyim/readthedocs.org,alex/readthedocs.org,michaelmcandrew/readthedocs.org,d0ugal/readthedocs.org,d0ugal/readthedocs.org,wanghaven/readthedocs.org,pombredanne/readthedocs.org,emawind84/readthedocs.org,fujita-shintaro/readthedocs.org,raven47git/readthedocs.org,nyergler/pythonslides,Carreau/readthedocs.org,atsuyim/readthedocs.org,soulshake/readthedocs.org,sils1297/readthedocs.org,alex/readthedocs.org,espdev/readthedocs.org,Carreau/readthedocs.org,kdkeyser/readthedocs.org,wanghaven/readthedocs.org,jerel/readthedocs.org,clarkperkins/readthedocs.org,johncosta/private-readthedocs.org,nyergler/pythonslides,ojii/readthedocs.org,nikolas/readthedocs.org,singingwolfboy/readthedocs.org,tddv/readthedocs.org,nikolas/readthedocs.org,wijerasa/readthedocs.org,VishvajitP/readthedocs.org,raven47git/readthedocs.org,istresearch/readthedocs.org,GovReady/readthedocs.org,johncosta/private-readthedocs.org,sid-kap/readthedocs.org,clarkperkins/readthedocs.org,mrshoki/readthedocs.org,Carreau/readthedocs.org,d0ugal/readthedocs.org,techton
ik/readthedocs.org,ojii/readthedocs.org,safwanrahman/readthedocs.org,ojii/readthedocs.org,asampat3090/readthedocs.org,mrshoki/readthedocs.org,attakei/readthedocs-oauth,SteveViss/readthedocs.org,mrshoki/readthedocs.org,nikolas/readthedocs.org,techtonik/readthedocs.org,michaelmcandrew/readthedocs.org,cgourlay/readthedocs.org,jerel/readthedocs.org,davidfischer/readthedocs.org,VishvajitP/readthedocs.org,kdkeyser/readthedocs.org,hach-que/readthedocs.org,GovReady/readthedocs.org,attakei/readthedocs-oauth,dirn/readthedocs.org,CedarLogic/readthedocs.org,sid-kap/readthedocs.org,KamranMackey/readthedocs.org,jerel/readthedocs.org,asampat3090/readthedocs.org,johncosta/private-readthedocs.org,gjtorikian/readthedocs.org,Tazer/readthedocs.org,safwanrahman/readthedocs.org,singingwolfboy/readthedocs.org,soulshake/readthedocs.org,attakei/readthedocs-oauth,gjtorikian/readthedocs.org,wijerasa/readthedocs.org,takluyver/readthedocs.org,davidfischer/readthedocs.org,LukasBoersma/readthedocs.org,LukasBoersma/readthedocs.org,agjohnson/readthedocs.org,safwanrahman/readthedocs.org,emawind84/readthedocs.org,titiushko/readthedocs.org,Tazer/readthedocs.org,mrshoki/readthedocs.org,atsuyim/readthedocs.org,hach-que/readthedocs.org,istresearch/readthedocs.org,agjohnson/readthedocs.org,VishvajitP/readthedocs.org,SteveViss/readthedocs.org,takluyver/readthedocs.org,nikolas/readthedocs.org,rtfd/readthedocs.org,gjtorikian/readthedocs.org,mhils/readthedocs.org
|
from django.test import TestCase
- from readthedocs.core import hacks
+ from core import hacks
class TestHacks(TestCase):
fixtures = ['eric.json', 'test_data.json']
def setUp(self):
hacks.patch_meta_path()
def tearDown(self):
hacks.unpatch_meta_path()
def test_hack_failed_import(self):
import boogy
self.assertTrue(str(boogy), "<Silly Human, I'm not real>")
def test_hack_correct_import(self):
import itertools
self.assertFalse(str(itertools), "<Silly Human, I'm not real>")
|
Fix import to not include project.
|
## Code Before:
from django.test import TestCase
from readthedocs.core import hacks
class TestHacks(TestCase):
fixtures = ['eric.json', 'test_data.json']
def setUp(self):
hacks.patch_meta_path()
def tearDown(self):
hacks.unpatch_meta_path()
def test_hack_failed_import(self):
import boogy
self.assertTrue(str(boogy), "<Silly Human, I'm not real>")
def test_hack_correct_import(self):
import itertools
self.assertFalse(str(itertools), "<Silly Human, I'm not real>")
## Instruction:
Fix import to not include project.
## Code After:
from django.test import TestCase
from core import hacks
class TestHacks(TestCase):
fixtures = ['eric.json', 'test_data.json']
def setUp(self):
hacks.patch_meta_path()
def tearDown(self):
hacks.unpatch_meta_path()
def test_hack_failed_import(self):
import boogy
self.assertTrue(str(boogy), "<Silly Human, I'm not real>")
def test_hack_correct_import(self):
import itertools
self.assertFalse(str(itertools), "<Silly Human, I'm not real>")
|
...
from django.test import TestCase
from core import hacks
...
|
3139ae7dceb3605e70db2cbcde0d732dcb68bc2a
|
serfnode/handler/config.py
|
serfnode/handler/config.py
|
import os
import uuid
from mischief.actors.pipe import get_local_ip
import yaml
def read_serfnode_yml():
with open('/serfnode.yml') as input:
conf = yaml.load(input) or {}
return conf.get('serfnode') or {}
yml = read_serfnode_yml()
role = os.environ.get('ROLE') or yml.get('ROLE') or 'no_role'
peer = os.environ.get('PEER') or yml.get('PEER')
ip = (os.environ.get('SERF_IP') or yml.get('SERF_IP') or
get_local_ip('8.8.8.8'))
bind_port = os.environ.get('SERF_PORT') or yml.get('SERF_PORT') or 7946
node = os.environ.get('NODE_NAME') or uuid.uuid4().hex
rpc_port = os.environ.get('RPC_PORT') or 7373
service = os.environ.get('SERVICE_IP') or yml.get('SERVICE_IP')
service_port = os.environ.get('SERVICE_PORT') or yml.get('SERVICE_PORT') or 0
|
import os
import uuid
from mischief.actors.pipe import get_local_ip
import yaml
def read_serfnode_yml():
with open('/serfnode.yml') as input:
conf = yaml.load(input) or {}
return conf.get('serfnode') or {}
yml = read_serfnode_yml()
role = os.environ.get('ROLE') or yml.get('role') or 'no_role'
peer = os.environ.get('PEER') or yml.get('peer')
ip = (os.environ.get('SERF_IP') or yml.get('serf_ip') or
get_local_ip('8.8.8.8'))
bind_port = os.environ.get('SERF_PORT') or yml.get('serf_port') or 7946
node = os.environ.get('NODE_NAME') or uuid.uuid4().hex
rpc_port = os.environ.get('RPC_PORT') or 7373
service = os.environ.get('SERVICE_IP') or yml.get('service_ip')
service_port = os.environ.get('SERVICE_PORT') or yml.get('service_port') or 0
|
Make yaml fields lowercase in serfnode section
|
Make yaml fields lowercase in serfnode section
|
Python
|
mit
|
waltermoreira/serfnode,waltermoreira/serfnode,waltermoreira/serfnode
|
import os
import uuid
from mischief.actors.pipe import get_local_ip
import yaml
def read_serfnode_yml():
with open('/serfnode.yml') as input:
conf = yaml.load(input) or {}
return conf.get('serfnode') or {}
yml = read_serfnode_yml()
- role = os.environ.get('ROLE') or yml.get('ROLE') or 'no_role'
+ role = os.environ.get('ROLE') or yml.get('role') or 'no_role'
- peer = os.environ.get('PEER') or yml.get('PEER')
+ peer = os.environ.get('PEER') or yml.get('peer')
- ip = (os.environ.get('SERF_IP') or yml.get('SERF_IP') or
+ ip = (os.environ.get('SERF_IP') or yml.get('serf_ip') or
get_local_ip('8.8.8.8'))
- bind_port = os.environ.get('SERF_PORT') or yml.get('SERF_PORT') or 7946
+ bind_port = os.environ.get('SERF_PORT') or yml.get('serf_port') or 7946
node = os.environ.get('NODE_NAME') or uuid.uuid4().hex
rpc_port = os.environ.get('RPC_PORT') or 7373
- service = os.environ.get('SERVICE_IP') or yml.get('SERVICE_IP')
+ service = os.environ.get('SERVICE_IP') or yml.get('service_ip')
- service_port = os.environ.get('SERVICE_PORT') or yml.get('SERVICE_PORT') or 0
+ service_port = os.environ.get('SERVICE_PORT') or yml.get('service_port') or 0
|
Make yaml fields lowercase in serfnode section
|
## Code Before:
import os
import uuid
from mischief.actors.pipe import get_local_ip
import yaml
def read_serfnode_yml():
with open('/serfnode.yml') as input:
conf = yaml.load(input) or {}
return conf.get('serfnode') or {}
yml = read_serfnode_yml()
role = os.environ.get('ROLE') or yml.get('ROLE') or 'no_role'
peer = os.environ.get('PEER') or yml.get('PEER')
ip = (os.environ.get('SERF_IP') or yml.get('SERF_IP') or
get_local_ip('8.8.8.8'))
bind_port = os.environ.get('SERF_PORT') or yml.get('SERF_PORT') or 7946
node = os.environ.get('NODE_NAME') or uuid.uuid4().hex
rpc_port = os.environ.get('RPC_PORT') or 7373
service = os.environ.get('SERVICE_IP') or yml.get('SERVICE_IP')
service_port = os.environ.get('SERVICE_PORT') or yml.get('SERVICE_PORT') or 0
## Instruction:
Make yaml fields lowercase in serfnode section
## Code After:
import os
import uuid
from mischief.actors.pipe import get_local_ip
import yaml
def read_serfnode_yml():
with open('/serfnode.yml') as input:
conf = yaml.load(input) or {}
return conf.get('serfnode') or {}
yml = read_serfnode_yml()
role = os.environ.get('ROLE') or yml.get('role') or 'no_role'
peer = os.environ.get('PEER') or yml.get('peer')
ip = (os.environ.get('SERF_IP') or yml.get('serf_ip') or
get_local_ip('8.8.8.8'))
bind_port = os.environ.get('SERF_PORT') or yml.get('serf_port') or 7946
node = os.environ.get('NODE_NAME') or uuid.uuid4().hex
rpc_port = os.environ.get('RPC_PORT') or 7373
service = os.environ.get('SERVICE_IP') or yml.get('service_ip')
service_port = os.environ.get('SERVICE_PORT') or yml.get('service_port') or 0
|
# ... existing code ...
role = os.environ.get('ROLE') or yml.get('role') or 'no_role'
peer = os.environ.get('PEER') or yml.get('peer')
ip = (os.environ.get('SERF_IP') or yml.get('serf_ip') or
get_local_ip('8.8.8.8'))
bind_port = os.environ.get('SERF_PORT') or yml.get('serf_port') or 7946
node = os.environ.get('NODE_NAME') or uuid.uuid4().hex
# ... modified code ...
rpc_port = os.environ.get('RPC_PORT') or 7373
service = os.environ.get('SERVICE_IP') or yml.get('service_ip')
service_port = os.environ.get('SERVICE_PORT') or yml.get('service_port') or 0
# ... rest of the code ...
|
eaff795bddb0e07f4ad4e4c9277c5c0f6f199380
|
salt/beacons/__init__.py
|
salt/beacons/__init__.py
|
'''
This package contains the loader modules for the salt streams system
'''
# Import salt libs
import salt.loader
class Beacon(object):
'''
This class is used to eveluate and execute on the beacon system
'''
def __init__(self, opts):
self.opts = opts
self.beacons = salt.loader.beacons(opts)
def process(self, config):
'''
Process the configured beacons
The config must be a dict and looks like this in yaml
code_block:: yaml
beacons:
inotify:
- /etc/fstab
- /var/cache/foo/*
'''
ret = []
for mod in config:
fun_str = '{0}.beacon'.format(mod)
if fun_str in self.beacons:
tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
raw = self.beacons[fun_str](config[mod])
for data in raw:
if 'tag' in data:
tag += data.pop('tag')
ret.append({'tag': tag, 'data': data})
return ret
|
'''
This package contains the loader modules for the salt streams system
'''
# Import salt libs
import salt.loader
class Beacon(object):
'''
This class is used to eveluate and execute on the beacon system
'''
def __init__(self, opts):
self.opts = opts
self.beacons = salt.loader.beacons(opts)
def process(self, config):
'''
Process the configured beacons
The config must be a dict and looks like this in yaml
code_block:: yaml
beacons:
inotify:
- /etc/fstab
- /var/cache/foo/*
'''
ret = []
for mod in config:
fun_str = '{0}.beacon'.format(mod)
if fun_str in self.beacons:
tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
raw = self.beacons[fun_str](config[mod])
for data in raw:
if 'tag' in data:
tag += data.pop('tag')
if not 'id' in data:
data['id'] = self.opts['id']
ret.append({'tag': tag, 'data': data})
return ret
|
Add id tot he beacon event dataset
|
Add id tot he beacon event dataset
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
'''
This package contains the loader modules for the salt streams system
'''
# Import salt libs
import salt.loader
class Beacon(object):
'''
This class is used to eveluate and execute on the beacon system
'''
def __init__(self, opts):
self.opts = opts
self.beacons = salt.loader.beacons(opts)
def process(self, config):
'''
Process the configured beacons
The config must be a dict and looks like this in yaml
code_block:: yaml
beacons:
inotify:
- /etc/fstab
- /var/cache/foo/*
'''
ret = []
for mod in config:
fun_str = '{0}.beacon'.format(mod)
if fun_str in self.beacons:
tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
raw = self.beacons[fun_str](config[mod])
for data in raw:
if 'tag' in data:
tag += data.pop('tag')
+ if not 'id' in data:
+ data['id'] = self.opts['id']
ret.append({'tag': tag, 'data': data})
return ret
|
Add id tot he beacon event dataset
|
## Code Before:
'''
This package contains the loader modules for the salt streams system
'''
# Import salt libs
import salt.loader
class Beacon(object):
'''
This class is used to eveluate and execute on the beacon system
'''
def __init__(self, opts):
self.opts = opts
self.beacons = salt.loader.beacons(opts)
def process(self, config):
'''
Process the configured beacons
The config must be a dict and looks like this in yaml
code_block:: yaml
beacons:
inotify:
- /etc/fstab
- /var/cache/foo/*
'''
ret = []
for mod in config:
fun_str = '{0}.beacon'.format(mod)
if fun_str in self.beacons:
tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
raw = self.beacons[fun_str](config[mod])
for data in raw:
if 'tag' in data:
tag += data.pop('tag')
ret.append({'tag': tag, 'data': data})
return ret
## Instruction:
Add id tot he beacon event dataset
## Code After:
'''
This package contains the loader modules for the salt streams system
'''
# Import salt libs
import salt.loader
class Beacon(object):
'''
This class is used to eveluate and execute on the beacon system
'''
def __init__(self, opts):
self.opts = opts
self.beacons = salt.loader.beacons(opts)
def process(self, config):
'''
Process the configured beacons
The config must be a dict and looks like this in yaml
code_block:: yaml
beacons:
inotify:
- /etc/fstab
- /var/cache/foo/*
'''
ret = []
for mod in config:
fun_str = '{0}.beacon'.format(mod)
if fun_str in self.beacons:
tag = 'salt/beacon/{0}/{1}/'.format(self.opts['id'], mod)
raw = self.beacons[fun_str](config[mod])
for data in raw:
if 'tag' in data:
tag += data.pop('tag')
if not 'id' in data:
data['id'] = self.opts['id']
ret.append({'tag': tag, 'data': data})
return ret
|
# ... existing code ...
tag += data.pop('tag')
if not 'id' in data:
data['id'] = self.opts['id']
ret.append({'tag': tag, 'data': data})
# ... rest of the code ...
|
39fbce2a0e225591423f9b2d1edd111822063466
|
app/core/api.py
|
app/core/api.py
|
from flask import jsonify, request
from ..main import app
@app.route('/api/ip')
def api_ip():
return jsonify({'Success': True, 'ipAddress': get_client_ip()})
def get_client_ip():
return request.headers.get('X-Forwarded-For') or request.remote_addr
|
from flask import jsonify, request
from ..main import app
@app.route('/api/ip')
def api_ip():
"""Return client IP"""
return api_reply({'ipAddress': get_client_ip()})
def get_client_ip():
"""Return the client x-forwarded-for header or IP address"""
return request.headers.get('X-Forwarded-For') or request.remote_addr
def api_reply(body={}, success=True):
"""Create a standard API reply interface"""
return jsonify({**body, 'success': success})
|
Add a standard API reply interface
|
Add a standard API reply interface
|
Python
|
mit
|
jniedrauer/jniedrauer.com,jniedrauer/jniedrauer.com,jniedrauer/jniedrauer.com
|
from flask import jsonify, request
from ..main import app
@app.route('/api/ip')
def api_ip():
+ """Return client IP"""
- return jsonify({'Success': True, 'ipAddress': get_client_ip()})
+ return api_reply({'ipAddress': get_client_ip()})
def get_client_ip():
+ """Return the client x-forwarded-for header or IP address"""
return request.headers.get('X-Forwarded-For') or request.remote_addr
+
+ def api_reply(body={}, success=True):
+ """Create a standard API reply interface"""
+ return jsonify({**body, 'success': success})
+
|
Add a standard API reply interface
|
## Code Before:
from flask import jsonify, request
from ..main import app
@app.route('/api/ip')
def api_ip():
return jsonify({'Success': True, 'ipAddress': get_client_ip()})
def get_client_ip():
return request.headers.get('X-Forwarded-For') or request.remote_addr
## Instruction:
Add a standard API reply interface
## Code After:
from flask import jsonify, request
from ..main import app
@app.route('/api/ip')
def api_ip():
"""Return client IP"""
return api_reply({'ipAddress': get_client_ip()})
def get_client_ip():
"""Return the client x-forwarded-for header or IP address"""
return request.headers.get('X-Forwarded-For') or request.remote_addr
def api_reply(body={}, success=True):
"""Create a standard API reply interface"""
return jsonify({**body, 'success': success})
|
// ... existing code ...
def api_ip():
"""Return client IP"""
return api_reply({'ipAddress': get_client_ip()})
// ... modified code ...
def get_client_ip():
"""Return the client x-forwarded-for header or IP address"""
return request.headers.get('X-Forwarded-For') or request.remote_addr
def api_reply(body={}, success=True):
"""Create a standard API reply interface"""
return jsonify({**body, 'success': success})
// ... rest of the code ...
|
84816dda37d071e521f65449ee59c992b5e302bc
|
megaprojects/blog/models.py
|
megaprojects/blog/models.py
|
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from core.models import AuthorModel, ImageModel
from .managers import PostManager, ImageManager
import util
STATUS_CHOICES = [('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')]
class Post(AuthorModel):
pubdate = models.DateTimeField('publication date', default=timezone.now())
status = models.CharField(max_length=1, choices=STATUS_CHOICES)
body = models.TextField()
drupal_id = models.IntegerField('drupal NID', unique=True, blank=True,
null=True, help_text='Node ID from the previous Drupal website (imported).')
objects = PostManager()
class Meta:
ordering = ['-pubdate']
class Image(ImageModel):
image = models.ImageField(upload_to=util.get_image_path)
post = models.ForeignKey(Post)
objects = ImageManager()
class Meta:
ordering = ['-post__pubdate', '-created']
|
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from core.models import AuthorModel, ImageModel
from .managers import PostManager, ImageManager
import util
STATUS_CHOICES = [('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')]
class Post(AuthorModel):
pubdate = models.DateTimeField('publication date', default=timezone.now())
status = models.CharField(max_length=1, choices=STATUS_CHOICES)
body = models.TextField()
drupal_id = models.IntegerField('drupal NID', unique=True, blank=True,
null=True, help_text='Node ID from the previous Drupal website (imported).')
objects = PostManager()
@property
def thumbnail(self):
if self.image_set.published():
return self.image_set.published()[:1].get()
class Meta:
ordering = ['-pubdate']
class Image(ImageModel):
image = models.ImageField(upload_to=util.get_image_path)
post = models.ForeignKey(Post)
objects = ImageManager()
class Meta:
ordering = ['-post__pubdate', '-created']
|
Add property for Post thumbnail
|
Add property for Post thumbnail
|
Python
|
apache-2.0
|
megaprojectske/megaprojects.co.ke,megaprojectske/megaprojects.co.ke,megaprojectske/megaprojects.co.ke
|
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from core.models import AuthorModel, ImageModel
from .managers import PostManager, ImageManager
import util
STATUS_CHOICES = [('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')]
class Post(AuthorModel):
pubdate = models.DateTimeField('publication date', default=timezone.now())
status = models.CharField(max_length=1, choices=STATUS_CHOICES)
body = models.TextField()
drupal_id = models.IntegerField('drupal NID', unique=True, blank=True,
null=True, help_text='Node ID from the previous Drupal website (imported).')
objects = PostManager()
+ @property
+ def thumbnail(self):
+ if self.image_set.published():
+ return self.image_set.published()[:1].get()
+
class Meta:
ordering = ['-pubdate']
class Image(ImageModel):
image = models.ImageField(upload_to=util.get_image_path)
post = models.ForeignKey(Post)
objects = ImageManager()
class Meta:
ordering = ['-post__pubdate', '-created']
|
Add property for Post thumbnail
|
## Code Before:
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from core.models import AuthorModel, ImageModel
from .managers import PostManager, ImageManager
import util
STATUS_CHOICES = [('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')]
class Post(AuthorModel):
pubdate = models.DateTimeField('publication date', default=timezone.now())
status = models.CharField(max_length=1, choices=STATUS_CHOICES)
body = models.TextField()
drupal_id = models.IntegerField('drupal NID', unique=True, blank=True,
null=True, help_text='Node ID from the previous Drupal website (imported).')
objects = PostManager()
class Meta:
ordering = ['-pubdate']
class Image(ImageModel):
image = models.ImageField(upload_to=util.get_image_path)
post = models.ForeignKey(Post)
objects = ImageManager()
class Meta:
ordering = ['-post__pubdate', '-created']
## Instruction:
Add property for Post thumbnail
## Code After:
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from core.models import AuthorModel, ImageModel
from .managers import PostManager, ImageManager
import util
STATUS_CHOICES = [('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')]
class Post(AuthorModel):
pubdate = models.DateTimeField('publication date', default=timezone.now())
status = models.CharField(max_length=1, choices=STATUS_CHOICES)
body = models.TextField()
drupal_id = models.IntegerField('drupal NID', unique=True, blank=True,
null=True, help_text='Node ID from the previous Drupal website (imported).')
objects = PostManager()
@property
def thumbnail(self):
if self.image_set.published():
return self.image_set.published()[:1].get()
class Meta:
ordering = ['-pubdate']
class Image(ImageModel):
image = models.ImageField(upload_to=util.get_image_path)
post = models.ForeignKey(Post)
objects = ImageManager()
class Meta:
ordering = ['-post__pubdate', '-created']
|
# ... existing code ...
@property
def thumbnail(self):
if self.image_set.published():
return self.image_set.published()[:1].get()
class Meta:
# ... rest of the code ...
|
4a4cb336839d42cee872e52399e17249b948492a
|
rackattack/common/globallock.py
|
rackattack/common/globallock.py
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
def prettyStack():
return "\n".join([line.strip() for line in traceback.format_stack()])
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
Print the stack info more clearly, when holding the global lock for too long
|
Print the stack info more clearly, when holding the global lock for too long
|
Python
|
apache-2.0
|
Stratoscale/rackattack-virtual,eliran-stratoscale/rackattack-virtual,Stratoscale/rackattack-virtual,eliran-stratoscale/rackattack-virtual
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
+
+
+ def prettyStack():
+ return "\n".join([line.strip() for line in traceback.format_stack()])
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
- took=took, stack=traceback.format_stack()))
+ took=took, stack=prettyStack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
- took=took, stack=traceback.format_stack()))
+ took=took, stack=prettyStack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
Print the stack info more clearly, when holding the global lock for too long
|
## Code Before:
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
def assertLocked():
assert not _lock.acquire(False)
return True
## Instruction:
Print the stack info more clearly, when holding the global lock for too long
## Code After:
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
def prettyStack():
return "\n".join([line.strip() for line in traceback.format_stack()])
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
...
_lock = threading.Lock()
def prettyStack():
return "\n".join([line.strip() for line in traceback.format_stack()])
...
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
yield
...
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
...
|
44faefd4bd0bfa3dede8686903759a033c1072d6
|
flask_simple_serializer/response.py
|
flask_simple_serializer/response.py
|
import json
from flask import Response as SimpleResponse
from .status_codes import HTTP_200_OK
from .serializers import BaseSerializer
class Response(SimpleResponse):
def __init__(self, data, headers=None, status_code=HTTP_200_OK):
"""
For now the content/type always will be application/json.
We can change it to make a Web Browseable API
"""
if isinstance(data, BaseSerializer):
msg = (
'You passed a Serializer instance as data, but '
'probably meant to pass serialized `.data` or '
'`.errors`. representation.'
)
raise AssertionError(msg)
data = json.dumps(data)
content_type = "application/json"
super(Response, self).__init__(
data, headers=None, content_type=content_type, status=status_code
)
|
from flask import Response as SimpleResponse
from flask import json
from .status_codes import HTTP_200_OK
from .serializers import BaseSerializer
class Response(SimpleResponse):
def __init__(self, data, headers=None, status_code=HTTP_200_OK):
"""
For now the content/type always will be application/json.
We can change it to make a Web Browseable API
"""
if isinstance(data, BaseSerializer):
msg = (
'You passed a Serializer instance as data, but '
'probably meant to pass serialized `.data` or '
'`.errors`. representation.'
)
raise AssertionError(msg)
data = json.dumps(data)
content_type = "application/json"
super(Response, self).__init__(
data, headers=None, content_type=content_type, status=status_code
)
|
Replace json for flask.json to manage the Response
|
Replace json for flask.json to manage the Response
|
Python
|
mit
|
marcosschroh/Flask-Simple-Serializer
|
- import json
-
from flask import Response as SimpleResponse
+ from flask import json
from .status_codes import HTTP_200_OK
from .serializers import BaseSerializer
class Response(SimpleResponse):
def __init__(self, data, headers=None, status_code=HTTP_200_OK):
"""
For now the content/type always will be application/json.
We can change it to make a Web Browseable API
"""
if isinstance(data, BaseSerializer):
msg = (
'You passed a Serializer instance as data, but '
'probably meant to pass serialized `.data` or '
'`.errors`. representation.'
)
raise AssertionError(msg)
data = json.dumps(data)
content_type = "application/json"
super(Response, self).__init__(
data, headers=None, content_type=content_type, status=status_code
)
|
Replace json for flask.json to manage the Response
|
## Code Before:
import json
from flask import Response as SimpleResponse
from .status_codes import HTTP_200_OK
from .serializers import BaseSerializer
class Response(SimpleResponse):
def __init__(self, data, headers=None, status_code=HTTP_200_OK):
"""
For now the content/type always will be application/json.
We can change it to make a Web Browseable API
"""
if isinstance(data, BaseSerializer):
msg = (
'You passed a Serializer instance as data, but '
'probably meant to pass serialized `.data` or '
'`.errors`. representation.'
)
raise AssertionError(msg)
data = json.dumps(data)
content_type = "application/json"
super(Response, self).__init__(
data, headers=None, content_type=content_type, status=status_code
)
## Instruction:
Replace json for flask.json to manage the Response
## Code After:
from flask import Response as SimpleResponse
from flask import json
from .status_codes import HTTP_200_OK
from .serializers import BaseSerializer
class Response(SimpleResponse):
def __init__(self, data, headers=None, status_code=HTTP_200_OK):
"""
For now the content/type always will be application/json.
We can change it to make a Web Browseable API
"""
if isinstance(data, BaseSerializer):
msg = (
'You passed a Serializer instance as data, but '
'probably meant to pass serialized `.data` or '
'`.errors`. representation.'
)
raise AssertionError(msg)
data = json.dumps(data)
content_type = "application/json"
super(Response, self).__init__(
data, headers=None, content_type=content_type, status=status_code
)
|
// ... existing code ...
from flask import Response as SimpleResponse
from flask import json
// ... rest of the code ...
|
d63d6070576bf22d60bf9684e417163201814353
|
webapp/worker.py
|
webapp/worker.py
|
"""Run a worker for the job queue."""
import os
import sys
from redis import StrictRedis
from rq import Connection, Queue, Worker
from bootstrap.util import app_context
if __name__ == '__main__':
config_name = os.environ.get('ENVIRONMENT')
if config_name is None:
sys.stderr.write("Environment variable 'ENVIRONMENT' must be set but isn't.")
sys.exit()
with app_context(config_name) as app:
redis = StrictRedis(app.config['REDIS_URL'])
with Connection(redis):
queues = [Queue()]
worker = Worker(queues)
worker.work()
|
"""Run a worker for the job queue."""
import sys
from redis import StrictRedis
from rq import Connection, Queue, Worker
from bootstrap.util import app_context, get_config_name_from_env
if __name__ == '__main__':
try:
config_name = get_config_name_from_env()
except Exception as e:
sys.stderr.write(str(e) + '\n')
sys.exit()
with app_context(config_name) as app:
redis = StrictRedis(app.config['REDIS_URL'])
with Connection(redis):
queues = [Queue()]
worker = Worker(queues)
worker.work()
|
Use bootstrap utility to retrieve the configuration name from the environment.
|
Use bootstrap utility to retrieve the configuration name from the environment.
|
Python
|
bsd-3-clause
|
m-ober/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps
|
"""Run a worker for the job queue."""
- import os
import sys
from redis import StrictRedis
from rq import Connection, Queue, Worker
- from bootstrap.util import app_context
+ from bootstrap.util import app_context, get_config_name_from_env
if __name__ == '__main__':
- config_name = os.environ.get('ENVIRONMENT')
- if config_name is None:
- sys.stderr.write("Environment variable 'ENVIRONMENT' must be set but isn't.")
+ try:
+ config_name = get_config_name_from_env()
+ except Exception as e:
+ sys.stderr.write(str(e) + '\n')
sys.exit()
with app_context(config_name) as app:
redis = StrictRedis(app.config['REDIS_URL'])
with Connection(redis):
queues = [Queue()]
worker = Worker(queues)
worker.work()
|
Use bootstrap utility to retrieve the configuration name from the environment.
|
## Code Before:
"""Run a worker for the job queue."""
import os
import sys
from redis import StrictRedis
from rq import Connection, Queue, Worker
from bootstrap.util import app_context
if __name__ == '__main__':
config_name = os.environ.get('ENVIRONMENT')
if config_name is None:
sys.stderr.write("Environment variable 'ENVIRONMENT' must be set but isn't.")
sys.exit()
with app_context(config_name) as app:
redis = StrictRedis(app.config['REDIS_URL'])
with Connection(redis):
queues = [Queue()]
worker = Worker(queues)
worker.work()
## Instruction:
Use bootstrap utility to retrieve the configuration name from the environment.
## Code After:
"""Run a worker for the job queue."""
import sys
from redis import StrictRedis
from rq import Connection, Queue, Worker
from bootstrap.util import app_context, get_config_name_from_env
if __name__ == '__main__':
try:
config_name = get_config_name_from_env()
except Exception as e:
sys.stderr.write(str(e) + '\n')
sys.exit()
with app_context(config_name) as app:
redis = StrictRedis(app.config['REDIS_URL'])
with Connection(redis):
queues = [Queue()]
worker = Worker(queues)
worker.work()
|
// ... existing code ...
import sys
// ... modified code ...
from bootstrap.util import app_context, get_config_name_from_env
...
if __name__ == '__main__':
try:
config_name = get_config_name_from_env()
except Exception as e:
sys.stderr.write(str(e) + '\n')
sys.exit()
// ... rest of the code ...
|
5d9eabe588231444083d13dc50371ea6952d445e
|
mirrit/web/models.py
|
mirrit/web/models.py
|
from bson.objectid import ObjectId
from humbledb import Mongo, Document
class ClassProperty (property):
"""Subclass property to make classmethod properties possible"""
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class User(Document):
username = ''
password = ''
email = ''
config_database = 'mirrit'
config_collection = 'users'
@property
def id(self):
return unicode(self._id)
@property
def user_id(self):
return unicode(self._id)
@staticmethod
def get_by_login(cls, username, password):
with Mongo:
return cls.find({'username': username,
'password': password})
def persist(self):
with Mongo:
if self._id:
super(User, self).__self_class__.update(
{'_id': self._id}, self, w=1)
else:
super(User, self).__self_class__.insert(self, w=1)
class Wrapper(object):
def get(self, id):
with Mongo:
return User.find({'_id': ObjectId(id)})
wrapper = Wrapper()
User.query = wrapper
|
from bson.objectid import ObjectId
from humbledb import Mongo, Document
class ClassProperty (property):
"""Subclass property to make classmethod properties possible"""
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class User(Document):
username = ''
password = ''
email = ''
github_access_token = ''
config_database = 'mirrit'
config_collection = 'users'
@property
def id(self):
return unicode(self._id)
@property
def user_id(self):
return unicode(self._id)
@staticmethod
def get_by_login(cls, username, password):
with Mongo:
return cls.find({'username': username,
'password': password})
def persist(self):
with Mongo:
if self._id:
super(User, self).__self_class__.update(
{'_id': self._id}, self, w=1)
else:
super(User, self).__self_class__.insert(self, w=1)
class Wrapper(object):
def get(self, id):
with Mongo:
return User.find({'_id': ObjectId(id)})
wrapper = Wrapper()
User.query = wrapper
|
Add github token to model
|
Add github token to model
|
Python
|
bsd-3-clause
|
1stvamp/mirrit
|
from bson.objectid import ObjectId
from humbledb import Mongo, Document
class ClassProperty (property):
"""Subclass property to make classmethod properties possible"""
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class User(Document):
username = ''
password = ''
email = ''
+ github_access_token = ''
config_database = 'mirrit'
config_collection = 'users'
@property
def id(self):
return unicode(self._id)
@property
def user_id(self):
return unicode(self._id)
@staticmethod
def get_by_login(cls, username, password):
with Mongo:
return cls.find({'username': username,
'password': password})
def persist(self):
with Mongo:
if self._id:
super(User, self).__self_class__.update(
{'_id': self._id}, self, w=1)
else:
super(User, self).__self_class__.insert(self, w=1)
class Wrapper(object):
def get(self, id):
with Mongo:
return User.find({'_id': ObjectId(id)})
wrapper = Wrapper()
User.query = wrapper
|
Add github token to model
|
## Code Before:
from bson.objectid import ObjectId
from humbledb import Mongo, Document
class ClassProperty (property):
"""Subclass property to make classmethod properties possible"""
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class User(Document):
username = ''
password = ''
email = ''
config_database = 'mirrit'
config_collection = 'users'
@property
def id(self):
return unicode(self._id)
@property
def user_id(self):
return unicode(self._id)
@staticmethod
def get_by_login(cls, username, password):
with Mongo:
return cls.find({'username': username,
'password': password})
def persist(self):
with Mongo:
if self._id:
super(User, self).__self_class__.update(
{'_id': self._id}, self, w=1)
else:
super(User, self).__self_class__.insert(self, w=1)
class Wrapper(object):
def get(self, id):
with Mongo:
return User.find({'_id': ObjectId(id)})
wrapper = Wrapper()
User.query = wrapper
## Instruction:
Add github token to model
## Code After:
from bson.objectid import ObjectId
from humbledb import Mongo, Document
class ClassProperty (property):
"""Subclass property to make classmethod properties possible"""
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
class User(Document):
username = ''
password = ''
email = ''
github_access_token = ''
config_database = 'mirrit'
config_collection = 'users'
@property
def id(self):
return unicode(self._id)
@property
def user_id(self):
return unicode(self._id)
@staticmethod
def get_by_login(cls, username, password):
with Mongo:
return cls.find({'username': username,
'password': password})
def persist(self):
with Mongo:
if self._id:
super(User, self).__self_class__.update(
{'_id': self._id}, self, w=1)
else:
super(User, self).__self_class__.insert(self, w=1)
class Wrapper(object):
def get(self, id):
with Mongo:
return User.find({'_id': ObjectId(id)})
wrapper = Wrapper()
User.query = wrapper
|
...
email = ''
github_access_token = ''
config_database = 'mirrit'
...
|
2aa415cae1cb7ed0bb2b7fdaf51d9d5eaceaa768
|
sweettooth/extensions/admin.py
|
sweettooth/extensions/admin.py
|
from django.contrib import admin
from extensions.models import Extension, ExtensionVersion
from extensions.models import STATUS_ACTIVE, STATUS_REJECTED
from review.models import CodeReview
class CodeReviewAdmin(admin.TabularInline):
model = CodeReview
fields = 'reviewer', 'comments',
class ExtensionVersionAdmin(admin.ModelAdmin):
list_display = 'title', 'status',
list_display_links = 'title',
actions = 'approve', 'reject',
def title(self, ver):
return "%s (%d)" % (ver.extension.uuid, ver.version)
title.short_description = "Extension (version)"
inlines = [CodeReviewAdmin]
def approve(self, request, queryset):
queryset.update(status=STATUS_ACTIVE)
def reject(self, request, queryset):
queryset.update(status=STATUS_REJECTED)
admin.site.register(ExtensionVersion, ExtensionVersionAdmin)
class ExtensionVersionInline(admin.TabularInline):
model = ExtensionVersion
fields = 'version', 'status',
extra = 0
class ExtensionAdmin(admin.ModelAdmin):
list_display = 'name', 'uuid', 'num_versions', 'creator',
list_display_links = 'name', 'uuid',
search_fields = ('uuid', 'name')
def num_versions(self, ext):
return ext.versions.count()
num_versions.short_description = "#V"
inlines = [ExtensionVersionInline]
admin.site.register(Extension, ExtensionAdmin)
|
from django.contrib import admin
from extensions.models import Extension, ExtensionVersion
from extensions.models import STATUS_ACTIVE, STATUS_REJECTED
from review.models import CodeReview
class CodeReviewAdmin(admin.TabularInline):
model = CodeReview
fields = 'reviewer', 'comments',
class ExtensionVersionAdmin(admin.ModelAdmin):
list_display = 'title', 'status',
list_display_links = 'title',
actions = 'approve', 'reject',
def title(self, ver):
return "%s (%d)" % (ver.extension.uuid, ver.version)
title.short_description = "Extension (version)"
inlines = [CodeReviewAdmin]
def approve(self, request, queryset):
queryset.update(status=STATUS_ACTIVE)
def reject(self, request, queryset):
queryset.update(status=STATUS_REJECTED)
admin.site.register(ExtensionVersion, ExtensionVersionAdmin)
class ExtensionVersionInline(admin.TabularInline):
model = ExtensionVersion
fields = 'version', 'status',
extra = 0
class ExtensionAdmin(admin.ModelAdmin):
list_display = 'name', 'uuid', 'num_versions', 'creator',
list_display_links = 'name', 'uuid',
search_fields = ('uuid', 'name')
raw_id_fields = ('user',)
def num_versions(self, ext):
return ext.versions.count()
num_versions.short_description = "#V"
inlines = [ExtensionVersionInline]
admin.site.register(Extension, ExtensionAdmin)
|
Make the user field into a raw field
|
extensions: Make the user field into a raw field
It's a bit annoying having to navigate through a 20,000 line combobox.
|
Python
|
agpl-3.0
|
GNOME/extensions-web,GNOME/extensions-web,GNOME/extensions-web,GNOME/extensions-web,magcius/sweettooth,magcius/sweettooth
|
from django.contrib import admin
from extensions.models import Extension, ExtensionVersion
from extensions.models import STATUS_ACTIVE, STATUS_REJECTED
from review.models import CodeReview
class CodeReviewAdmin(admin.TabularInline):
model = CodeReview
fields = 'reviewer', 'comments',
class ExtensionVersionAdmin(admin.ModelAdmin):
list_display = 'title', 'status',
list_display_links = 'title',
actions = 'approve', 'reject',
def title(self, ver):
return "%s (%d)" % (ver.extension.uuid, ver.version)
title.short_description = "Extension (version)"
inlines = [CodeReviewAdmin]
def approve(self, request, queryset):
queryset.update(status=STATUS_ACTIVE)
def reject(self, request, queryset):
queryset.update(status=STATUS_REJECTED)
admin.site.register(ExtensionVersion, ExtensionVersionAdmin)
class ExtensionVersionInline(admin.TabularInline):
model = ExtensionVersion
fields = 'version', 'status',
extra = 0
class ExtensionAdmin(admin.ModelAdmin):
list_display = 'name', 'uuid', 'num_versions', 'creator',
list_display_links = 'name', 'uuid',
search_fields = ('uuid', 'name')
+ raw_id_fields = ('user',)
def num_versions(self, ext):
return ext.versions.count()
num_versions.short_description = "#V"
inlines = [ExtensionVersionInline]
admin.site.register(Extension, ExtensionAdmin)
|
Make the user field into a raw field
|
## Code Before:
from django.contrib import admin
from extensions.models import Extension, ExtensionVersion
from extensions.models import STATUS_ACTIVE, STATUS_REJECTED
from review.models import CodeReview
class CodeReviewAdmin(admin.TabularInline):
model = CodeReview
fields = 'reviewer', 'comments',
class ExtensionVersionAdmin(admin.ModelAdmin):
list_display = 'title', 'status',
list_display_links = 'title',
actions = 'approve', 'reject',
def title(self, ver):
return "%s (%d)" % (ver.extension.uuid, ver.version)
title.short_description = "Extension (version)"
inlines = [CodeReviewAdmin]
def approve(self, request, queryset):
queryset.update(status=STATUS_ACTIVE)
def reject(self, request, queryset):
queryset.update(status=STATUS_REJECTED)
admin.site.register(ExtensionVersion, ExtensionVersionAdmin)
class ExtensionVersionInline(admin.TabularInline):
model = ExtensionVersion
fields = 'version', 'status',
extra = 0
class ExtensionAdmin(admin.ModelAdmin):
list_display = 'name', 'uuid', 'num_versions', 'creator',
list_display_links = 'name', 'uuid',
search_fields = ('uuid', 'name')
def num_versions(self, ext):
return ext.versions.count()
num_versions.short_description = "#V"
inlines = [ExtensionVersionInline]
admin.site.register(Extension, ExtensionAdmin)
## Instruction:
Make the user field into a raw field
## Code After:
from django.contrib import admin
from extensions.models import Extension, ExtensionVersion
from extensions.models import STATUS_ACTIVE, STATUS_REJECTED
from review.models import CodeReview
class CodeReviewAdmin(admin.TabularInline):
model = CodeReview
fields = 'reviewer', 'comments',
class ExtensionVersionAdmin(admin.ModelAdmin):
list_display = 'title', 'status',
list_display_links = 'title',
actions = 'approve', 'reject',
def title(self, ver):
return "%s (%d)" % (ver.extension.uuid, ver.version)
title.short_description = "Extension (version)"
inlines = [CodeReviewAdmin]
def approve(self, request, queryset):
queryset.update(status=STATUS_ACTIVE)
def reject(self, request, queryset):
queryset.update(status=STATUS_REJECTED)
admin.site.register(ExtensionVersion, ExtensionVersionAdmin)
class ExtensionVersionInline(admin.TabularInline):
model = ExtensionVersion
fields = 'version', 'status',
extra = 0
class ExtensionAdmin(admin.ModelAdmin):
list_display = 'name', 'uuid', 'num_versions', 'creator',
list_display_links = 'name', 'uuid',
search_fields = ('uuid', 'name')
raw_id_fields = ('user',)
def num_versions(self, ext):
return ext.versions.count()
num_versions.short_description = "#V"
inlines = [ExtensionVersionInline]
admin.site.register(Extension, ExtensionAdmin)
|
// ... existing code ...
search_fields = ('uuid', 'name')
raw_id_fields = ('user',)
// ... rest of the code ...
|
b7559972bc28532108027784a05e8ffc43cb398a
|
tests/test_models.py
|
tests/test_models.py
|
import os
import shutil
import unittest
from responsive_wrapper import models
class TestResponsive_wrapper(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
|
from django.conf import settings
from django.test import TestCase
from responsive_wrapper import models
class TestResponsive_wrapper(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
|
Replace unittest.TestCase with Django’s own TestCase.
|
Replace unittest.TestCase with Django’s own TestCase.
|
Python
|
bsd-3-clause
|
mishbahr/djangocms-responsive-wrapper,mishbahr/djangocms-responsive-wrapper,mishbahr/djangocms-responsive-wrapper
|
+ from django.conf import settings
+ from django.test import TestCase
- import os
- import shutil
- import unittest
from responsive_wrapper import models
- class TestResponsive_wrapper(unittest.TestCase):
+ class TestResponsive_wrapper(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
|
Replace unittest.TestCase with Django’s own TestCase.
|
## Code Before:
import os
import shutil
import unittest
from responsive_wrapper import models
class TestResponsive_wrapper(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
## Instruction:
Replace unittest.TestCase with Django’s own TestCase.
## Code After:
from django.conf import settings
from django.test import TestCase
from responsive_wrapper import models
class TestResponsive_wrapper(TestCase):
def setUp(self):
pass
def test_something(self):
pass
def tearDown(self):
pass
|
// ... existing code ...
from django.conf import settings
from django.test import TestCase
// ... modified code ...
class TestResponsive_wrapper(TestCase):
// ... rest of the code ...
|
35fe7bb6411c8009253bf66fb7739a5d49a7028d
|
scuole/counties/management/commands/bootstrapcounties.py
|
scuole/counties/management/commands/bootstrapcounties.py
|
from __future__ import absolute_import, unicode_literals
import csv
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from ...models import County
from scuole.states.models import State
class Command(BaseCommand):
help = 'Bootstraps County models using DSHS county list.'
def handle(self, *args, **options):
self.texas = State.objects.get(name='Texas')
counties_file = os.path.join(
settings.DATA_FOLDER, 'counties', 'counties.csv')
with open(counties_file, 'rU') as f:
reader = csv.DictReader(f)
counties = []
for row in reader:
counties.append(self.create_county(row))
County.objects.bulk_create(counties)
def create_county(self, county):
return County(
name=county['County Name'],
slug=slugify(county['County Name']),
fips=county['FIPS #'],
state=self.texas,
)
|
from __future__ import absolute_import, unicode_literals
import csv
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from ...models import County
from scuole.states.models import State
class Command(BaseCommand):
help = 'Bootstraps County models using DSHS county list.'
def handle(self, *args, **options):
self.texas = State.objects.get(name='Texas')
counties_file = os.path.join(
settings.DATA_FOLDER, 'counties', 'counties.csv')
with open(counties_file, 'rU') as f:
reader = csv.DictReader(f)
counties = []
for row in reader:
counties.append(self.create_county(row))
County.objects.bulk_create(counties)
def create_county(self, county):
self.stdout.write(
'Creating {} County...'.format(county['County Name']))
return County(
name=county['County Name'],
slug=slugify(county['County Name']),
fips=county['FIPS #'],
state=self.texas,
)
|
Add feedback during county loader
|
Add feedback during county loader
|
Python
|
mit
|
texastribune/scuole,texastribune/scuole,texastribune/scuole,texastribune/scuole
|
from __future__ import absolute_import, unicode_literals
import csv
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from ...models import County
from scuole.states.models import State
class Command(BaseCommand):
help = 'Bootstraps County models using DSHS county list.'
def handle(self, *args, **options):
self.texas = State.objects.get(name='Texas')
counties_file = os.path.join(
settings.DATA_FOLDER, 'counties', 'counties.csv')
with open(counties_file, 'rU') as f:
reader = csv.DictReader(f)
counties = []
for row in reader:
counties.append(self.create_county(row))
County.objects.bulk_create(counties)
def create_county(self, county):
+ self.stdout.write(
+ 'Creating {} County...'.format(county['County Name']))
+
return County(
name=county['County Name'],
slug=slugify(county['County Name']),
fips=county['FIPS #'],
state=self.texas,
)
|
Add feedback during county loader
|
## Code Before:
from __future__ import absolute_import, unicode_literals
import csv
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from ...models import County
from scuole.states.models import State
class Command(BaseCommand):
help = 'Bootstraps County models using DSHS county list.'
def handle(self, *args, **options):
self.texas = State.objects.get(name='Texas')
counties_file = os.path.join(
settings.DATA_FOLDER, 'counties', 'counties.csv')
with open(counties_file, 'rU') as f:
reader = csv.DictReader(f)
counties = []
for row in reader:
counties.append(self.create_county(row))
County.objects.bulk_create(counties)
def create_county(self, county):
return County(
name=county['County Name'],
slug=slugify(county['County Name']),
fips=county['FIPS #'],
state=self.texas,
)
## Instruction:
Add feedback during county loader
## Code After:
from __future__ import absolute_import, unicode_literals
import csv
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.text import slugify
from ...models import County
from scuole.states.models import State
class Command(BaseCommand):
help = 'Bootstraps County models using DSHS county list.'
def handle(self, *args, **options):
self.texas = State.objects.get(name='Texas')
counties_file = os.path.join(
settings.DATA_FOLDER, 'counties', 'counties.csv')
with open(counties_file, 'rU') as f:
reader = csv.DictReader(f)
counties = []
for row in reader:
counties.append(self.create_county(row))
County.objects.bulk_create(counties)
def create_county(self, county):
self.stdout.write(
'Creating {} County...'.format(county['County Name']))
return County(
name=county['County Name'],
slug=slugify(county['County Name']),
fips=county['FIPS #'],
state=self.texas,
)
|
# ... existing code ...
def create_county(self, county):
self.stdout.write(
'Creating {} County...'.format(county['County Name']))
return County(
# ... rest of the code ...
|
8da30d3752fd6a056891960aa2892bcd8001c79b
|
lintreview/processor.py
|
lintreview/processor.py
|
import logging
import lintreview.tools as tools
from lintreview.diff import DiffCollection
from lintreview.review import Problems
from lintreview.review import Review
log = logging.getLogger(__name__)
class Processor(object):
def __init__(self, client, number, head, target_path):
self._client = client
self._number = number
self._head = head
self._target_path = target_path
self._changes = None
self._problems = Problems(target_path)
self._review = Review(client, number)
def load_changes(self):
log.info('Loading pull request patches from github.')
files = self._client.pull_requests.list_files(self._number)
pull_request_patches = files.all()
self._changes = DiffCollection(pull_request_patches)
self._problems.set_changes(self._changes)
def run_tools(self, repo_config):
if not self._changes:
raise RuntimeError('No loaded changes, cannot run tools. '
'Try calling load_changes first.')
files_to_check = self._changes.get_files(append_base=self._target_path)
tools.run(
repo_config,
self._problems,
files_to_check,
self._target_path)
def publish(self, wait_time=0):
self._problems.limit_to_changes()
self._review.publish(self._problems, self._head, wait_time)
|
import logging
import lintreview.tools as tools
from lintreview.diff import DiffCollection
from lintreview.review import Problems
from lintreview.review import Review
log = logging.getLogger(__name__)
class Processor(object):
def __init__(self, client, number, head, target_path):
self._client = client
self._number = number
self._head = head
self._target_path = target_path
self._changes = None
self._problems = Problems(target_path)
self._review = Review(client, number)
def load_changes(self):
log.info('Loading pull request patches from github.')
files = self._client.pull_requests.list_files(self._number)
pull_request_patches = files.all()
self._changes = DiffCollection(pull_request_patches)
self._problems.set_changes(self._changes)
def run_tools(self, repo_config):
if self._changes is None:
raise RuntimeError('No loaded changes, cannot run tools. '
'Try calling load_changes first.')
files_to_check = self._changes.get_files(append_base=self._target_path)
tools.run(
repo_config,
self._problems,
files_to_check,
self._target_path)
def publish(self, wait_time=0):
self._problems.limit_to_changes()
self._review.publish(self._problems, self._head, wait_time)
|
Make check against None instead of falsey things.
|
Make check against None instead of falsey things.
|
Python
|
mit
|
markstory/lint-review,markstory/lint-review,adrianmoisey/lint-review,zoidbergwill/lint-review,zoidbergwill/lint-review,adrianmoisey/lint-review,zoidbergwill/lint-review,markstory/lint-review
|
import logging
import lintreview.tools as tools
from lintreview.diff import DiffCollection
from lintreview.review import Problems
from lintreview.review import Review
log = logging.getLogger(__name__)
class Processor(object):
def __init__(self, client, number, head, target_path):
self._client = client
self._number = number
self._head = head
self._target_path = target_path
self._changes = None
self._problems = Problems(target_path)
self._review = Review(client, number)
def load_changes(self):
log.info('Loading pull request patches from github.')
files = self._client.pull_requests.list_files(self._number)
pull_request_patches = files.all()
self._changes = DiffCollection(pull_request_patches)
self._problems.set_changes(self._changes)
def run_tools(self, repo_config):
- if not self._changes:
+ if self._changes is None:
raise RuntimeError('No loaded changes, cannot run tools. '
'Try calling load_changes first.')
files_to_check = self._changes.get_files(append_base=self._target_path)
tools.run(
repo_config,
self._problems,
files_to_check,
self._target_path)
def publish(self, wait_time=0):
self._problems.limit_to_changes()
self._review.publish(self._problems, self._head, wait_time)
|
Make check against None instead of falsey things.
|
## Code Before:
import logging
import lintreview.tools as tools
from lintreview.diff import DiffCollection
from lintreview.review import Problems
from lintreview.review import Review
log = logging.getLogger(__name__)
class Processor(object):
def __init__(self, client, number, head, target_path):
self._client = client
self._number = number
self._head = head
self._target_path = target_path
self._changes = None
self._problems = Problems(target_path)
self._review = Review(client, number)
def load_changes(self):
log.info('Loading pull request patches from github.')
files = self._client.pull_requests.list_files(self._number)
pull_request_patches = files.all()
self._changes = DiffCollection(pull_request_patches)
self._problems.set_changes(self._changes)
def run_tools(self, repo_config):
if not self._changes:
raise RuntimeError('No loaded changes, cannot run tools. '
'Try calling load_changes first.')
files_to_check = self._changes.get_files(append_base=self._target_path)
tools.run(
repo_config,
self._problems,
files_to_check,
self._target_path)
def publish(self, wait_time=0):
self._problems.limit_to_changes()
self._review.publish(self._problems, self._head, wait_time)
## Instruction:
Make check against None instead of falsey things.
## Code After:
import logging
import lintreview.tools as tools
from lintreview.diff import DiffCollection
from lintreview.review import Problems
from lintreview.review import Review
log = logging.getLogger(__name__)
class Processor(object):
def __init__(self, client, number, head, target_path):
self._client = client
self._number = number
self._head = head
self._target_path = target_path
self._changes = None
self._problems = Problems(target_path)
self._review = Review(client, number)
def load_changes(self):
log.info('Loading pull request patches from github.')
files = self._client.pull_requests.list_files(self._number)
pull_request_patches = files.all()
self._changes = DiffCollection(pull_request_patches)
self._problems.set_changes(self._changes)
def run_tools(self, repo_config):
if self._changes is None:
raise RuntimeError('No loaded changes, cannot run tools. '
'Try calling load_changes first.')
files_to_check = self._changes.get_files(append_base=self._target_path)
tools.run(
repo_config,
self._problems,
files_to_check,
self._target_path)
def publish(self, wait_time=0):
self._problems.limit_to_changes()
self._review.publish(self._problems, self._head, wait_time)
|
...
def run_tools(self, repo_config):
if self._changes is None:
raise RuntimeError('No loaded changes, cannot run tools. '
...
|
cc2b579377abde262d76e2484a6488e254b315fc
|
judge/caching.py
|
judge/caching.py
|
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
def update_submission(id):
key = 'version:submission-%d' % id
cache.add(key, 0, None)
cache.incr(key)
def update_stats():
cache.delete('sub_stats_table')
cache.delete('sub_stats_data')
def point_update(profile):
cache.delete(make_template_fragment_key('global_user'))
def finished_submission(sub):
cache.delete('user_complete:%d' % sub.user_id)
if hasattr(sub, 'contest'):
participation = sub.contest.participation
cache.delete('contest_complete:%d' % participation.id)
cache.delete(make_template_fragment_key('conrank_user_prob',
(participation.profile.user_id,
participation.contest_id)))
|
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
def update_submission(id):
key = 'version:submission-%d' % id
cache.add(key, 0, None)
cache.incr(key)
def update_stats():
cache.delete_many(('sub_stats_table', 'sub_stats_data'))
def point_update(profile):
cache.delete(make_template_fragment_key('global_user'))
def finished_submission(sub):
keys = ['user_complete:%d' % sub.user_id]
if hasattr(sub, 'contest'):
participation = sub.contest.participation
keys += ['contest_complete:%d' % participation.id,
make_template_fragment_key('conrank_user_prob',
(participation.profile.user_id,
participation.contest_id))]
cache.delete_many(keys)
|
Delete many to reduce round trips to the cache.
|
Delete many to reduce round trips to the cache.
|
Python
|
agpl-3.0
|
Minkov/site,Minkov/site,DMOJ/site,Minkov/site,DMOJ/site,DMOJ/site,Phoenix1369/site,Phoenix1369/site,Minkov/site,monouno/site,Phoenix1369/site,monouno/site,monouno/site,monouno/site,monouno/site,DMOJ/site,Phoenix1369/site
|
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
def update_submission(id):
key = 'version:submission-%d' % id
cache.add(key, 0, None)
cache.incr(key)
def update_stats():
+ cache.delete_many(('sub_stats_table', 'sub_stats_data'))
- cache.delete('sub_stats_table')
- cache.delete('sub_stats_data')
def point_update(profile):
cache.delete(make_template_fragment_key('global_user'))
def finished_submission(sub):
- cache.delete('user_complete:%d' % sub.user_id)
+ keys = ['user_complete:%d' % sub.user_id]
if hasattr(sub, 'contest'):
participation = sub.contest.participation
- cache.delete('contest_complete:%d' % participation.id)
+ keys += ['contest_complete:%d' % participation.id,
- cache.delete(make_template_fragment_key('conrank_user_prob',
+ make_template_fragment_key('conrank_user_prob',
- (participation.profile.user_id,
+ (participation.profile.user_id,
- participation.contest_id)))
+ participation.contest_id))]
+ cache.delete_many(keys)
+
|
Delete many to reduce round trips to the cache.
|
## Code Before:
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
def update_submission(id):
key = 'version:submission-%d' % id
cache.add(key, 0, None)
cache.incr(key)
def update_stats():
cache.delete('sub_stats_table')
cache.delete('sub_stats_data')
def point_update(profile):
cache.delete(make_template_fragment_key('global_user'))
def finished_submission(sub):
cache.delete('user_complete:%d' % sub.user_id)
if hasattr(sub, 'contest'):
participation = sub.contest.participation
cache.delete('contest_complete:%d' % participation.id)
cache.delete(make_template_fragment_key('conrank_user_prob',
(participation.profile.user_id,
participation.contest_id)))
## Instruction:
Delete many to reduce round trips to the cache.
## Code After:
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
def update_submission(id):
key = 'version:submission-%d' % id
cache.add(key, 0, None)
cache.incr(key)
def update_stats():
cache.delete_many(('sub_stats_table', 'sub_stats_data'))
def point_update(profile):
cache.delete(make_template_fragment_key('global_user'))
def finished_submission(sub):
keys = ['user_complete:%d' % sub.user_id]
if hasattr(sub, 'contest'):
participation = sub.contest.participation
keys += ['contest_complete:%d' % participation.id,
make_template_fragment_key('conrank_user_prob',
(participation.profile.user_id,
participation.contest_id))]
cache.delete_many(keys)
|
...
def update_stats():
cache.delete_many(('sub_stats_table', 'sub_stats_data'))
...
def finished_submission(sub):
keys = ['user_complete:%d' % sub.user_id]
if hasattr(sub, 'contest'):
...
participation = sub.contest.participation
keys += ['contest_complete:%d' % participation.id,
make_template_fragment_key('conrank_user_prob',
(participation.profile.user_id,
participation.contest_id))]
cache.delete_many(keys)
...
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.