Dataset columns (types and value-length ranges, as reported by the dataset viewer):

repo               string   lengths 7–55
path               string   lengths 4–223
func_name          string   lengths 1–134
original_string    string   lengths 75–104k
language           string   1 class (1 value)
code               string   lengths 75–104k
code_tokens        list     lengths 19–28.4k
docstring          string   lengths 1–46.9k
docstring_tokens   list     lengths 1–1.97k
sha                string   lengths 40–40
url                string   lengths 87–315
partition          string   1 class (1 value)
ucsb-cs-education/hairball
hairball/plugins/initialization.py
AttributeInitialization.analyze
python
def analyze(self, scratch, **kwargs):
    """Run and return the results of the AttributeInitialization plugin."""
    changes = dict((x.name, self.sprite_changes(x))
                   for x in scratch.sprites)
    changes['stage'] = {
        'background': self.attribute_state(scratch.stage.scripts, 'costume')}
    # self.output_results(changes)
    return {'initialized': changes}
Run and return the results of the AttributeInitialization plugin.
c6da8971f8a34e88ce401d36b51431715e1dff5b
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/initialization.py#L108-L116
train
ucsb-cs-education/hairball
hairball/plugins/initialization.py
VariableInitialization.variable_state
python
def variable_state(cls, scripts, variables):
    """Return the initialization state for each variable in variables.

    The state is determined based on the scripts passed in via the
    scripts parameter. If there is more than one 'when green flag
    clicked' script and they both modify the attribute, then the
    attribute is considered to not be initialized.

    """
    def conditionally_set_not_modified():
        """Set the variable to modified if it hasn't been altered."""
        state = variables.get(block.args[0], None)
        if state == cls.STATE_NOT_MODIFIED:
            variables[block.args[0]] = cls.STATE_MODIFIED

    green_flag, other = partition_scripts(scripts, cls.HAT_GREEN_FLAG)
    variables = dict((x, cls.STATE_NOT_MODIFIED) for x in variables)
    for script in green_flag:
        in_zone = True
        for name, level, block in cls.iter_blocks(script.blocks):
            if name == 'broadcast %s and wait':
                in_zone = False
            if name == 'set %s effect to %s':
                state = variables.get(block.args[0], None)
                if state is None:
                    continue  # Not a variable we care about
                if in_zone and level == 0:  # Success!
                    if state == cls.STATE_NOT_MODIFIED:
                        state = cls.STATE_INITIALIZED
                    else:  # Multiple when green flag clicked conflict
                        # TODO: Need to allow multiple sets of a variable
                        # within the same script
                        # print 'CONFLICT', script
                        state = cls.STATE_MODIFIED
                elif in_zone:
                    continue  # Conservative ignore for nested absolutes
                elif state == cls.STATE_NOT_MODIFIED:
                    state = cls.STATE_MODIFIED
                variables[block.args[0]] = state
            elif name == 'change %s effect by %s':
                conditionally_set_not_modified()
    for script in other:
        for name, _, block in cls.iter_blocks(script.blocks):
            if name in ('change %s effect by %s', 'set %s effect to %s'):
                conditionally_set_not_modified()
    return variables
Return the initialization state for each variable in variables. The state is determined based on the scripts passed in via the scripts parameter. If there is more than one 'when green flag clicked' script and they both modify the attribute, then the attribute is considered to not be initialized.
c6da8971f8a34e88ce401d36b51431715e1dff5b
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/initialization.py#L128-L175
train
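The three-state bookkeeping in variable_state above is easier to see in isolation. A minimal standalone sketch, not Hairball's API (fold_writes and the event labels are invented for illustration): a variable becomes INITIALIZED only via a top-level set inside a green-flag script's zone while still untouched; a second green-flag set is a conflict, and change-blocks or out-of-zone writes can only demote an untouched variable to MODIFIED.

NOT_MODIFIED, MODIFIED, INITIALIZED = range(3)

def fold_writes(events, state=NOT_MODIFIED):
    """events: 'set_top_green' for a top-level set inside a green-flag
    zone, 'other' for any other write to the same variable."""
    for event in events:
        if event == 'set_top_green':
            # First such set initializes; any later one is a conflict.
            state = INITIALIZED if state == NOT_MODIFIED else MODIFIED
        elif state == NOT_MODIFIED:
            # change-blocks and out-of-zone sets never initialize.
            state = MODIFIED
    return state

assert fold_writes(['set_top_green']) == INITIALIZED
assert fold_writes(['other', 'set_top_green']) == MODIFIED
assert fold_writes(['set_top_green', 'other']) == INITIALIZED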
ucsb-cs-education/hairball
hairball/plugins/initialization.py
VariableInitialization.analyze
python
def analyze(self, scratch, **kwargs):
    """Run and return the results of the VariableInitialization plugin."""
    variables = dict((x, self.variable_state(x.scripts, x.variables))
                     for x in scratch.sprites)
    variables['global'] = self.variable_state(self.iter_scripts(scratch),
                                              scratch.stage.variables)
    # Output for now
    import pprint
    pprint.pprint(variables)
    return {'variables': variables}
Run and return the results of the VariableInitialization plugin.
c6da8971f8a34e88ce401d36b51431715e1dff5b
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/initialization.py#L177-L186
train
ucsb-cs-education/hairball
hairball/plugins/convention.py
SpriteNaming.finalize
python
def finalize(self):
    """Output the default sprite names found in the project."""
    print('{} default sprite names found:'.format(self.total_default))
    for name in self.list_default:
        print(name)
Output the default sprite names found in the project.
c6da8971f8a34e88ce401d36b51431715e1dff5b
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/convention.py#L22-L26
train
ucsb-cs-education/hairball
hairball/plugins/convention.py
SpriteNaming.analyze
python
def analyze(self, scratch, **kwargs):
    """Run and return the results from the SpriteNaming plugin."""
    for sprite in self.iter_sprites(scratch):
        for default in self.default_names:
            if default in sprite.name:
                self.total_default += 1
                self.list_default.append(sprite.name)
Run and return the results from the SpriteNaming plugin.
c6da8971f8a34e88ce401d36b51431715e1dff5b
https://github.com/ucsb-cs-education/hairball/blob/c6da8971f8a34e88ce401d36b51431715e1dff5b/hairball/plugins/convention.py#L28-L34
train
ramses-tech/nefertari
nefertari/tweens.py
get_tunneling
python
def get_tunneling(handler, registry):
    """ Allows all methods to be tunneled via GET for dev/debugging
    purposes.
    """
    log.info('get_tunneling enabled')

    def get_tunneling(request):
        if request.method == 'GET':
            method = request.GET.pop('_m', 'GET')
            request.method = method
            if method in ['POST', 'PUT', 'PATCH']:
                get_params = request.GET.mixed()
                valid_params = drop_reserved_params(get_params)
                request.body = six.b(json.dumps(valid_params))
                request.content_type = 'application/json'
                request._tunneled_get = True
        return handler(request)

    return get_tunneling
Allows all methods to be tunneled via GET for dev/debugging purposes.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/tweens.py#L34-L54
train
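A standalone sketch of the transformation the get_tunneling tween performs, using plain dicts instead of the real Pyramid/WebOb request and omitting the drop_reserved_params filtering (tunnel and its arguments are invented for illustration):

import json

def tunnel(method, query):
    """Rewrite a GET carrying '_m' into the verb it names."""
    if method == 'GET':
        method = query.pop('_m', 'GET')
        if method in ('POST', 'PUT', 'PATCH'):
            return method, json.dumps(query).encode(), 'application/json'
    return method, b'', None

# GET /items?_m=POST&name=foo becomes a POST with a JSON body:
print(tunnel('GET', {'_m': 'POST', 'name': 'foo'}))
# ('POST', b'{"name": "foo"}', 'application/json')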
ramses-tech/nefertari
nefertari/tweens.py
enable_selfalias
python
def enable_selfalias(config, id_name):
    """ This allows replacing id_name with "self".

    e.g. /users/joe/account == /users/self/account if joe is in the
    session as an authorized user
    """
    def context_found_subscriber(event):
        request = event.request
        user = getattr(request, 'user', None)
        if (request.matchdict and
                request.matchdict.get(id_name, None) == 'self' and
                user):
            request.matchdict[id_name] = user.username

    config.add_subscriber(context_found_subscriber, ContextFound)
This allows replacing id_name with "self". e.g. /users/joe/account == /users/self/account if joe is in the session as an authorized user
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/tweens.py#L136-L151
train
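The effect of enable_selfalias, reduced to a self-contained sketch (rewrite_self and the 'username' id_name are assumptions for illustration, not nefertari's API):

from types import SimpleNamespace

def rewrite_self(matchdict, user, id_name='username'):
    """Replace a literal 'self' URL segment with the authenticated
    user's username, mirroring the subscriber above."""
    if matchdict and matchdict.get(id_name) == 'self' and user:
        matchdict[id_name] = user.username
    return matchdict

user = SimpleNamespace(username='joe')
print(rewrite_self({'username': 'self'}, user))  # {'username': 'joe'}
print(rewrite_self({'username': 'ann'}, user))   # left untouched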
ramses-tech/nefertari
nefertari/view.py
BaseView.convert_dotted
python
def convert_dotted(params):
    """ Convert dotted keys in :params: dictset to a nested dictset.

    E.g. {'settings.foo': 'bar'} -> {'settings': {'foo': 'bar'}}
    """
    if not isinstance(params, dictset):
        params = dictset(params)

    dotted_items = {k: v for k, v in params.items() if '.' in k}

    if dotted_items:
        dicts = [str2dict(key, val) for key, val in dotted_items.items()]
        dotted = six.functools.reduce(merge_dicts, dicts)
        params = params.subset(['-' + k for k in dotted_items.keys()])
        params.update(dict(dotted))

    return params
Convert dotted keys in :params: dictset to a nested dictset. E.g. {'settings.foo': 'bar'} -> {'settings': {'foo': 'bar'}}
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L79-L95
train
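convert_dotted leans on nefertari's dictset, str2dict and merge_dicts helpers; a plain-dict sketch of the same transformation (the _sketch helpers are invented stand-ins, not the library's implementations):

from functools import reduce

def str2dict_sketch(key, val):
    """'a.b.c', v -> {'a': {'b': {'c': v}}}"""
    for part in reversed(key.split('.')):
        val = {part: val}
    return val

def merge_sketch(a, b):
    """Recursively merge dict b into dict a."""
    for k, v in b.items():
        if isinstance(v, dict) and isinstance(a.get(k), dict):
            merge_sketch(a[k], v)
        else:
            a[k] = v
    return a

params = {'settings.foo': 'bar', 'settings.limit': 10, 'name': 'x'}
dotted = reduce(merge_sketch,
                (str2dict_sketch(k, v)
                 for k, v in params.items() if '.' in k), {})
result = {k: v for k, v in params.items() if '.' not in k}
result.update(dotted)
print(result)  # {'name': 'x', 'settings': {'foo': 'bar', 'limit': 10}}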
ramses-tech/nefertari
nefertari/view.py
BaseView.prepare_request_params
python
def prepare_request_params(self, _query_params, _json_params):
    """ Prepare query and update params. """
    self._query_params = dictset(
        _query_params or self.request.params.mixed())
    self._json_params = dictset(_json_params)

    ctype = self.request.content_type
    if self.request.method in ['POST', 'PUT', 'PATCH']:
        if ctype == 'application/json':
            try:
                self._json_params.update(self.request.json)
            except simplejson.JSONDecodeError:
                log.error(
                    "Expecting JSON. Received: '{}'. "
                    "Request: {} {}".format(
                        self.request.body, self.request.method,
                        self.request.url))
        self._json_params = BaseView.convert_dotted(self._json_params)
        self._query_params = BaseView.convert_dotted(self._query_params)

    self._params = self._query_params.copy()
    self._params.update(self._json_params)
Prepare query and update params.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L136-L158
train
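Worth noting the precedence the last two lines of prepare_request_params establish: JSON-body values shadow query-string values of the same name. Illustrated with plain dicts:

query_params = {'name': 'from-query', 'page': '2'}
json_params = {'name': 'from-body'}
params = dict(query_params)
params.update(json_params)      # JSON wins on key collisions
print(params)  # {'name': 'from-body', 'page': '2'}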
ramses-tech/nefertari
nefertari/view.py
BaseView.set_override_rendered
python
def set_override_rendered(self):
    """ Set self.request.override_renderer if needed. """
    if '' in self.request.accept:
        self.request.override_renderer = self._default_renderer
    elif 'application/json' in self.request.accept:
        self.request.override_renderer = 'nefertari_json'
    elif 'text/plain' in self.request.accept:
        self.request.override_renderer = 'string'
Set self.request.override_renderer if needed.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L160-L167
train
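The same Accept negotiation as set_override_rendered, reduced to a standalone helper (pick_renderer is an invented name; the renderer strings are the ones used above, and the empty string stands for the "no preference" accept value):

def pick_renderer(accept_values, default_renderer='json'):
    """Mirror the Accept-header branches above."""
    if '' in accept_values:
        return default_renderer
    if 'application/json' in accept_values:
        return 'nefertari_json'
    if 'text/plain' in accept_values:
        return 'string'
    return None  # leave request.override_renderer unset

print(pick_renderer(['']))                   # json
print(pick_renderer(['application/json']))   # nefertari_json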
ramses-tech/nefertari
nefertari/view.py
BaseView._setup_aggregation
python
def _setup_aggregation(self, aggregator=None):
    """ Wrap `self.index` method with ESAggregator.

    This makes `self.index` first try to run the aggregation; only on
    failure is the original method run. The method is wrapped only if
    it is defined and the `elasticsearch.enable_aggregations` setting
    is true.
    """
    from nefertari.elasticsearch import ES
    if aggregator is None:
        aggregator = ESAggregator
    aggregations_enabled = (
        ES.settings and ES.settings.asbool('enable_aggregations'))
    if not aggregations_enabled:
        log.debug('Elasticsearch aggregations are not enabled')
        return

    index = getattr(self, 'index', None)
    index_defined = index and index != self.not_allowed_action
    if index_defined:
        self.index = aggregator(self).wrap(self.index)
Wrap `self.index` method with ESAggregator. This makes `self.index` first try to run the aggregation; only on failure is the original method run. The method is wrapped only if it is defined and the `elasticsearch.enable_aggregations` setting is true.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L169-L188
train
ramses-tech/nefertari
nefertari/view.py
BaseView.get_collection_es
python
def get_collection_es(self):
    """ Query ES collection and return results.

    This is the default implementation of querying an ES collection
    with `self._query_params`. It must return the found ES collection
    results for the default response renderers to work properly.
    """
    from nefertari.elasticsearch import ES
    return ES(self.Model.__name__).get_collection(**self._query_params)
Query ES collection and return results. This is the default implementation of querying an ES collection with `self._query_params`. It must return the found ES collection results for the default response renderers to work properly.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L190-L198
train
ramses-tech/nefertari
nefertari/view.py
BaseView.fill_null_values
python
def fill_null_values(self):
    """ Fill missing model fields in JSON with {key: null value}.

    Only run for PUT requests.
    """
    if not self.Model:
        log.info("%s has no model defined" % self.__class__.__name__)
        return

    empty_values = self.Model.get_null_values()
    for field, value in empty_values.items():
        if field not in self._json_params:
            self._json_params[field] = value
Fill missing model fields in JSON with {key: null value}. Only run for PUT requests.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L200-L212
train
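The effect of fill_null_values, sketched with plain dicts (the field names and the {'title': None, 'tags': []} defaults are hypothetical; real defaults come from Model.get_null_values()):

json_params = {'title': 'Hello'}             # incoming PUT body
null_values = {'title': None, 'tags': []}    # hypothetical model defaults
for field, value in null_values.items():
    if field not in json_params:
        json_params[field] = value
print(json_params)  # {'title': 'Hello', 'tags': []}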
ramses-tech/nefertari
nefertari/view.py
BaseView.set_public_limits
python
def set_public_limits(self):
    """ Set public limits if auth is enabled and user is not
    authenticated.

    Also sets default limit for GET, HEAD requests.
    """
    if self.request.method.upper() in ['GET', 'HEAD']:
        self._query_params.process_int_param('_limit', 20)
    if self._auth_enabled and not getattr(self.request, 'user', None):
        wrappers.set_public_limits(self)
Set public limits if auth is enabled and user is not authenticated. Also sets default limit for GET, HEAD requests.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L214-L223
train
ramses-tech/nefertari
nefertari/view.py
BaseView.convert_ids2objects
python
def convert_ids2objects(self):
    """ Convert object IDs from `self._json_params` to objects if needed.

    Only IDs that belong to relationship field of `self.Model`
    are converted.
    """
    if not self.Model:
        log.info("%s has no model defined" % self.__class__.__name__)
        return

    for field in self._json_params.keys():
        if not engine.is_relationship_field(field, self.Model):
            continue
        rel_model_cls = engine.get_relationship_cls(field, self.Model)
        self.id2obj(field, rel_model_cls)
Convert object IDs from `self._json_params` to objects if needed. Only IDs that belong to relationship field of `self.Model` are converted.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L225-L239
train
ramses-tech/nefertari
nefertari/view.py
BaseView.setup_default_wrappers
python
def setup_default_wrappers(self):
    """ Set up default wrappers.

    Wrappers are applied when the view method does not return an
    instance of Response. In this case nefertari renderers call the
    wrappers and handle response generation.
    """
    # Index
    self._after_calls['index'] = [
        wrappers.wrap_in_dict(self.request),
        wrappers.add_meta(self.request),
        wrappers.add_object_url(self.request),
    ]

    # Show
    self._after_calls['show'] = [
        wrappers.wrap_in_dict(self.request),
        wrappers.add_meta(self.request),
        wrappers.add_object_url(self.request),
    ]

    # Create
    self._after_calls['create'] = [
        wrappers.wrap_in_dict(self.request),
        wrappers.add_meta(self.request),
        wrappers.add_object_url(self.request),
    ]

    # Update
    self._after_calls['update'] = [
        wrappers.wrap_in_dict(self.request),
        wrappers.add_meta(self.request),
        wrappers.add_object_url(self.request),
    ]

    # Replace
    self._after_calls['replace'] = [
        wrappers.wrap_in_dict(self.request),
        wrappers.add_meta(self.request),
        wrappers.add_object_url(self.request),
    ]

    # Privacy wrappers
    if self._auth_enabled:
        for meth in ('index', 'show', 'create', 'update', 'replace'):
            self._after_calls[meth] += [
                wrappers.apply_privacy(self.request),
            ]
        for meth in ('update', 'replace', 'update_many'):
            self._before_calls[meth] += [
                wrappers.apply_request_privacy(
                    self.Model, self._json_params),
            ]
Set up default wrappers. Wrappers are applied when the view method does not return an instance of Response. In this case nefertari renderers call the wrappers and handle response generation.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L241-L293
train
turicas/mongodict
migrate_data.py
migrate_codec
python
def migrate_codec(config_old, config_new):
    '''Migrate data from mongodict <= 0.2.1 to 0.3.0

    `config_old` and `config_new` should be dictionaries with the keys
    describing the MongoDB server:
    - `host`
    - `port`
    - `database`
    - `collection`
    '''
    assert mongodict.__version__ in [(0, 3, 0), (0, 3, 1)]
    connection = pymongo.Connection(host=config_old['host'],
                                    port=config_old['port'])
    database = connection[config_old['database']]
    collection = database[config_old['collection']]
    new_dict = mongodict.MongoDict(**config_new)  # uses pickle codec by default
    total_pairs = collection.count()
    start_time = time.time()
    for counter, pair in enumerate(collection.find(), start=1):
        key, value = pair['_id'], pair['value']
        new_dict[key] = value
        if counter % REPORT_INTERVAL == 0:
            print_report(counter, total_pairs, start_time)
    print_report(counter, total_pairs, start_time)
    print('')
Migrate data from mongodict <= 0.2.1 to 0.3.0. `config_old` and `config_new` should be dictionaries with the keys describing the MongoDB server: `host`, `port`, `database`, `collection`.
596211a57f68cc0574eededc5cb2a4be8f9ce098
https://github.com/turicas/mongodict/blob/596211a57f68cc0574eededc5cb2a4be8f9ce098/migrate_data.py#L40-L64
train
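A hypothetical invocation of migrate_codec, assuming a local MongoDB with the pre-0.3 data in olddb.cache (all names made up; note the function itself relies on the legacy pymongo.Connection API):

old = {'host': 'localhost', 'port': 27017,
       'database': 'olddb', 'collection': 'cache'}
new = {'host': 'localhost', 'port': 27017,
       'database': 'newdb', 'collection': 'cache'}
migrate_codec(old, new)   # re-encodes every pair with the pickle codec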
ramses-tech/nefertari
nefertari/utils/data.py
FieldData.from_dict
python
def from_dict(cls, data, model):
    """ Generate map of `fieldName: clsInstance` from dict.

    :param data: Dict where keys are field names and values are
        new values of field.
    :param model: Model class to which fields from :data: belong.
    """
    model_provided = model is not None
    result = {}
    for name, new_value in data.items():
        kwargs = {
            'name': name,
            'new_value': new_value,
        }
        if model_provided:
            kwargs['params'] = model.get_field_params(name)
        result[name] = cls(**kwargs)
    return result
Generate map of `fieldName: clsInstance` from dict. :param data: Dict where keys are field names and values are new values of field. :param model: Model class to which fields from :data: belong.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/utils/data.py#L119-L136
train
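A hypothetical call to FieldData.from_dict; passing model=None exercises the branch that skips the per-field params lookup:

fields = FieldData.from_dict({'title': 'New', 'views': 3}, model=None)
# -> {'title': FieldData(name='title', new_value='New'),
#     'views': FieldData(name='views', new_value=3)}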
ramses-tech/nefertari
nefertari/authentication/views.py
TicketAuthViewMixin.register
python
def register(self):
    """ Register new user by POSTing all required data. """
    user, created = self.Model.create_account(
        self._json_params)
    if not created:
        raise JHTTPConflict('Looks like you already have an account.')
    self.request._user = user
    pk_field = user.pk_field()
    headers = remember(self.request, getattr(user, pk_field))
    return JHTTPOk('Registered', headers=headers)
Register new user by POSTing all required data.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/authentication/views.py#L15-L26
train
ramses-tech/nefertari
nefertari/authentication/views.py
TokenAuthViewMixin.register
python
def register(self):
    """ Register a new user by POSTing all required data.

    User's `Authorization` header value is returned in the
    `WWW-Authenticate` header.
    """
    user, created = self.Model.create_account(self._json_params)
    if user.api_key is None:
        raise JHTTPBadRequest('Failed to generate ApiKey for user')
    if not created:
        raise JHTTPConflict('Looks like you already have an account.')
    self.request._user = user
    headers = remember(self.request, user.username)
    return JHTTPOk('Registered', headers=headers)
Register a new user by POSTing all required data. User's `Authorization` header value is returned in `WWW-Authenticate` header.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/authentication/views.py#L109-L124
train
ramses-tech/nefertari
nefertari/authentication/views.py
TokenAuthViewMixin.claim_token
python
def claim_token(self, **params):
    """Claim current token by POSTing 'login' and 'password'.

    User's `Authorization` header value is returned in the
    `WWW-Authenticate` header.
    """
    self._json_params.update(params)
    success, self.user = self.Model.authenticate_by_password(
        self._json_params)

    if success:
        headers = remember(self.request, self.user.username)
        return JHTTPOk('Token claimed', headers=headers)
    if self.user:
        raise JHTTPUnauthorized('Wrong login or password')
    else:
        raise JHTTPNotFound('User not found')
Claim current token by POSTing 'login' and 'password'. User's `Authorization` header value is returned in `WWW-Authenticate` header.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/authentication/views.py#L126-L142
train
ramses-tech/nefertari
nefertari/authentication/views.py
TokenAuthViewMixin.reset_token
python
def reset_token(self, **params):
    """ Reset current token by POSTing 'login' and 'password'.

    User's `Authorization` header value is returned in the
    `WWW-Authenticate` header.
    """
    response = self.claim_token(**params)
    if not self.user:
        return response

    self.user.api_key.reset_token()
    headers = remember(self.request, self.user.username)
    return JHTTPOk('Registered', headers=headers)
Reset current token by POSTing 'login' and 'password'. User's `Authorization` header value is returned in `WWW-Authenticate` header.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/authentication/views.py#L144-L156
train
ramses-tech/nefertari
nefertari/wrappers.py
apply_privacy._apply_nested_privacy
python
def _apply_nested_privacy(self, data):
    """ Apply privacy to nested documents.

    :param data: Dict of data to which privacy is already applied.
    """
    kw = {
        'is_admin': self.is_admin,
        'drop_hidden': self.drop_hidden,
    }
    for key, val in data.items():
        if is_document(val):
            data[key] = apply_privacy(self.request)(result=val, **kw)
        elif isinstance(val, list) and val and is_document(val[0]):
            data[key] = [apply_privacy(self.request)(result=doc, **kw)
                         for doc in val]
    return data
Apply privacy to nested documents. :param data: Dict of data to which privacy is already applied.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/wrappers.py#L191-L206
train
ramses-tech/nefertari
nefertari/wrappers.py
add_object_url._set_object_self
python
def _set_object_self(self, obj):
    """ Add '_self' key value to :obj: dict. """
    from nefertari.elasticsearch import ES
    location = self.request.path_url
    route_kwargs = {}

    # Check for parents
    if self.request.matchdict:
        route_kwargs.update(self.request.matchdict)
    try:
        type_, obj_pk = obj['_type'], obj['_pk']
    except KeyError:
        return
    resource = (self.model_collections.get(type_) or
                self.model_collections.get(ES.src2type(type_)))
    if resource is not None:
        route_kwargs.update({resource.id_name: obj_pk})
        location = self.request.route_url(
            resource.uid, **route_kwargs)
    obj.setdefault('_self', location)
Add '_self' key value to :obj: dict.
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/wrappers.py#L300-L319
train
ramses-tech/nefertari
nefertari/resource.py
get_root_resource
python
def get_root_resource(config):
    """Returns the root resource."""
    app_package_name = get_app_package_name(config)
    return config.registry._root_resources.setdefault(
        app_package_name, Resource(config))
Returns the root resource.
[ "Returns", "the", "root", "resource", "." ]
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L46-L50
train
ramses-tech/nefertari
nefertari/resource.py
add_resource_routes
def add_resource_routes(config, view, member_name, collection_name, **kwargs): """ ``view`` is a dotted name of (or direct reference to) a Python view class, e.g. ``'my.package.views.MyView'``. ``member_name`` should be the appropriate singular version of the resource given your locale and used with members of the collection. ``collection_name`` will be used to refer to the resource collection methods and should be a plural version of the member_name argument. All keyword arguments are optional. ``path_prefix`` Prepends the URL path for the Route with the path_prefix given. This is most useful for cases where you want to mix resources or relations between resources. ``name_prefix`` Prepends the route names that are generated with the name_prefix given. Combined with the path_prefix option, it's easy to generate route names and paths that represent resources that are in relations. Example:: config.add_resource_routes( 'myproject.views:CategoryView', 'message', 'messages', path_prefix='/category/{category_id}', name_prefix="category_") # GET /category/7/messages/1 # has named route "category_message" """ view = maybe_dotted(view) path_prefix = kwargs.pop('path_prefix', '') name_prefix = kwargs.pop('name_prefix', '') if config.route_prefix: name_prefix = "%s_%s" % (config.route_prefix, name_prefix) if collection_name: id_name = '/{%s}' % (kwargs.pop('id_name', None) or DEFAULT_ID_NAME) else: id_name = '' path = path_prefix.strip('/') + '/' + (collection_name or member_name) _factory = kwargs.pop('factory', None) # If factory is not set, then auth should be False _auth = kwargs.pop('auth', None) and _factory _traverse = (kwargs.pop('traverse', None) or id_name) if _factory else None action_route = {} added_routes = {} def add_route_and_view(config, action, route_name, path, request_method, **route_kwargs): if route_name not in added_routes: config.add_route( route_name, path, factory=_factory, request_method=['GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS'], **route_kwargs) added_routes[route_name] = path action_route[action] = route_name if _auth: permission = PERMISSIONS[action] else: permission = None config.add_view(view=view, attr=action, route_name=route_name, request_method=request_method, permission=permission, **kwargs) config.commit() if collection_name == member_name: collection_name = collection_name + '_collection' if collection_name: add_route_and_view( config, 'index', name_prefix + collection_name, path, 'GET') add_route_and_view( config, 'collection_options', name_prefix + collection_name, path, 'OPTIONS') add_route_and_view( config, 'show', name_prefix + member_name, path + id_name, 'GET', traverse=_traverse) add_route_and_view( config, 'item_options', name_prefix + member_name, path + id_name, 'OPTIONS', traverse=_traverse) add_route_and_view( config, 'replace', name_prefix + member_name, path + id_name, 'PUT', traverse=_traverse) add_route_and_view( config, 'update', name_prefix + member_name, path + id_name, 'PATCH', traverse=_traverse) add_route_and_view( config, 'create', name_prefix + (collection_name or member_name), path, 'POST') add_route_and_view( config, 'delete', name_prefix + member_name, path + id_name, 'DELETE', traverse=_traverse) if collection_name: add_route_and_view( config, 'update_many', name_prefix + (collection_name or member_name), path, 'PUT', traverse=_traverse) add_route_and_view( config, 'update_many', name_prefix + (collection_name or member_name), path, 'PATCH', traverse=_traverse) add_route_and_view( config, 'delete_many', name_prefix + (collection_name or member_name), path, 'DELETE', traverse=_traverse) return action_route
python
def add_resource_routes(config, view, member_name, collection_name, **kwargs): """ ``view`` is a dotted name of (or direct reference to) a Python view class, e.g. ``'my.package.views.MyView'``. ``member_name`` should be the appropriate singular version of the resource given your locale and used with members of the collection. ``collection_name`` will be used to refer to the resource collection methods and should be a plural version of the member_name argument. All keyword arguments are optional. ``path_prefix`` Prepends the URL path for the Route with the path_prefix given. This is most useful for cases where you want to mix resources or relations between resources. ``name_prefix`` Prepends the route names that are generated with the name_prefix given. Combined with the path_prefix option, it's easy to generate route names and paths that represent resources that are in relations. Example:: config.add_resource_routes( 'myproject.views:CategoryView', 'message', 'messages', path_prefix='/category/{category_id}', name_prefix="category_") # GET /category/7/messages/1 # has named route "category_message" """ view = maybe_dotted(view) path_prefix = kwargs.pop('path_prefix', '') name_prefix = kwargs.pop('name_prefix', '') if config.route_prefix: name_prefix = "%s_%s" % (config.route_prefix, name_prefix) if collection_name: id_name = '/{%s}' % (kwargs.pop('id_name', None) or DEFAULT_ID_NAME) else: id_name = '' path = path_prefix.strip('/') + '/' + (collection_name or member_name) _factory = kwargs.pop('factory', None) # If factory is not set, then auth should be False _auth = kwargs.pop('auth', None) and _factory _traverse = (kwargs.pop('traverse', None) or id_name) if _factory else None action_route = {} added_routes = {} def add_route_and_view(config, action, route_name, path, request_method, **route_kwargs): if route_name not in added_routes: config.add_route( route_name, path, factory=_factory, request_method=['GET', 'POST', 'PUT', 'PATCH', 'DELETE', 'OPTIONS'], **route_kwargs) added_routes[route_name] = path action_route[action] = route_name if _auth: permission = PERMISSIONS[action] else: permission = None config.add_view(view=view, attr=action, route_name=route_name, request_method=request_method, permission=permission, **kwargs) config.commit() if collection_name == member_name: collection_name = collection_name + '_collection' if collection_name: add_route_and_view( config, 'index', name_prefix + collection_name, path, 'GET') add_route_and_view( config, 'collection_options', name_prefix + collection_name, path, 'OPTIONS') add_route_and_view( config, 'show', name_prefix + member_name, path + id_name, 'GET', traverse=_traverse) add_route_and_view( config, 'item_options', name_prefix + member_name, path + id_name, 'OPTIONS', traverse=_traverse) add_route_and_view( config, 'replace', name_prefix + member_name, path + id_name, 'PUT', traverse=_traverse) add_route_and_view( config, 'update', name_prefix + member_name, path + id_name, 'PATCH', traverse=_traverse) add_route_and_view( config, 'create', name_prefix + (collection_name or member_name), path, 'POST') add_route_and_view( config, 'delete', name_prefix + member_name, path + id_name, 'DELETE', traverse=_traverse) if collection_name: add_route_and_view( config, 'update_many', name_prefix + (collection_name or member_name), path, 'PUT', traverse=_traverse) add_route_and_view( config, 'update_many', name_prefix + (collection_name or member_name), path, 'PATCH', traverse=_traverse) add_route_and_view( config, 'delete_many', name_prefix + (collection_name or member_name), path, 'DELETE', traverse=_traverse) return action_route
[ "def", "add_resource_routes", "(", "config", ",", "view", ",", "member_name", ",", "collection_name", ",", "*", "*", "kwargs", ")", ":", "view", "=", "maybe_dotted", "(", "view", ")", "path_prefix", "=", "kwargs", ".", "pop", "(", "'path_prefix'", ",", "''", ")", "name_prefix", "=", "kwargs", ".", "pop", "(", "'name_prefix'", ",", "''", ")", "if", "config", ".", "route_prefix", ":", "name_prefix", "=", "\"%s_%s\"", "%", "(", "config", ".", "route_prefix", ",", "name_prefix", ")", "if", "collection_name", ":", "id_name", "=", "'/{%s}'", "%", "(", "kwargs", ".", "pop", "(", "'id_name'", ",", "None", ")", "or", "DEFAULT_ID_NAME", ")", "else", ":", "id_name", "=", "''", "path", "=", "path_prefix", ".", "strip", "(", "'/'", ")", "+", "'/'", "+", "(", "collection_name", "or", "member_name", ")", "_factory", "=", "kwargs", ".", "pop", "(", "'factory'", ",", "None", ")", "# If factory is not set, than auth should be False", "_auth", "=", "kwargs", ".", "pop", "(", "'auth'", ",", "None", ")", "and", "_factory", "_traverse", "=", "(", "kwargs", ".", "pop", "(", "'traverse'", ",", "None", ")", "or", "id_name", ")", "if", "_factory", "else", "None", "action_route", "=", "{", "}", "added_routes", "=", "{", "}", "def", "add_route_and_view", "(", "config", ",", "action", ",", "route_name", ",", "path", ",", "request_method", ",", "*", "*", "route_kwargs", ")", ":", "if", "route_name", "not", "in", "added_routes", ":", "config", ".", "add_route", "(", "route_name", ",", "path", ",", "factory", "=", "_factory", ",", "request_method", "=", "[", "'GET'", ",", "'POST'", ",", "'PUT'", ",", "'PATCH'", ",", "'DELETE'", ",", "'OPTIONS'", "]", ",", "*", "*", "route_kwargs", ")", "added_routes", "[", "route_name", "]", "=", "path", "action_route", "[", "action", "]", "=", "route_name", "if", "_auth", ":", "permission", "=", "PERMISSIONS", "[", "action", "]", "else", ":", "permission", "=", "None", "config", ".", "add_view", "(", "view", "=", "view", ",", "attr", "=", "action", ",", "route_name", "=", "route_name", ",", "request_method", "=", "request_method", ",", "permission", "=", "permission", ",", "*", "*", "kwargs", ")", "config", ".", "commit", "(", ")", "if", "collection_name", "==", "member_name", ":", "collection_name", "=", "collection_name", "+", "'_collection'", "if", "collection_name", ":", "add_route_and_view", "(", "config", ",", "'index'", ",", "name_prefix", "+", "collection_name", ",", "path", ",", "'GET'", ")", "add_route_and_view", "(", "config", ",", "'collection_options'", ",", "name_prefix", "+", "collection_name", ",", "path", ",", "'OPTIONS'", ")", "add_route_and_view", "(", "config", ",", "'show'", ",", "name_prefix", "+", "member_name", ",", "path", "+", "id_name", ",", "'GET'", ",", "traverse", "=", "_traverse", ")", "add_route_and_view", "(", "config", ",", "'item_options'", ",", "name_prefix", "+", "member_name", ",", "path", "+", "id_name", ",", "'OPTIONS'", ",", "traverse", "=", "_traverse", ")", "add_route_and_view", "(", "config", ",", "'replace'", ",", "name_prefix", "+", "member_name", ",", "path", "+", "id_name", ",", "'PUT'", ",", "traverse", "=", "_traverse", ")", "add_route_and_view", "(", "config", ",", "'update'", ",", "name_prefix", "+", "member_name", ",", "path", "+", "id_name", ",", "'PATCH'", ",", "traverse", "=", "_traverse", ")", "add_route_and_view", "(", "config", ",", "'create'", ",", "name_prefix", "+", "(", "collection_name", "or", "member_name", ")", ",", "path", ",", "'POST'", ")", "add_route_and_view", "(", "config", ",", "'delete'", ",", "name_prefix", 
"+", "member_name", ",", "path", "+", "id_name", ",", "'DELETE'", ",", "traverse", "=", "_traverse", ")", "if", "collection_name", ":", "add_route_and_view", "(", "config", ",", "'update_many'", ",", "name_prefix", "+", "(", "collection_name", "or", "member_name", ")", ",", "path", ",", "'PUT'", ",", "traverse", "=", "_traverse", ")", "add_route_and_view", "(", "config", ",", "'update_many'", ",", "name_prefix", "+", "(", "collection_name", "or", "member_name", ")", ",", "path", ",", "'PATCH'", ",", "traverse", "=", "_traverse", ")", "add_route_and_view", "(", "config", ",", "'delete_many'", ",", "name_prefix", "+", "(", "collection_name", "or", "member_name", ")", ",", "path", ",", "'DELETE'", ",", "traverse", "=", "_traverse", ")", "return", "action_route" ]
``view`` is a dotted name of (or direct reference to) a Python view class, e.g. ``'my.package.views.MyView'``. ``member_name`` should be the appropriate singular version of the resource given your locale and used with members of the collection. ``collection_name`` will be used to refer to the resource collection methods and should be a plural version of the member_name argument. All keyword arguments are optional. ``path_prefix`` Prepends the URL path for the Route with the path_prefix given. This is most useful for cases where you want to mix resources or relations between resources. ``name_prefix`` Prepends the route names that are generated with the name_prefix given. Combined with the path_prefix option, it's easy to generate route names and paths that represent resources that are in relations. Example:: config.add_resource_routes( 'myproject.views:CategoryView', 'message', 'messages', path_prefix='/category/{category_id}', name_prefix="category_") # GET /category/7/messages/1 # has named route "category_message"
[ "view", "is", "a", "dotted", "name", "of", "(", "or", "direct", "reference", "to", ")", "a", "Python", "view", "class", "e", ".", "g", ".", "my", ".", "package", ".", "views", ".", "MyView", "." ]
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L57-L190
train
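The route wiring in `add_resource_routes` is driven by simple string arithmetic over member/collection names. A pure-Python sketch of that arithmetic follows, so the generated names and paths can be checked without a Pyramid Configurator; DEFAULT_ID_NAME is assumed to be 'id', and only the 'index' and 'show' actions are reproduced.

    DEFAULT_ID_NAME = 'id'  # assumption; mirrors the module-level constant

    def resource_paths(member_name, collection_name,
                       path_prefix='', name_prefix='', id_name=None):
        # Collection resources get an id placeholder; singular ones do not.
        id_part = '/{%s}' % (id_name or DEFAULT_ID_NAME) if collection_name else ''
        path = path_prefix.strip('/') + '/' + (collection_name or member_name)
        return {
            'index': (name_prefix + collection_name, path),
            'show': (name_prefix + member_name, path + id_part),
        }

    print(resource_paths('message', 'messages',
                         path_prefix='/category/{category_id}',
                         name_prefix='category_'))
    # {'index': ('category_messages', 'category/{category_id}/messages'),
    #  'show': ('category_message', 'category/{category_id}/messages/{id}')}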
ramses-tech/nefertari
nefertari/resource.py
get_default_view_path
def get_default_view_path(resource): "Returns the dotted path to the default view class." parts = [a.member_name for a in resource.ancestors] +\ [resource.collection_name or resource.member_name] if resource.prefix: parts.insert(-1, resource.prefix) view_file = '%s' % '_'.join(parts) view = '%s:%sView' % (view_file, snake2camel(view_file)) app_package_name = get_app_package_name(resource.config) return '%s.views.%s' % (app_package_name, view)
python
def get_default_view_path(resource): "Returns the dotted path to the default view class." parts = [a.member_name for a in resource.ancestors] +\ [resource.collection_name or resource.member_name] if resource.prefix: parts.insert(-1, resource.prefix) view_file = '%s' % '_'.join(parts) view = '%s:%sView' % (view_file, snake2camel(view_file)) app_package_name = get_app_package_name(resource.config) return '%s.views.%s' % (app_package_name, view)
[ "def", "get_default_view_path", "(", "resource", ")", ":", "parts", "=", "[", "a", ".", "member_name", "for", "a", "in", "resource", ".", "ancestors", "]", "+", "[", "resource", ".", "collection_name", "or", "resource", ".", "member_name", "]", "if", "resource", ".", "prefix", ":", "parts", ".", "insert", "(", "-", "1", ",", "resource", ".", "prefix", ")", "view_file", "=", "'%s'", "%", "'_'", ".", "join", "(", "parts", ")", "view", "=", "'%s:%sView'", "%", "(", "view_file", ",", "snake2camel", "(", "view_file", ")", ")", "app_package_name", "=", "get_app_package_name", "(", "resource", ".", "config", ")", "return", "'%s.views.%s'", "%", "(", "app_package_name", ",", "view", ")" ]
Returns the dotted path to the default view class.
[ "Returns", "the", "dotted", "path", "to", "the", "default", "view", "class", "." ]
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L193-L206
train
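`get_default_view_path` turns a resource's ancestry into a dotted view path. A runnable sketch of the same construction follows; snake2camel here is a minimal stand-in for nefertari's helper, and the resource attributes and app package name are faked for illustration.

    from collections import namedtuple

    Res = namedtuple('Res', 'ancestors collection_name member_name prefix')

    def snake2camel(name):
        # Assumed behavior: 'user_stories' -> 'UserStories'
        return ''.join(part.title() for part in name.split('_'))

    def default_view_path(resource, app_package_name='example_app'):
        parts = [a.member_name for a in resource.ancestors] + \
            [resource.collection_name or resource.member_name]
        if resource.prefix:
            parts.insert(-1, resource.prefix)
        view_file = '_'.join(parts)
        return '%s.views.%s:%sView' % (
            app_package_name, view_file, snake2camel(view_file))

    parent = Res(ancestors=[], collection_name='users',
                 member_name='user', prefix='')
    child = Res(ancestors=[parent], collection_name='stories',
                member_name='story', prefix='')
    print(default_view_path(child))
    # example_app.views.user_stories:UserStoriesView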
ramses-tech/nefertari
nefertari/resource.py
Resource.get_ancestors
def get_ancestors(self): "Returns the list of ancestor resources." if self._ancestors: return self._ancestors if not self.parent: return [] obj = self.resource_map.get(self.parent.uid) while obj and obj.member_name: self._ancestors.append(obj) obj = obj.parent self._ancestors.reverse() return self._ancestors
python
def get_ancestors(self): "Returns the list of ancestor resources." if self._ancestors: return self._ancestors if not self.parent: return [] obj = self.resource_map.get(self.parent.uid) while obj and obj.member_name: self._ancestors.append(obj) obj = obj.parent self._ancestors.reverse() return self._ancestors
[ "def", "get_ancestors", "(", "self", ")", ":", "if", "self", ".", "_ancestors", ":", "return", "self", ".", "_ancestors", "if", "not", "self", ".", "parent", ":", "return", "[", "]", "obj", "=", "self", ".", "resource_map", ".", "get", "(", "self", ".", "parent", ".", "uid", ")", "while", "obj", "and", "obj", ".", "member_name", ":", "self", ".", "_ancestors", ".", "append", "(", "obj", ")", "obj", "=", "obj", ".", "parent", "self", ".", "_ancestors", ".", "reverse", "(", ")", "return", "self", ".", "_ancestors" ]
Returns the list of ancestor resources.
[ "Returns", "the", "list", "of", "ancestor", "resources", "." ]
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L231-L247
train
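`get_ancestors` is a parent-pointer walk that stops at the member_name-less root and then reverses so the list reads root-first. A self-contained sketch of that walk, using a plain class instead of Resource:

    class Node:
        def __init__(self, member_name, parent=None):
            self.member_name = member_name
            self.parent = parent

        def ancestors(self):
            out, obj = [], self.parent
            while obj and obj.member_name:  # root has no member_name
                out.append(obj)
                obj = obj.parent
            out.reverse()                   # root-first ordering
            return out

    root = Node('')
    user = Node('user', parent=root)
    story = Node('story', parent=user)
    print([a.member_name for a in story.ancestors()])  # ['user']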
ramses-tech/nefertari
nefertari/resource.py
Resource.add
def add(self, member_name, collection_name='', parent=None, uid='', **kwargs): """ :param member_name: singular name of the resource. It should be the appropriate singular version of the resource given your locale and used with members of the collection. :param collection_name: plural name of the resource. It will be used to refer to the resource collection methods and should be a plural version of the ``member_name`` argument. Note: if collection_name is empty, it means resource is singular :param parent: parent resource name or object. :param uid: unique name for the resource :param kwargs: view: custom view to overwrite the default one. the rest of the keyward arguments are passed to add_resource_routes call. :return: ResourceMap object """ # self is the parent resource on which this method is called. parent = (self.resource_map.get(parent) if type(parent) is str else parent or self) prefix = kwargs.pop('prefix', '') uid = (uid or ':'.join(filter(bool, [parent.uid, prefix, member_name]))) if uid in self.resource_map: raise ValueError('%s already exists in resource map' % uid) # Use id_name of parent for singular views to make url generation # easier id_name = kwargs.get('id_name', '') if not id_name and parent: id_name = parent.id_name new_resource = Resource(self.config, member_name=member_name, collection_name=collection_name, parent=parent, uid=uid, id_name=id_name, prefix=prefix) view = maybe_dotted( kwargs.pop('view', None) or get_default_view_path(new_resource)) for name, val in kwargs.pop('view_args', {}).items(): setattr(view, name, val) root_resource = self.config.get_root_resource() view.root_resource = root_resource new_resource.view = view path_segs = [] kwargs['path_prefix'] = '' for res in new_resource.ancestors: if not res.is_singular: if res.id_name: id_full = res.id_name else: id_full = "%s_%s" % (res.member_name, DEFAULT_ID_NAME) path_segs.append('%s/{%s}' % (res.collection_name, id_full)) else: path_segs.append(res.member_name) if path_segs: kwargs['path_prefix'] = '/'.join(path_segs) if prefix: kwargs['path_prefix'] += '/' + prefix name_segs = [a.member_name for a in new_resource.ancestors] name_segs.insert(1, prefix) name_segs = [seg for seg in name_segs if seg] if name_segs: kwargs['name_prefix'] = '_'.join(name_segs) + ':' new_resource.renderer = kwargs.setdefault( 'renderer', view._default_renderer) kwargs.setdefault('auth', root_resource.auth) kwargs.setdefault('factory', root_resource.default_factory) _factory = maybe_dotted(kwargs['factory']) kwargs['auth'] = kwargs.get('auth', root_resource.auth) kwargs['http_cache'] = kwargs.get( 'http_cache', root_resource.http_cache) new_resource.action_route_map = add_resource_routes( self.config, view, member_name, collection_name, **kwargs) self.resource_map[uid] = new_resource # add all route names for this resource as keys in the dict, # so its easy to find it in the view. self.resource_map.update(dict.fromkeys( list(new_resource.action_route_map.values()), new_resource)) # Store resources in {modelName: resource} map if: # * Its view has Model defined # * It's not singular # * Its parent is root or it's not already stored model = new_resource.view.Model is_collection = model is not None and not new_resource.is_singular if is_collection: is_needed = (model.__name__ not in self.model_collections or new_resource.parent is root_resource) if is_needed: self.model_collections[model.__name__] = new_resource parent.children.append(new_resource) view._resource = new_resource view._factory = _factory return new_resource
python
def add(self, member_name, collection_name='', parent=None, uid='', **kwargs): """ :param member_name: singular name of the resource. It should be the appropriate singular version of the resource given your locale and used with members of the collection. :param collection_name: plural name of the resource. It will be used to refer to the resource collection methods and should be a plural version of the ``member_name`` argument. Note: if collection_name is empty, it means resource is singular :param parent: parent resource name or object. :param uid: unique name for the resource :param kwargs: view: custom view to overwrite the default one. the rest of the keyward arguments are passed to add_resource_routes call. :return: ResourceMap object """ # self is the parent resource on which this method is called. parent = (self.resource_map.get(parent) if type(parent) is str else parent or self) prefix = kwargs.pop('prefix', '') uid = (uid or ':'.join(filter(bool, [parent.uid, prefix, member_name]))) if uid in self.resource_map: raise ValueError('%s already exists in resource map' % uid) # Use id_name of parent for singular views to make url generation # easier id_name = kwargs.get('id_name', '') if not id_name and parent: id_name = parent.id_name new_resource = Resource(self.config, member_name=member_name, collection_name=collection_name, parent=parent, uid=uid, id_name=id_name, prefix=prefix) view = maybe_dotted( kwargs.pop('view', None) or get_default_view_path(new_resource)) for name, val in kwargs.pop('view_args', {}).items(): setattr(view, name, val) root_resource = self.config.get_root_resource() view.root_resource = root_resource new_resource.view = view path_segs = [] kwargs['path_prefix'] = '' for res in new_resource.ancestors: if not res.is_singular: if res.id_name: id_full = res.id_name else: id_full = "%s_%s" % (res.member_name, DEFAULT_ID_NAME) path_segs.append('%s/{%s}' % (res.collection_name, id_full)) else: path_segs.append(res.member_name) if path_segs: kwargs['path_prefix'] = '/'.join(path_segs) if prefix: kwargs['path_prefix'] += '/' + prefix name_segs = [a.member_name for a in new_resource.ancestors] name_segs.insert(1, prefix) name_segs = [seg for seg in name_segs if seg] if name_segs: kwargs['name_prefix'] = '_'.join(name_segs) + ':' new_resource.renderer = kwargs.setdefault( 'renderer', view._default_renderer) kwargs.setdefault('auth', root_resource.auth) kwargs.setdefault('factory', root_resource.default_factory) _factory = maybe_dotted(kwargs['factory']) kwargs['auth'] = kwargs.get('auth', root_resource.auth) kwargs['http_cache'] = kwargs.get( 'http_cache', root_resource.http_cache) new_resource.action_route_map = add_resource_routes( self.config, view, member_name, collection_name, **kwargs) self.resource_map[uid] = new_resource # add all route names for this resource as keys in the dict, # so its easy to find it in the view. self.resource_map.update(dict.fromkeys( list(new_resource.action_route_map.values()), new_resource)) # Store resources in {modelName: resource} map if: # * Its view has Model defined # * It's not singular # * Its parent is root or it's not already stored model = new_resource.view.Model is_collection = model is not None and not new_resource.is_singular if is_collection: is_needed = (model.__name__ not in self.model_collections or new_resource.parent is root_resource) if is_needed: self.model_collections[model.__name__] = new_resource parent.children.append(new_resource) view._resource = new_resource view._factory = _factory return new_resource
[ "def", "add", "(", "self", ",", "member_name", ",", "collection_name", "=", "''", ",", "parent", "=", "None", ",", "uid", "=", "''", ",", "*", "*", "kwargs", ")", ":", "# self is the parent resource on which this method is called.", "parent", "=", "(", "self", ".", "resource_map", ".", "get", "(", "parent", ")", "if", "type", "(", "parent", ")", "is", "str", "else", "parent", "or", "self", ")", "prefix", "=", "kwargs", ".", "pop", "(", "'prefix'", ",", "''", ")", "uid", "=", "(", "uid", "or", "':'", ".", "join", "(", "filter", "(", "bool", ",", "[", "parent", ".", "uid", ",", "prefix", ",", "member_name", "]", ")", ")", ")", "if", "uid", "in", "self", ".", "resource_map", ":", "raise", "ValueError", "(", "'%s already exists in resource map'", "%", "uid", ")", "# Use id_name of parent for singular views to make url generation", "# easier", "id_name", "=", "kwargs", ".", "get", "(", "'id_name'", ",", "''", ")", "if", "not", "id_name", "and", "parent", ":", "id_name", "=", "parent", ".", "id_name", "new_resource", "=", "Resource", "(", "self", ".", "config", ",", "member_name", "=", "member_name", ",", "collection_name", "=", "collection_name", ",", "parent", "=", "parent", ",", "uid", "=", "uid", ",", "id_name", "=", "id_name", ",", "prefix", "=", "prefix", ")", "view", "=", "maybe_dotted", "(", "kwargs", ".", "pop", "(", "'view'", ",", "None", ")", "or", "get_default_view_path", "(", "new_resource", ")", ")", "for", "name", ",", "val", "in", "kwargs", ".", "pop", "(", "'view_args'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "setattr", "(", "view", ",", "name", ",", "val", ")", "root_resource", "=", "self", ".", "config", ".", "get_root_resource", "(", ")", "view", ".", "root_resource", "=", "root_resource", "new_resource", ".", "view", "=", "view", "path_segs", "=", "[", "]", "kwargs", "[", "'path_prefix'", "]", "=", "''", "for", "res", "in", "new_resource", ".", "ancestors", ":", "if", "not", "res", ".", "is_singular", ":", "if", "res", ".", "id_name", ":", "id_full", "=", "res", ".", "id_name", "else", ":", "id_full", "=", "\"%s_%s\"", "%", "(", "res", ".", "member_name", ",", "DEFAULT_ID_NAME", ")", "path_segs", ".", "append", "(", "'%s/{%s}'", "%", "(", "res", ".", "collection_name", ",", "id_full", ")", ")", "else", ":", "path_segs", ".", "append", "(", "res", ".", "member_name", ")", "if", "path_segs", ":", "kwargs", "[", "'path_prefix'", "]", "=", "'/'", ".", "join", "(", "path_segs", ")", "if", "prefix", ":", "kwargs", "[", "'path_prefix'", "]", "+=", "'/'", "+", "prefix", "name_segs", "=", "[", "a", ".", "member_name", "for", "a", "in", "new_resource", ".", "ancestors", "]", "name_segs", ".", "insert", "(", "1", ",", "prefix", ")", "name_segs", "=", "[", "seg", "for", "seg", "in", "name_segs", "if", "seg", "]", "if", "name_segs", ":", "kwargs", "[", "'name_prefix'", "]", "=", "'_'", ".", "join", "(", "name_segs", ")", "+", "':'", "new_resource", ".", "renderer", "=", "kwargs", ".", "setdefault", "(", "'renderer'", ",", "view", ".", "_default_renderer", ")", "kwargs", ".", "setdefault", "(", "'auth'", ",", "root_resource", ".", "auth", ")", "kwargs", ".", "setdefault", "(", "'factory'", ",", "root_resource", ".", "default_factory", ")", "_factory", "=", "maybe_dotted", "(", "kwargs", "[", "'factory'", "]", ")", "kwargs", "[", "'auth'", "]", "=", "kwargs", ".", "get", "(", "'auth'", ",", "root_resource", ".", "auth", ")", "kwargs", "[", "'http_cache'", "]", "=", "kwargs", ".", "get", "(", "'http_cache'", ",", "root_resource", ".", "http_cache", ")", "new_resource", ".", 
"action_route_map", "=", "add_resource_routes", "(", "self", ".", "config", ",", "view", ",", "member_name", ",", "collection_name", ",", "*", "*", "kwargs", ")", "self", ".", "resource_map", "[", "uid", "]", "=", "new_resource", "# add all route names for this resource as keys in the dict,", "# so its easy to find it in the view.", "self", ".", "resource_map", ".", "update", "(", "dict", ".", "fromkeys", "(", "list", "(", "new_resource", ".", "action_route_map", ".", "values", "(", ")", ")", ",", "new_resource", ")", ")", "# Store resources in {modelName: resource} map if:", "# * Its view has Model defined", "# * It's not singular", "# * Its parent is root or it's not already stored", "model", "=", "new_resource", ".", "view", ".", "Model", "is_collection", "=", "model", "is", "not", "None", "and", "not", "new_resource", ".", "is_singular", "if", "is_collection", ":", "is_needed", "=", "(", "model", ".", "__name__", "not", "in", "self", ".", "model_collections", "or", "new_resource", ".", "parent", "is", "root_resource", ")", "if", "is_needed", ":", "self", ".", "model_collections", "[", "model", ".", "__name__", "]", "=", "new_resource", "parent", ".", "children", ".", "append", "(", "new_resource", ")", "view", ".", "_resource", "=", "new_resource", "view", ".", "_factory", "=", "_factory", "return", "new_resource" ]
:param member_name: singular name of the resource. It should be the appropriate singular version of the resource given your locale and used with members of the collection. :param collection_name: plural name of the resource. It will be used to refer to the resource collection methods and should be a plural version of the ``member_name`` argument. Note: if collection_name is empty, the resource is singular :param parent: parent resource name or object. :param uid: unique name for the resource :param kwargs: view: custom view to overwrite the default one. the rest of the keyword arguments are passed to the add_resource_routes call. :return: Resource object
[ ":", "param", "member_name", ":", "singular", "name", "of", "the", "resource", ".", "It", "should", "be", "the", "appropriate", "singular", "version", "of", "the", "resource", "given", "your", "locale", "and", "used", "with", "members", "of", "the", "collection", "." ]
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L257-L379
train
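Two small computations carry most of `Resource.add`: the uid is the colon-joined chain of non-empty name parts, and the path_prefix nests collection ancestors as 'collection/{id}' segments while singular ancestors contribute only their member name. A runnable sketch of both, with resources reduced to tuples and DEFAULT_ID_NAME assumed to be 'id':

    DEFAULT_ID_NAME = 'id'  # assumption; mirrors the module-level constant

    def make_uid(parent_uid, prefix, member_name):
        # Empty parts (e.g. no prefix) are dropped by filter(bool, ...).
        return ':'.join(filter(bool, [parent_uid, prefix, member_name]))

    def path_prefix_for(ancestors):
        # ancestors: list of (member_name, collection_name, id_name) tuples
        segs = []
        for member, collection, id_name in ancestors:
            if collection:  # non-singular resource contributes an id segment
                segs.append('%s/{%s}' % (
                    collection, id_name or '%s_%s' % (member, DEFAULT_ID_NAME)))
            else:           # singular resource contributes just its name
                segs.append(member)
        return '/'.join(segs)

    print(make_uid('user', '', 'story'))             # user:story
    print(path_prefix_for([('user', 'users', '')]))  # users/{user_id}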
ramses-tech/nefertari
nefertari/resource.py
Resource.add_from_child
def add_from_child(self, resource, **kwargs): """ Add a resource and all of its child resources to the current resource. """ new_resource = self.add( resource.member_name, resource.collection_name, **kwargs) for child in resource.children: new_resource.add_from_child(child, **kwargs)
python
def add_from_child(self, resource, **kwargs): """ Add a resource and all of its child resources to the current resource. """ new_resource = self.add( resource.member_name, resource.collection_name, **kwargs) for child in resource.children: new_resource.add_from_child(child, **kwargs)
[ "def", "add_from_child", "(", "self", ",", "resource", ",", "*", "*", "kwargs", ")", ":", "new_resource", "=", "self", ".", "add", "(", "resource", ".", "member_name", ",", "resource", ".", "collection_name", ",", "*", "*", "kwargs", ")", "for", "child", "in", "resource", ".", "children", ":", "new_resource", ".", "add_from_child", "(", "child", ",", "*", "*", "kwargs", ")" ]
Add a resource and all of its child resources to the current resource.
[ "Add", "a", "resource", "with", "its", "all", "children", "resources", "to", "the", "current", "resource", "." ]
c7caffe11576c11aa111adbdbadeff70ce66b1dd
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/resource.py#L381-L389
train
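`add_from_child` is a straightforward recursive subtree copy: add the node to the new parent, then recurse into its children. A minimal sketch of the same shape, using plain dicts instead of Resource objects:

    def copy_subtree(resource, into):
        node = {'name': resource['name'], 'children': []}
        into['children'].append(node)
        for child in resource['children']:
            copy_subtree(child, node)  # recurse under the freshly added node

    tree = {'name': 'user', 'children': [{'name': 'story', 'children': []}]}
    target = {'name': 'root', 'children': []}
    copy_subtree(tree, target)
    print(target['children'][0]['children'][0]['name'])  # story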
opengridcc/opengrid
opengrid/datasets/datasets.py
DatasetContainer.add
def add(self, path): """ Add the path of a data set to the list of available sets NOTE: a data set is assumed to be a pickled and gzip compressed Pandas DataFrame Parameters ---------- path : str """ name_with_ext = os.path.split(path)[1] # split directory and filename name = name_with_ext.split('.')[0] # remove extension self.list.update({name: path})
python
def add(self, path): """ Add the path of a data set to the list of available sets NOTE: a data set is assumed to be a pickled and gzip compressed Pandas DataFrame Parameters ---------- path : str """ name_with_ext = os.path.split(path)[1] # split directory and filename name = name_with_ext.split('.')[0] # remove extension self.list.update({name: path})
[ "def", "add", "(", "self", ",", "path", ")", ":", "name_with_ext", "=", "os", ".", "path", ".", "split", "(", "path", ")", "[", "1", "]", "# split directory and filename", "name", "=", "name_with_ext", ".", "split", "(", "'.'", ")", "[", "0", "]", "# remove extension", "self", ".", "list", ".", "update", "(", "{", "name", ":", "path", "}", ")" ]
Add the path of a data set to the list of available sets NOTE: a data set is assumed to be a pickled and gzip compressed Pandas DataFrame Parameters ---------- path : str
[ "Add", "the", "path", "of", "a", "data", "set", "to", "the", "list", "of", "available", "sets" ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/datasets/datasets.py#L27-L40
train
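The name derivation in `DatasetContainer.add` is worth noting: split('.')[0] keeps only the part before the first dot, which is what strips compound suffixes like '.pkl.gz'. A tiny runnable sketch (the path is illustrative):

    import os

    def dataset_name(path):
        name_with_ext = os.path.split(path)[1]  # drop the directory
        return name_with_ext.split('.')[0]      # drop every extension

    print(dataset_name('/data/elec_power.pkl.gz'))  # elec_power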
opengridcc/opengrid
opengrid/datasets/datasets.py
DatasetContainer.unpack
def unpack(self, name): """ Unpacks a data set to a Pandas DataFrame Parameters ---------- name : str call `.list` to see all available datasets Returns ------- pd.DataFrame """ path = self.list[name] df = pd.read_pickle(path, compression='gzip') return df
python
def unpack(self, name): """ Unpacks a data set to a Pandas DataFrame Parameters ---------- name : str call `.list` to see all available datasets Returns ------- pd.DataFrame """ path = self.list[name] df = pd.read_pickle(path, compression='gzip') return df
[ "def", "unpack", "(", "self", ",", "name", ")", ":", "path", "=", "self", ".", "list", "[", "name", "]", "df", "=", "pd", ".", "read_pickle", "(", "path", ",", "compression", "=", "'gzip'", ")", "return", "df" ]
Unpacks a data set to a Pandas DataFrame Parameters ---------- name : str call `.list` to see all available datasets Returns ------- pd.DataFrame
[ "Unpacks", "a", "data", "set", "to", "a", "Pandas", "DataFrame" ]
69b8da3c8fcea9300226c45ef0628cd6d4307651
https://github.com/opengridcc/opengrid/blob/69b8da3c8fcea9300226c45ef0628cd6d4307651/opengrid/datasets/datasets.py#L42-L57
train
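A round-trip sketch for the gzip-pickled storage format that `unpack` assumes; the file name is illustrative, and the write side mirrors what a data set file would contain.

    import pandas as pd

    df = pd.DataFrame({'power': [1.0, 2.5, 3.2]})
    df.to_pickle('demo.pkl.gz', compression='gzip')            # write a data set
    restored = pd.read_pickle('demo.pkl.gz', compression='gzip')
    print(restored.equals(df))  # True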
christophertbrown/bioscripts
ctbBio/sixframe.py
six_frame
def six_frame(genome, table, minimum = 10): """ translate each sequence into six reading frames """ for seq in parse_fasta(genome): dna = Seq(seq[1].upper().replace('U', 'T'), IUPAC.ambiguous_dna) counter = 0 for sequence in ['f', dna], ['rc', dna.reverse_complement()]: direction, sequence = sequence for frame in range(0, 3): for prot in \ sequence[frame:].\ translate(table = table, to_stop = False).split('*'): if len(prot) < minimum: continue counter += 1 header = '%s_%s table=%s frame=%s-%s %s' % \ (seq[0].split()[0], counter, table, frame+1, \ direction, ' '.join(seq[0].split()[1:])) yield [header, prot]
python
def six_frame(genome, table, minimum = 10): """ translate each sequence into six reading frames """ for seq in parse_fasta(genome): dna = Seq(seq[1].upper().replace('U', 'T'), IUPAC.ambiguous_dna) counter = 0 for sequence in ['f', dna], ['rc', dna.reverse_complement()]: direction, sequence = sequence for frame in range(0, 3): for prot in \ sequence[frame:].\ translate(table = table, to_stop = False).split('*'): if len(prot) < minimum: continue counter += 1 header = '%s_%s table=%s frame=%s-%s %s' % \ (seq[0].split()[0], counter, table, frame+1, \ direction, ' '.join(seq[0].split()[1:])) yield [header, prot]
[ "def", "six_frame", "(", "genome", ",", "table", ",", "minimum", "=", "10", ")", ":", "for", "seq", "in", "parse_fasta", "(", "genome", ")", ":", "dna", "=", "Seq", "(", "seq", "[", "1", "]", ".", "upper", "(", ")", ".", "replace", "(", "'U'", ",", "'T'", ")", ",", "IUPAC", ".", "ambiguous_dna", ")", "counter", "=", "0", "for", "sequence", "in", "[", "'f'", ",", "dna", "]", ",", "[", "'rc'", ",", "dna", ".", "reverse_complement", "(", ")", "]", ":", "direction", ",", "sequence", "=", "sequence", "for", "frame", "in", "range", "(", "0", ",", "3", ")", ":", "for", "prot", "in", "sequence", "[", "frame", ":", "]", ".", "translate", "(", "table", "=", "table", ",", "to_stop", "=", "False", ")", ".", "split", "(", "'*'", ")", ":", "if", "len", "(", "prot", ")", "<", "minimum", ":", "continue", "counter", "+=", "1", "header", "=", "'%s_%s table=%s frame=%s-%s %s'", "%", "(", "seq", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ",", "counter", ",", "table", ",", "frame", "+", "1", ",", "direction", ",", "' '", ".", "join", "(", "seq", "[", "0", "]", ".", "split", "(", ")", "[", "1", ":", "]", ")", ")", "yield", "[", "header", ",", "prot", "]" ]
translate each sequence into six reading frames
[ "translate", "each", "sequence", "into", "six", "reading", "frames" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/sixframe.py#L13-L32
train
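A compact sketch of the six-frame idea in `six_frame`: translate frames 0-2 of the sequence and of its reverse complement, splitting on stop codons. Plain Bio.Seq objects are used because newer Biopython releases dropped the IUPAC alphabets the original imports; the sequence and translation table are illustrative, and partial codons are trimmed to avoid a Biopython warning the original tolerates.

    from Bio.Seq import Seq

    dna = Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGA'.replace('U', 'T'))
    for direction, seq in (('f', dna), ('rc', dna.reverse_complement())):
        for frame in range(3):
            sub = seq[frame:]
            sub = sub[:len(sub) - len(sub) % 3]  # trim the partial codon
            for prot in sub.translate(table=11, to_stop=False).split('*'):
                if prot:  # the original also applies a minimum-length filter
                    print(direction, frame + 1, prot)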
jay-johnson/network-pipeline
network_pipeline/scripts/network_agent.py
publish_processed_network_packets
def publish_processed_network_packets( name="not-set", task_queue=None, result_queue=None, need_response=False, shutdown_msg="SHUTDOWN"): """ # Redis/RabbitMQ/SQS messaging endpoints for pub-sub routing_key = ev("PUBLISH_EXCHANGE", "reporting.accounts") queue_name = ev("PUBLISH_QUEUE", "reporting.accounts") auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/15") serializer = "json" """ # these keys need to be cycled to prevent # exploiting static keys filter_key = ev("IGNORE_KEY", INCLUDED_IGNORE_KEY) forward_host = ev("FORWARD_HOST", "127.0.0.1") forward_port = int(ev("FORWARD_PORT", "80")) include_filter_key = ev("FILTER_KEY", "") if not include_filter_key and filter_key: include_filter_key = filter_key filter_keys = [filter_key] log.info(("START consumer={} " "forward={}:{} with " "key={} filters={}") .format(name, forward_host, forward_port, include_filter_key, filter_key)) forward_skt = None not_done = True while not_done: if not forward_skt: forward_skt = connect_forwarder( forward_host=forward_host, forward_port=forward_port) next_task = task_queue.get() if next_task: if str(next_task) == shutdown_msg: # Poison pill for shutting down log.info(("{}: DONE CALLBACK " "Exiting msg={}") .format(name, next_task)) task_queue.task_done() break # end of handling shutdown case try: log.debug(("{} parsing") .format(name)) source = next_task.source packet = next_task.payload if not packet: log.error(("{} invalid task found " "{} missing payload") .format(name, next_task)) break log.debug(("{} found msg from src={}") .format(name, source)) network_data = parse_network_data( data_packet=packet, include_filter_key=include_filter_key, filter_keys=filter_keys) if network_data["status"] == VALID: if network_data["data_type"] == TCP \ or network_data["data_type"] == UDP \ or network_data["data_type"] == ARP \ or network_data["data_type"] == ICMP: log.info(("{} valid={} packet={} " "data={}") .format(name, network_data["id"], network_data["data_type"], network_data["target_data"])) if not forward_skt: forward_skt = connect_forwarder( forward_host=forward_host, forward_port=forward_port) if forward_skt: if network_data["stream"]: sent = False while not sent: try: log.info("sending={}".format( network_data["stream"])) send_msg( forward_skt, network_data["stream"] .encode("utf-8")) sent = True except Exception as e: sent = False time.sleep(0.5) try: forward_skt.close() forward_skt = None except Exception as w: forward_skt = None forward_skt = connect_forwarder( forward_host=forward_host, forward_port=forward_port) # end of reconnecting log.info("sent={}".format( network_data["stream"])) if need_response: log.info("receiving") cdr_res = forward_skt.recv(1024) log.info(("cdr - res{}") .format(cdr_res)) else: log.info(("{} EMPTY stream={} " "error={} status={}") .format( name, network_data["stream"], network_data["err"], network_data["status"])) else: log.info(("{} not_supported valid={} " "packet data_type={} status={}") .format(name, network_data["id"], network_data["data_type"], network_data["status"])) elif network_data["status"] == FILTERED: log.info(("{} filtered={} status={}") .format(name, network_data["filtered"], network_data["status"])) else: if network_data["status"] == INVALID: log.info(("{} invalid={} packet={} " "error={} status={}") .format(name, network_data["id"], network_data["data_type"], network_data["error"], network_data["status"])) else: log.info(("{} unknown={} packet={} " "error={} status={}") .format(name, network_data["id"], network_data["data_type"], network_data["error"], network_data["status"])) # end of if valid or not data except KeyboardInterrupt as k: log.info(("{} stopping") .format(name)) break except Exception as e: log.error(("{} failed packaging packet to forward " "with ex={}") .format(name, e)) break # end of try/ex during payload processing # end of if found a next_task log.info(("Consumer: {} {}") .format(name, next_task)) task_queue.task_done() if need_response: answer = "processed: {}".format(next_task()) result_queue.put(answer) # end of while if forward_skt: try: forward_skt.close() log.info("CLOSED connection") forward_skt = None except Exception: log.info("CLOSED connection") # end of cleaning up forwarding socket log.info("{} Done".format(name)) return
python
def publish_processed_network_packets( name="not-set", task_queue=None, result_queue=None, need_response=False, shutdown_msg="SHUTDOWN"): """ # Redis/RabbitMQ/SQS messaging endpoints for pub-sub routing_key = ev("PUBLISH_EXCHANGE", "reporting.accounts") queue_name = ev("PUBLISH_QUEUE", "reporting.accounts") auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/15") serializer = "json" """ # these keys need to be cycled to prevent # exploiting static keys filter_key = ev("IGNORE_KEY", INCLUDED_IGNORE_KEY) forward_host = ev("FORWARD_HOST", "127.0.0.1") forward_port = int(ev("FORWARD_PORT", "80")) include_filter_key = ev("FILTER_KEY", "") if not include_filter_key and filter_key: include_filter_key = filter_key filter_keys = [filter_key] log.info(("START consumer={} " "forward={}:{} with " "key={} filters={}") .format(name, forward_host, forward_port, include_filter_key, filter_key)) forward_skt = None not_done = True while not_done: if not forward_skt: forward_skt = connect_forwarder( forward_host=forward_host, forward_port=forward_port) next_task = task_queue.get() if next_task: if str(next_task) == shutdown_msg: # Poison pill for shutting down log.info(("{}: DONE CALLBACK " "Exiting msg={}") .format(name, next_task)) task_queue.task_done() break # end of handling shutdown case try: log.debug(("{} parsing") .format(name)) source = next_task.source packet = next_task.payload if not packet: log.error(("{} invalid task found " "{} missing payload") .format(name, next_task)) break log.debug(("{} found msg from src={}") .format(name, source)) network_data = parse_network_data( data_packet=packet, include_filter_key=include_filter_key, filter_keys=filter_keys) if network_data["status"] == VALID: if network_data["data_type"] == TCP \ or network_data["data_type"] == UDP \ or network_data["data_type"] == ARP \ or network_data["data_type"] == ICMP: log.info(("{} valid={} packet={} " "data={}") .format(name, network_data["id"], network_data["data_type"], network_data["target_data"])) if not forward_skt: forward_skt = connect_forwarder( forward_host=forward_host, forward_port=forward_port) if forward_skt: if network_data["stream"]: sent = False while not sent: try: log.info("sending={}".format( network_data["stream"])) send_msg( forward_skt, network_data["stream"] .encode("utf-8")) sent = True except Exception as e: sent = False time.sleep(0.5) try: forward_skt.close() forward_skt = None except Exception as w: forward_skt = None forward_skt = connect_forwarder( forward_host=forward_host, forward_port=forward_port) # end of reconnecting log.info("sent={}".format( network_data["stream"])) if need_response: log.info("receiving") cdr_res = forward_skt.recv(1024) log.info(("cdr - res{}") .format(cdr_res)) else: log.info(("{} EMPTY stream={} " "error={} status={}") .format( name, network_data["stream"], network_data["err"], network_data["status"])) else: log.info(("{} not_supported valid={} " "packet data_type={} status={}") .format(name, network_data["id"], network_data["data_type"], network_data["status"])) elif network_data["status"] == FILTERED: log.info(("{} filtered={} status={}") .format(name, network_data["filtered"], network_data["status"])) else: if network_data["status"] == INVALID: log.info(("{} invalid={} packet={} " "error={} status={}") .format(name, network_data["id"], network_data["data_type"], network_data["error"], network_data["status"])) else: log.info(("{} unknown={} packet={} " "error={} status={}") .format(name, network_data["id"], network_data["data_type"], network_data["error"], network_data["status"])) # end of if valid or not data except KeyboardInterrupt as k: log.info(("{} stopping") .format(name)) break except Exception as e: log.error(("{} failed packaging packet to forward " "with ex={}") .format(name, e)) break # end of try/ex during payload processing # end of if found a next_task log.info(("Consumer: {} {}") .format(name, next_task)) task_queue.task_done() if need_response: answer = "processed: {}".format(next_task()) result_queue.put(answer) # end of while if forward_skt: try: forward_skt.close() log.info("CLOSED connection") forward_skt = None except Exception: log.info("CLOSED connection") # end of cleaning up forwarding socket log.info("{} Done".format(name)) return
[ "def", "publish_processed_network_packets", "(", "name", "=", "\"not-set\"", ",", "task_queue", "=", "None", ",", "result_queue", "=", "None", ",", "need_response", "=", "False", ",", "shutdown_msg", "=", "\"SHUTDOWN\"", ")", ":", "# these keys need to be cycled to prevent", "# exploiting static keys", "filter_key", "=", "ev", "(", "\"IGNORE_KEY\"", ",", "INCLUDED_IGNORE_KEY", ")", "forward_host", "=", "ev", "(", "\"FORWARD_HOST\"", ",", "\"127.0.0.1\"", ")", "forward_port", "=", "int", "(", "ev", "(", "\"FORWARD_PORT\"", ",", "\"80\"", ")", ")", "include_filter_key", "=", "ev", "(", "\"FILTER_KEY\"", ",", "\"\"", ")", "if", "not", "include_filter_key", "and", "filter_key", ":", "include_filter_key", "=", "filter_key", "filter_keys", "=", "[", "filter_key", "]", "log", ".", "info", "(", "(", "\"START consumer={} \"", "\"forward={}:{} with \"", "\"key={} filters={}\"", ")", ".", "format", "(", "name", ",", "forward_host", ",", "forward_port", ",", "include_filter_key", ",", "filter_key", ")", ")", "forward_skt", "=", "None", "not_done", "=", "True", "while", "not_done", ":", "if", "not", "forward_skt", ":", "forward_skt", "=", "connect_forwarder", "(", "forward_host", "=", "forward_host", ",", "forward_port", "=", "forward_port", ")", "next_task", "=", "task_queue", ".", "get", "(", ")", "if", "next_task", ":", "if", "str", "(", "next_task", ")", "==", "shutdown_msg", ":", "# Poison pill for shutting down", "log", ".", "info", "(", "(", "\"{}: DONE CALLBACK \"", "\"Exiting msg={}\"", ")", ".", "format", "(", "name", ",", "next_task", ")", ")", "task_queue", ".", "task_done", "(", ")", "break", "# end of handling shutdown case", "try", ":", "log", ".", "debug", "(", "(", "\"{} parsing\"", ")", ".", "format", "(", "name", ")", ")", "source", "=", "next_task", ".", "source", "packet", "=", "next_task", ".", "payload", "if", "not", "packet", ":", "log", ".", "error", "(", "(", "\"{} invalid task found \"", "\"{} missing payload\"", ")", ".", "format", "(", "name", ",", "next_task", ")", ")", "break", "log", ".", "debug", "(", "(", "\"{} found msg from src={}\"", ")", ".", "format", "(", "name", ",", "source", ")", ")", "network_data", "=", "parse_network_data", "(", "data_packet", "=", "packet", ",", "include_filter_key", "=", "include_filter_key", ",", "filter_keys", "=", "filter_keys", ")", "if", "network_data", "[", "\"status\"", "]", "==", "VALID", ":", "if", "network_data", "[", "\"data_type\"", "]", "==", "TCP", "or", "network_data", "[", "\"data_type\"", "]", "==", "UDP", "or", "network_data", "[", "\"data_type\"", "]", "==", "ARP", "or", "network_data", "[", "\"data_type\"", "]", "==", "ICMP", ":", "log", ".", "info", "(", "(", "\"{} valid={} packet={} \"", "\"data={}\"", ")", ".", "format", "(", "name", ",", "network_data", "[", "\"id\"", "]", ",", "network_data", "[", "\"data_type\"", "]", ",", "network_data", "[", "\"target_data\"", "]", ")", ")", "if", "not", "forward_skt", ":", "forward_skt", "=", "connect_forwarder", "(", "forward_host", "=", "forward_host", ",", "forward_port", "=", "forward_port", ")", "if", "forward_skt", ":", "if", "network_data", "[", "\"stream\"", "]", ":", "sent", "=", "False", "while", "not", "sent", ":", "try", ":", "log", ".", "info", "(", "\"sending={}\"", ".", "format", "(", "network_data", "[", "\"stream\"", "]", ")", ")", "send_msg", "(", "forward_skt", ",", "network_data", "[", "\"stream\"", "]", ".", "encode", "(", "\"utf-8\"", ")", ")", "sent", "=", "True", "except", "Exception", "as", "e", ":", "sent", "=", "False", "time", ".", "sleep", "(", 
"0.5", ")", "try", ":", "forward_skt", ".", "close", "(", ")", "forward_skt", "=", "None", "except", "Exception", "as", "w", ":", "forward_skt", "=", "None", "forward_skt", "=", "connect_forwarder", "(", "forward_host", "=", "forward_host", ",", "forward_port", "=", "forward_port", ")", "# end of reconnecting", "log", ".", "info", "(", "\"sent={}\"", ".", "format", "(", "network_data", "[", "\"stream\"", "]", ")", ")", "if", "need_response", ":", "log", ".", "info", "(", "\"receiving\"", ")", "cdr_res", "=", "forward_skt", ".", "recv", "(", "1024", ")", "log", ".", "info", "(", "(", "\"cdr - res{}\"", ")", ".", "format", "(", "cdr_res", ")", ")", "else", ":", "log", ".", "info", "(", "(", "\"{} EMPTY stream={} \"", "\"error={} status={}\"", ")", ".", "format", "(", "name", ",", "network_data", "[", "\"stream\"", "]", ",", "network_data", "[", "\"err\"", "]", ",", "network_data", "[", "\"status\"", "]", ")", ")", "else", ":", "log", ".", "info", "(", "(", "\"{} not_supported valid={} \"", "\"packet data_type={} status={}\"", ")", ".", "format", "(", "name", ",", "network_data", "[", "\"id\"", "]", ",", "network_data", "[", "\"data_type\"", "]", ",", "network_data", "[", "\"status\"", "]", ")", ")", "elif", "network_data", "[", "\"status\"", "]", "==", "FILTERED", ":", "log", ".", "info", "(", "(", "\"{} filtered={} status={}\"", ")", ".", "format", "(", "name", ",", "network_data", "[", "\"filtered\"", "]", ",", "network_data", "[", "\"status\"", "]", ")", ")", "else", ":", "if", "network_data", "[", "\"status\"", "]", "==", "INVALID", ":", "log", ".", "info", "(", "(", "\"{} invalid={} packet={} \"", "\"error={} status={}\"", ")", ".", "format", "(", "name", ",", "network_data", "[", "\"id\"", "]", ",", "network_data", "[", "\"data_type\"", "]", ",", "network_data", "[", "\"error\"", "]", ",", "network_data", "[", "\"status\"", "]", ")", ")", "else", ":", "log", ".", "info", "(", "(", "\"{} unknown={} packet={} \"", "\"error={} status={}\"", ")", ".", "format", "(", "name", ",", "network_data", "[", "\"id\"", "]", ",", "network_data", "[", "\"data_type\"", "]", ",", "network_data", "[", "\"error\"", "]", ",", "network_data", "[", "\"status\"", "]", ")", ")", "# end of if valid or not data", "except", "KeyboardInterrupt", "as", "k", ":", "log", ".", "info", "(", "(", "\"{} stopping\"", ")", ".", "format", "(", "name", ")", ")", "break", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "(", "\"{} failed packaging packet to forward \"", "\"with ex={}\"", ")", ".", "format", "(", "name", ",", "e", ")", ")", "break", "# end of try/ex during payload processing", "# end of if found a next_task", "log", ".", "info", "(", "(", "\"Consumer: {} {}\"", ")", ".", "format", "(", "name", ",", "next_task", ")", ")", "task_queue", ".", "task_done", "(", ")", "if", "need_response", ":", "answer", "=", "\"processed: {}\"", ".", "format", "(", "next_task", "(", ")", ")", "result_queue", ".", "put", "(", "answer", ")", "# end of while", "if", "forward_skt", ":", "try", ":", "forward_skt", ".", "close", "(", ")", "log", ".", "info", "(", "\"CLOSED connection\"", ")", "forward_skt", "=", "None", "except", "Exception", ":", "log", ".", "info", "(", "\"CLOSED connection\"", ")", "# end of cleaning up forwarding socket", "log", ".", "info", "(", "\"{} Done\"", ".", "format", "(", "name", ")", ")", "return" ]
# Redis/RabbitMQ/SQS messaging endpoints for pub-sub routing_key = ev("PUBLISH_EXCHANGE", "reporting.accounts") queue_name = ev("PUBLISH_QUEUE", "reporting.accounts") auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/15") serializer = "json"
[ "#", "Redis", "/", "RabbitMQ", "/", "SQS", "messaging", "endpoints", "for", "pub", "-", "sub", "routing_key", "=", "ev", "(", "PUBLISH_EXCHANGE", "reporting", ".", "accounts", ")", "queue_name", "=", "ev", "(", "PUBLISH_QUEUE", "reporting", ".", "accounts", ")", "auth_url", "=", "ev", "(", "PUB_BROKER_URL", "redis", ":", "//", "localhost", ":", "6379", "/", "15", ")", "serializer", "=", "json" ]
4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/scripts/network_agent.py#L35-L244
train
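The consumer above is configured entirely through environment variables read with ev(). A sketch of that pattern follows; ev() in network_pipeline is assumed to behave like os.environ.get with a default, and the IGNORE_KEY default below is a placeholder for the library's INCLUDED_IGNORE_KEY constant.

    import os

    def ev(name, default):
        # Assumed behavior of network_pipeline's ev(): env var or default.
        return os.environ.get(name, default)

    forward_host = ev('FORWARD_HOST', '127.0.0.1')
    forward_port = int(ev('FORWARD_PORT', '80'))
    filter_key = ev('IGNORE_KEY', 'included-ignore-key')  # placeholder default
    include_filter_key = ev('FILTER_KEY', '') or filter_key
    print(forward_host, forward_port, include_filter_key)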
jay-johnson/network-pipeline
network_pipeline/scripts/network_agent.py
run_main
def run_main( need_response=False, callback=None): """run_main start the packet consumers and the packet processors :param need_response: should send response back to publisher :param callback: handler method """ stop_file = ev("STOP_FILE", "/opt/stop_recording") num_workers = int(ev("NUM_WORKERS", "1")) shutdown_msg = "SHUTDOWN" log.info("Start - {}".format(name)) log.info("Creating multiprocessing queue") tasks = multiprocessing.JoinableQueue() queue_to_consume = multiprocessing.Queue() host = "localhost" # Start consumers log.info("Starting Consumers to process queued tasks") consumers = start_consumers_for_queue( num_workers=num_workers, tasks=tasks, queue_to_consume=queue_to_consume, shutdown_msg=shutdown_msg, consumer_class=WorkerToProcessPackets, callback=callback) log.info("creating socket") skt = create_layer_2_socket() log.info("socket created") not_done = True while not_done: if not skt: log.info("Failed to create layer 2 socket") log.info("Please make sure to run as root") not_done = False break try: if os.path.exists(stop_file): log.info(("Detected stop_file={}") .format(stop_file)) not_done = False break # stop if the file exists # Only works on linux packet = skt.recvfrom(65565) if os.path.exists(stop_file): log.info(("Detected stop_file={}") .format(stop_file)) not_done = False break # stop if the file was created during a wait loop tasks.put(NetworkPacketTask(source=host, payload=packet)) except KeyboardInterrupt as k: log.info("Stopping") not_done = False break except Exception as e: log.error(("Failed reading socket with ex={}") .format(e)) not_done = False break # end of try/ex during socket receving # end of while processing network packets log.info(("Shutting down consumers={}") .format(len(consumers))) shutdown_consumers(num_workers=num_workers, tasks=tasks) # Wait for all of the tasks to finish if need_response: log.info("Waiting for tasks to finish") tasks.join() log.info("Done waiting for tasks to finish")
python
def run_main( need_response=False, callback=None): """run_main start the packet consumers and the packet processors :param need_response: should send response back to publisher :param callback: handler method """ stop_file = ev("STOP_FILE", "/opt/stop_recording") num_workers = int(ev("NUM_WORKERS", "1")) shutdown_msg = "SHUTDOWN" log.info("Start - {}".format(name)) log.info("Creating multiprocessing queue") tasks = multiprocessing.JoinableQueue() queue_to_consume = multiprocessing.Queue() host = "localhost" # Start consumers log.info("Starting Consumers to process queued tasks") consumers = start_consumers_for_queue( num_workers=num_workers, tasks=tasks, queue_to_consume=queue_to_consume, shutdown_msg=shutdown_msg, consumer_class=WorkerToProcessPackets, callback=callback) log.info("creating socket") skt = create_layer_2_socket() log.info("socket created") not_done = True while not_done: if not skt: log.info("Failed to create layer 2 socket") log.info("Please make sure to run as root") not_done = False break try: if os.path.exists(stop_file): log.info(("Detected stop_file={}") .format(stop_file)) not_done = False break # stop if the file exists # Only works on linux packet = skt.recvfrom(65565) if os.path.exists(stop_file): log.info(("Detected stop_file={}") .format(stop_file)) not_done = False break # stop if the file was created during a wait loop tasks.put(NetworkPacketTask(source=host, payload=packet)) except KeyboardInterrupt as k: log.info("Stopping") not_done = False break except Exception as e: log.error(("Failed reading socket with ex={}") .format(e)) not_done = False break # end of try/ex during socket receving # end of while processing network packets log.info(("Shutting down consumers={}") .format(len(consumers))) shutdown_consumers(num_workers=num_workers, tasks=tasks) # Wait for all of the tasks to finish if need_response: log.info("Waiting for tasks to finish") tasks.join() log.info("Done waiting for tasks to finish")
[ "def", "run_main", "(", "need_response", "=", "False", ",", "callback", "=", "None", ")", ":", "stop_file", "=", "ev", "(", "\"STOP_FILE\"", ",", "\"/opt/stop_recording\"", ")", "num_workers", "=", "int", "(", "ev", "(", "\"NUM_WORKERS\"", ",", "\"1\"", ")", ")", "shutdown_msg", "=", "\"SHUTDOWN\"", "log", ".", "info", "(", "\"Start - {}\"", ".", "format", "(", "name", ")", ")", "log", ".", "info", "(", "\"Creating multiprocessing queue\"", ")", "tasks", "=", "multiprocessing", ".", "JoinableQueue", "(", ")", "queue_to_consume", "=", "multiprocessing", ".", "Queue", "(", ")", "host", "=", "\"localhost\"", "# Start consumers", "log", ".", "info", "(", "\"Starting Consumers to process queued tasks\"", ")", "consumers", "=", "start_consumers_for_queue", "(", "num_workers", "=", "num_workers", ",", "tasks", "=", "tasks", ",", "queue_to_consume", "=", "queue_to_consume", ",", "shutdown_msg", "=", "shutdown_msg", ",", "consumer_class", "=", "WorkerToProcessPackets", ",", "callback", "=", "callback", ")", "log", ".", "info", "(", "\"creating socket\"", ")", "skt", "=", "create_layer_2_socket", "(", ")", "log", ".", "info", "(", "\"socket created\"", ")", "not_done", "=", "True", "while", "not_done", ":", "if", "not", "skt", ":", "log", ".", "info", "(", "\"Failed to create layer 2 socket\"", ")", "log", ".", "info", "(", "\"Please make sure to run as root\"", ")", "not_done", "=", "False", "break", "try", ":", "if", "os", ".", "path", ".", "exists", "(", "stop_file", ")", ":", "log", ".", "info", "(", "(", "\"Detected stop_file={}\"", ")", ".", "format", "(", "stop_file", ")", ")", "not_done", "=", "False", "break", "# stop if the file exists", "# Only works on linux", "packet", "=", "skt", ".", "recvfrom", "(", "65565", ")", "if", "os", ".", "path", ".", "exists", "(", "stop_file", ")", ":", "log", ".", "info", "(", "(", "\"Detected stop_file={}\"", ")", ".", "format", "(", "stop_file", ")", ")", "not_done", "=", "False", "break", "# stop if the file was created during a wait loop", "tasks", ".", "put", "(", "NetworkPacketTask", "(", "source", "=", "host", ",", "payload", "=", "packet", ")", ")", "except", "KeyboardInterrupt", "as", "k", ":", "log", ".", "info", "(", "\"Stopping\"", ")", "not_done", "=", "False", "break", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "(", "\"Failed reading socket with ex={}\"", ")", ".", "format", "(", "e", ")", ")", "not_done", "=", "False", "break", "# end of try/ex during socket receving", "# end of while processing network packets", "log", ".", "info", "(", "(", "\"Shutting down consumers={}\"", ")", ".", "format", "(", "len", "(", "consumers", ")", ")", ")", "shutdown_consumers", "(", "num_workers", "=", "num_workers", ",", "tasks", "=", "tasks", ")", "# Wait for all of the tasks to finish", "if", "need_response", ":", "log", ".", "info", "(", "\"Waiting for tasks to finish\"", ")", "tasks", ".", "join", "(", ")", "log", ".", "info", "(", "\"Done waiting for tasks to finish\"", ")" ]
run_main start the packet consumers and the packet processors :param need_response: should send response back to publisher :param callback: handler method
[ "run_main" ]
4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa
https://github.com/jay-johnson/network-pipeline/blob/4e53ae13fe12085e0cf2e5e1aff947368f4f1ffa/network_pipeline/scripts/network_agent.py#L248-L341
train
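A hedged usage sketch for the run_main record above. The callback signature (a single task argument) and the task attribute names are assumptions, since WorkerToProcessPackets and NetworkPacketTask are not shown in this record:

def handle_packet(task):
    # hypothetical handler; 'task' is assumed to expose the NetworkPacketTask
    # fields queued by run_main (source hostname plus the raw recvfrom payload)
    raw_bytes = task.payload[0]  # recvfrom returns a (bytes, address) tuple
    print("packet from {}: {} bytes".format(task.source, len(raw_bytes)))

# must run as root so the layer 2 socket can be created; create the
# STOP_FILE (/opt/stop_recording by default) to end the capture loop
run_main(need_response=False, callback=handle_packet)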
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
best_model
def best_model(seq2hmm): """ determine the best model: archaea, bacteria, eukarya (best score) """ for seq in seq2hmm: best = [] for model in seq2hmm[seq]: best.append([model, sorted([i[-1] for i in seq2hmm[seq][model]], reverse = True)[0]]) best_model = sorted(best, key = itemgetter(1), reverse = True)[0][0] seq2hmm[seq] = [best_model] + [seq2hmm[seq][best_model]] return seq2hmm
python
def best_model(seq2hmm): """ determine the best model: archaea, bacteria, eukarya (best score) """ for seq in seq2hmm: best = [] for model in seq2hmm[seq]: best.append([model, sorted([i[-1] for i in seq2hmm[seq][model]], reverse = True)[0]]) best_model = sorted(best, key = itemgetter(1), reverse = True)[0][0] seq2hmm[seq] = [best_model] + [seq2hmm[seq][best_model]] return seq2hmm
[ "def", "best_model", "(", "seq2hmm", ")", ":", "for", "seq", "in", "seq2hmm", ":", "best", "=", "[", "]", "for", "model", "in", "seq2hmm", "[", "seq", "]", ":", "best", ".", "append", "(", "[", "model", ",", "sorted", "(", "[", "i", "[", "-", "1", "]", "for", "i", "in", "seq2hmm", "[", "seq", "]", "[", "model", "]", "]", ",", "reverse", "=", "True", ")", "[", "0", "]", "]", ")", "best_model", "=", "sorted", "(", "best", ",", "key", "=", "itemgetter", "(", "1", ")", ",", "reverse", "=", "True", ")", "[", "0", "]", "[", "0", "]", "seq2hmm", "[", "seq", "]", "=", "[", "best_model", "]", "+", "[", "seq2hmm", "[", "seq", "]", "[", "best_model", "]", "]", "return", "seq2hmm" ]
determine the best model: archaea, bacteria, eukarya (best score)
[ "determine", "the", "best", "model", ":", "archaea", "bacteria", "eukarya", "(", "best", "score", ")" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L21-L31
train
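A toy illustration of the selection logic above, assuming best_model and the module's `from operator import itemgetter` are in scope. The hit layout (strand at index 5, bit score last) is inferred from the neighboring records in this file:

seq2hmm = {
    'contig1': {
        'bacteria': [[1, 900, 1, 880, '!', '+', 1200.5]],
        'archaea':  [[1, 900, 1, 880, '!', '+', 950.0]],
    }
}
print(best_model(seq2hmm))
# {'contig1': ['bacteria', [[1, 900, 1, 880, '!', '+', 1200.5]]]}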
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
check_gaps
def check_gaps(matches, gap_threshold = 0): """ check for large gaps between alignment windows """ gaps = [] prev = None for match in sorted(matches, key = itemgetter(0)): if prev is None: prev = match continue if match[0] - prev[1] >= gap_threshold: gaps.append([prev, match]) prev = match return [[i[0][1], i[1][0]] for i in gaps]
python
def check_gaps(matches, gap_threshold = 0): """ check for large gaps between alignment windows """ gaps = [] prev = None for match in sorted(matches, key = itemgetter(0)): if prev is None: prev = match continue if match[0] - prev[1] >= gap_threshold: gaps.append([prev, match]) prev = match return [[i[0][1], i[1][0]] for i in gaps]
[ "def", "check_gaps", "(", "matches", ",", "gap_threshold", "=", "0", ")", ":", "gaps", "=", "[", "]", "prev", "=", "None", "for", "match", "in", "sorted", "(", "matches", ",", "key", "=", "itemgetter", "(", "0", ")", ")", ":", "if", "prev", "is", "None", ":", "prev", "=", "match", "continue", "if", "match", "[", "0", "]", "-", "prev", "[", "1", "]", ">=", "gap_threshold", ":", "gaps", ".", "append", "(", "[", "prev", ",", "match", "]", ")", "prev", "=", "match", "return", "[", "[", "i", "[", "0", "]", "[", "1", "]", ",", "i", "[", "1", "]", "[", "0", "]", "]", "for", "i", "in", "gaps", "]" ]
check for large gaps between alignment windows
[ "check", "for", "large", "gaps", "between", "alignment", "windows" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L33-L46
train
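A quick worked example, assuming check_gaps and itemgetter are in scope. The returned spans are the model regions between consecutive alignment windows whose separation meets the threshold:

matches = [[1, 100], [150, 300], [305, 400]]
print(check_gaps(matches, gap_threshold=25))
# [[100, 150]] - the 300->305 gap falls below the 25-position threshold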
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
check_overlap
def check_overlap(current, hit, overlap = 200): """ determine if sequence has already hit the same part of the model, indicating that this hit is for another 16S rRNA gene """ for prev in current: p_coords = prev[2:4] coords = hit[2:4] if get_overlap(coords, p_coords) >= overlap: return True return False
python
def check_overlap(current, hit, overlap = 200): """ determine if sequence has already hit the same part of the model, indicating that this hit is for another 16S rRNA gene """ for prev in current: p_coords = prev[2:4] coords = hit[2:4] if get_overlap(coords, p_coords) >= overlap: return True return False
[ "def", "check_overlap", "(", "current", ",", "hit", ",", "overlap", "=", "200", ")", ":", "for", "prev", "in", "current", ":", "p_coords", "=", "prev", "[", "2", ":", "4", "]", "coords", "=", "hit", "[", "2", ":", "4", "]", "if", "get_overlap", "(", "coords", ",", "p_coords", ")", ">=", "overlap", ":", "return", "True", "return", "False" ]
determine if sequence has already hit the same part of the model, indicating that this hit is for another 16S rRNA gene
[ "determine", "if", "sequence", "has", "already", "hit", "the", "same", "part", "of", "the", "model", "indicating", "that", "this", "hit", "is", "for", "another", "16S", "rRNA", "gene" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L51-L61
train
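check_overlap relies on a get_overlap helper that is not part of this record; a minimal sketch under the assumption that it returns the length of the intersection of two coordinate pairs:

def get_overlap(a, b):  # assumed helper, not from this record
    return max(0, min(a[1], b[1]) - max(a[0], b[0]))

current = [[10, 500, 1, 490, '!', '+', 800.0]]  # one prior hit
hit = [600, 1100, 5, 495, '!', '+', 750.0]      # re-hits model span 5-495
print(check_overlap(current, hit))  # True -> likely a second rRNA gene copy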
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
check_order
def check_order(current, hit, overlap = 200): """ determine if hits are sequential on model and on the same strand * if not, they should be split into different groups """ prev_model = current[-1][2:4] prev_strand = current[-1][-2] hit_model = hit[2:4] hit_strand = hit[-2] # make sure they are on the same strand if prev_strand != hit_strand: return False # check for sequential hits on + strand if prev_strand == '+' and (prev_model[1] - hit_model[0] >= overlap): return False # check for sequential hits on - strand if prev_strand == '-' and (hit_model[1] - prev_model[0] >= overlap): return False else: return True
python
def check_order(current, hit, overlap = 200): """ determine if hits are sequential on model and on the same strand * if not, they should be split into different groups """ prev_model = current[-1][2:4] prev_strand = current[-1][-2] hit_model = hit[2:4] hit_strand = hit[-2] # make sure they are on the same strand if prev_strand != hit_strand: return False # check for sequential hits on + strand if prev_strand == '+' and (prev_model[1] - hit_model[0] >= overlap): return False # check for sequential hits on - strand if prev_strand == '-' and (hit_model[1] - prev_model[0] >= overlap): return False else: return True
[ "def", "check_order", "(", "current", ",", "hit", ",", "overlap", "=", "200", ")", ":", "prev_model", "=", "current", "[", "-", "1", "]", "[", "2", ":", "4", "]", "prev_strand", "=", "current", "[", "-", "1", "]", "[", "-", "2", "]", "hit_model", "=", "hit", "[", "2", ":", "4", "]", "hit_strand", "=", "hit", "[", "-", "2", "]", "# make sure they are on the same strand", "if", "prev_strand", "!=", "hit_strand", ":", "return", "False", "# check for sequential hits on + strand", "if", "prev_strand", "==", "'+'", "and", "(", "prev_model", "[", "1", "]", "-", "hit_model", "[", "0", "]", ">=", "overlap", ")", ":", "return", "False", "# check for sequential hits on - strand", "if", "prev_strand", "==", "'-'", "and", "(", "hit_model", "[", "1", "]", "-", "prev_model", "[", "0", "]", ">=", "overlap", ")", ":", "return", "False", "else", ":", "return", "True" ]
determine if hits are sequential on model and on the same strand * if not, they should be split into different groups
[ "determine", "if", "hits", "are", "sequential", "on", "model", "and", "on", "the", "same", "strand", "*", "if", "not", "they", "should", "be", "split", "into", "different", "groups" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L63-L83
train
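A short illustration, assuming check_order above is in scope: hits must advance along the model on the same strand to stay in one group.

current = [[1, 300, 1, 290, '!', '+', 500.0]]
hit_ok = [350, 700, 310, 650, '!', '+', 450.0]
hit_bad = [350, 700, 310, 650, '!', '-', 450.0]
print(check_order(current, hit_ok))   # True: sequential, same strand
print(check_order(current, hit_bad))  # False: strand flip splits groups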
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
hit_groups
def hit_groups(hits): """ * each sequence may have more than one 16S rRNA gene * group hits for each gene """ groups = [] current = False for hit in sorted(hits, key = itemgetter(0)): if current is False: current = [hit] elif check_overlap(current, hit) is True or check_order(current, hit) is False: groups.append(current) current = [hit] else: current.append(hit) groups.append(current) return groups
python
def hit_groups(hits): """ * each sequence may have more than one 16S rRNA gene * group hits for each gene """ groups = [] current = False for hit in sorted(hits, key = itemgetter(0)): if current is False: current = [hit] elif check_overlap(current, hit) is True or check_order(current, hit) is False: groups.append(current) current = [hit] else: current.append(hit) groups.append(current) return groups
[ "def", "hit_groups", "(", "hits", ")", ":", "groups", "=", "[", "]", "current", "=", "False", "for", "hit", "in", "sorted", "(", "hits", ",", "key", "=", "itemgetter", "(", "0", ")", ")", ":", "if", "current", "is", "False", ":", "current", "=", "[", "hit", "]", "elif", "check_overlap", "(", "current", ",", "hit", ")", "is", "True", "or", "check_order", "(", "current", ",", "hit", ")", "is", "False", ":", "groups", ".", "append", "(", "current", ")", "current", "=", "[", "hit", "]", "else", ":", "current", ".", "append", "(", "hit", ")", "groups", ".", "append", "(", "current", ")", "return", "groups" ]
* each sequence may have more than one 16S rRNA gene * group hits for each gene
[ "*", "each", "sequence", "may", "have", "more", "than", "one", "16S", "rRNA", "gene", "*", "group", "hits", "for", "each", "gene" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L85-L101
train
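A toy run, assuming hit_groups, check_overlap, check_order, itemgetter and the assumed get_overlap helper are all in scope: two hits that cover the same model span are split into separate gene groups.

hits = [
    [1, 800, 1, 790, '!', '+', 900.0],      # first 16S gene
    [2000, 2800, 1, 790, '!', '+', 880.0],  # same model span, second copy
]
for group in hit_groups(hits):
    print(group)
# [[1, 800, 1, 790, '!', '+', 900.0]]
# [[2000, 2800, 1, 790, '!', '+', 880.0]]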
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
find_coordinates
def find_coordinates(hmms, bit_thresh): """ find 16S rRNA gene sequence coordinates """ # get coordinates from cmsearch output seq2hmm = parse_hmm(hmms, bit_thresh) seq2hmm = best_model(seq2hmm) group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps] for seq, info in list(seq2hmm.items()): group2hmm[seq] = {} # info = [model, [[hit1], [hit2], ...]] for group_num, group in enumerate(hit_groups(info[1])): # group is a group of hits to a single 16S gene # determine matching strand based on best hit best = sorted(group, reverse = True, key = itemgetter(-1))[0] strand = best[5] coordinates = [i[0] for i in group] + [i[1] for i in group] coordinates = [min(coordinates), max(coordinates), strand] # make sure all hits are to the same strand matches = [i for i in group if i[5] == strand] # gaps = [[gstart, gend], [gstart2, gend2]] gaps = check_gaps(matches) group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps] return group2hmm
python
def find_coordinates(hmms, bit_thresh): """ find 16S rRNA gene sequence coordinates """ # get coordinates from cmsearch output seq2hmm = parse_hmm(hmms, bit_thresh) seq2hmm = best_model(seq2hmm) group2hmm = {} # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps] for seq, info in list(seq2hmm.items()): group2hmm[seq] = {} # info = [model, [[hit1], [hit2], ...]] for group_num, group in enumerate(hit_groups(info[1])): # group is a group of hits to a single 16S gene # determine matching strand based on best hit best = sorted(group, reverse = True, key = itemgetter(-1))[0] strand = best[5] coordinates = [i[0] for i in group] + [i[1] for i in group] coordinates = [min(coordinates), max(coordinates), strand] # make sure all hits are to the same strand matches = [i for i in group if i[5] == strand] # gaps = [[gstart, gend], [gstart2, gend2]] gaps = check_gaps(matches) group2hmm[seq][group_num] = [info[0], strand, coordinates, matches, gaps] return group2hmm
[ "def", "find_coordinates", "(", "hmms", ",", "bit_thresh", ")", ":", "# get coordinates from cmsearch output", "seq2hmm", "=", "parse_hmm", "(", "hmms", ",", "bit_thresh", ")", "seq2hmm", "=", "best_model", "(", "seq2hmm", ")", "group2hmm", "=", "{", "}", "# group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]", "for", "seq", ",", "info", "in", "list", "(", "seq2hmm", ".", "items", "(", ")", ")", ":", "group2hmm", "[", "seq", "]", "=", "{", "}", "# info = [model, [[hit1], [hit2], ...]]", "for", "group_num", ",", "group", "in", "enumerate", "(", "hit_groups", "(", "info", "[", "1", "]", ")", ")", ":", "# group is a group of hits to a single 16S gene", "# determine matching strand based on best hit", "best", "=", "sorted", "(", "group", ",", "reverse", "=", "True", ",", "key", "=", "itemgetter", "(", "-", "1", ")", ")", "[", "0", "]", "strand", "=", "best", "[", "5", "]", "coordinates", "=", "[", "i", "[", "0", "]", "for", "i", "in", "group", "]", "+", "[", "i", "[", "1", "]", "for", "i", "in", "group", "]", "coordinates", "=", "[", "min", "(", "coordinates", ")", ",", "max", "(", "coordinates", ")", ",", "strand", "]", "# make sure all hits are to the same strand", "matches", "=", "[", "i", "for", "i", "in", "group", "if", "i", "[", "5", "]", "==", "strand", "]", "# gaps = [[gstart, gend], [gstart2, gend2]]", "gaps", "=", "check_gaps", "(", "matches", ")", "group2hmm", "[", "seq", "]", "[", "group_num", "]", "=", "[", "info", "[", "0", "]", ",", "strand", ",", "coordinates", ",", "matches", ",", "gaps", "]", "return", "group2hmm" ]
find 16S rRNA gene sequence coordinates
[ "find", "16S", "rRNA", "gene", "sequence", "coordinates" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L103-L126
train
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
get_info
def get_info(line, bit_thresh): """ get info from either ssu-cmsearch or cmsearch output """ if len(line) >= 18: # output is from cmsearch id, model, bit, inc = line[0].split()[0], line[2], float(line[14]), line[16] sstart, send, strand = int(line[7]), int(line[8]), line[9] mstart, mend = int(line[5]), int(line[6]) elif len(line) == 9: # output is from ssu-cmsearch if bit_thresh == 0: print('# ssu-cmsearch does not include a model-specific inclusion threshold, ', file=sys.stderr) print('# please specify a bit score threshold', file=sys.stderr) exit() id, model, bit = line[1].split()[0], line[0], float(line[6]) inc = '!' # this is not a feature of ssu-cmsearch sstart, send = int(line[2]), int(line[3]) mstart, mend = int(4), int(5) if send >= sstart: strand = '+' else: strand = '-' else: print('# unsupported hmm format:', file=sys.stderr) print('# provide tabular output from ssu-cmsearch and cmsearch supported', file=sys.stderr) exit() coords = [sstart, send] sstart, send = min(coords), max(coords) mcoords = [mstart, mend] mstart, mend = min(mcoords), max(mcoords) return id, model, bit, sstart, send, mstart, mend, strand, inc
python
def get_info(line, bit_thresh): """ get info from either ssu-cmsearch or cmsearch output """ if len(line) >= 18: # output is from cmsearch id, model, bit, inc = line[0].split()[0], line[2], float(line[14]), line[16] sstart, send, strand = int(line[7]), int(line[8]), line[9] mstart, mend = int(line[5]), int(line[6]) elif len(line) == 9: # output is from ssu-cmsearch if bit_thresh == 0: print('# ssu-cmsearch does not include a model-specific inclusion threshold, ', file=sys.stderr) print('# please specify a bit score threshold', file=sys.stderr) exit() id, model, bit = line[1].split()[0], line[0], float(line[6]) inc = '!' # this is not a feature of ssu-cmsearch sstart, send = int(line[2]), int(line[3]) mstart, mend = int(4), int(5) if send >= sstart: strand = '+' else: strand = '-' else: print('# unsupported hmm format:', file=sys.stderr) print('# provide tabular output from ssu-cmsearch and cmsearch supported', file=sys.stderr) exit() coords = [sstart, send] sstart, send = min(coords), max(coords) mcoords = [mstart, mend] mstart, mend = min(mcoords), max(mcoords) return id, model, bit, sstart, send, mstart, mend, strand, inc
[ "def", "get_info", "(", "line", ",", "bit_thresh", ")", ":", "if", "len", "(", "line", ")", ">=", "18", ":", "# output is from cmsearch", "id", ",", "model", ",", "bit", ",", "inc", "=", "line", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ",", "line", "[", "2", "]", ",", "float", "(", "line", "[", "14", "]", ")", ",", "line", "[", "16", "]", "sstart", ",", "send", ",", "strand", "=", "int", "(", "line", "[", "7", "]", ")", ",", "int", "(", "line", "[", "8", "]", ")", ",", "line", "[", "9", "]", "mstart", ",", "mend", "=", "int", "(", "line", "[", "5", "]", ")", ",", "int", "(", "line", "[", "6", "]", ")", "elif", "len", "(", "line", ")", "==", "9", ":", "# output is from ssu-cmsearch", "if", "bit_thresh", "==", "0", ":", "print", "(", "'# ssu-cmsearch does not include a model-specific inclusion threshold, '", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "'# please specify a bit score threshold'", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "id", ",", "model", ",", "bit", "=", "line", "[", "1", "]", ".", "split", "(", ")", "[", "0", "]", ",", "line", "[", "0", "]", ",", "float", "(", "line", "[", "6", "]", ")", "inc", "=", "'!'", "# this is not a feature of ssu-cmsearch", "sstart", ",", "send", "=", "int", "(", "line", "[", "2", "]", ")", ",", "int", "(", "line", "[", "3", "]", ")", "mstart", ",", "mend", "=", "int", "(", "4", ")", ",", "int", "(", "5", ")", "if", "send", ">=", "sstart", ":", "strand", "=", "'+'", "else", ":", "strand", "=", "'-'", "else", ":", "print", "(", "'# unsupported hmm format:'", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "'# provide tabular output from ssu-cmsearch and cmsearch supported'", ",", "file", "=", "sys", ".", "stderr", ")", "exit", "(", ")", "coords", "=", "[", "sstart", ",", "send", "]", "sstart", ",", "send", "=", "min", "(", "coords", ")", ",", "max", "(", "coords", ")", "mcoords", "=", "[", "mstart", ",", "mend", "]", "mstart", ",", "mend", "=", "min", "(", "mcoords", ")", ",", "max", "(", "mcoords", ")", "return", "id", ",", "model", ",", "bit", ",", "sstart", ",", "send", ",", "mstart", ",", "mend", ",", "strand", ",", "inc" ]
get info from either ssu-cmsearch or cmsearch output
[ "get", "info", "from", "either", "ssu", "-", "cmsearch", "or", "cmsearch", "output" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L128-L157
train
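Note that in the ssu-cmsearch branch of get_info above, `mstart, mend = int(4), int(5)` assigns the constants 4 and 5 instead of parsing fields from the line; judging by the cmsearch branch, it was presumably meant to read two columns. A hedged correction (the exact ssu-cmsearch column indices are an assumption, not confirmed by this record):

mstart, mend = int(line[4]), int(line[5])  # presumed model coordinates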
christophertbrown/bioscripts
ctbBio/16SfromHMM.py
check_buffer
def check_buffer(coords, length, buffer): """ check to see how much of the buffer is being used """ s = min(coords[0], buffer) e = min(length - coords[1], buffer) return [s, e]
python
def check_buffer(coords, length, buffer): """ check to see how much of the buffer is being used """ s = min(coords[0], buffer) e = min(length - coords[1], buffer) return [s, e]
[ "def", "check_buffer", "(", "coords", ",", "length", ",", "buffer", ")", ":", "s", "=", "min", "(", "coords", "[", "0", "]", ",", "buffer", ")", "e", "=", "min", "(", "length", "-", "coords", "[", "1", "]", ",", "buffer", ")", "return", "[", "s", ",", "e", "]" ]
check to see how much of the buffer is being used
[ "check", "to", "see", "how", "much", "of", "the", "buffer", "is", "being", "used" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/16SfromHMM.py#L189-L195
train
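A worked example, assuming check_buffer above is in scope: for a gene at positions 50-900 on a 1000 bp sequence with a 100 bp buffer, only 50 bp are available upstream while the full 100 bp fit downstream.

print(check_buffer([50, 900], length=1000, buffer=100))  # [50, 100]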
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
convert_parser_to
def convert_parser_to(parser, parser_or_type, metadata_props=None): """ :return: a parser of type parser_or_type, initialized with the properties of parser. If parser_or_type is a type, an instance of it must contain a update method. The update method must also process the set of properties supported by MetadataParser for the conversion to have any affect. :param parser: the parser (or content or parser type) to convert to new_type :param parser_or_type: a parser (or content) or type of parser to return :see: get_metadata_parser(metadata_container) for more on how parser_or_type is treated """ old_parser = parser if isinstance(parser, MetadataParser) else get_metadata_parser(parser) new_parser = get_metadata_parser(parser_or_type) for prop in (metadata_props or _supported_props): setattr(new_parser, prop, deepcopy(getattr(old_parser, prop, u''))) new_parser.update() return new_parser
python
def convert_parser_to(parser, parser_or_type, metadata_props=None): """ :return: a parser of type parser_or_type, initialized with the properties of parser. If parser_or_type is a type, an instance of it must contain a update method. The update method must also process the set of properties supported by MetadataParser for the conversion to have any affect. :param parser: the parser (or content or parser type) to convert to new_type :param parser_or_type: a parser (or content) or type of parser to return :see: get_metadata_parser(metadata_container) for more on how parser_or_type is treated """ old_parser = parser if isinstance(parser, MetadataParser) else get_metadata_parser(parser) new_parser = get_metadata_parser(parser_or_type) for prop in (metadata_props or _supported_props): setattr(new_parser, prop, deepcopy(getattr(old_parser, prop, u''))) new_parser.update() return new_parser
[ "def", "convert_parser_to", "(", "parser", ",", "parser_or_type", ",", "metadata_props", "=", "None", ")", ":", "old_parser", "=", "parser", "if", "isinstance", "(", "parser", ",", "MetadataParser", ")", "else", "get_metadata_parser", "(", "parser", ")", "new_parser", "=", "get_metadata_parser", "(", "parser_or_type", ")", "for", "prop", "in", "(", "metadata_props", "or", "_supported_props", ")", ":", "setattr", "(", "new_parser", ",", "prop", ",", "deepcopy", "(", "getattr", "(", "old_parser", ",", "prop", ",", "u''", ")", ")", ")", "new_parser", ".", "update", "(", ")", "return", "new_parser" ]
:return: a parser of type parser_or_type, initialized with the properties of parser. If parser_or_type is a type, an instance of it must contain an update method. The update method must also process the set of properties supported by MetadataParser for the conversion to have any effect. :param parser: the parser (or content or parser type) to convert to new_type :param parser_or_type: a parser (or content) or type of parser to return :see: get_metadata_parser(metadata_container) for more on how parser_or_type is treated
[ ":", "return", ":", "a", "parser", "of", "type", "parser_or_type", "initialized", "with", "the", "properties", "of", "parser", ".", "If", "parser_or_type", "is", "a", "type", "an", "instance", "of", "it", "must", "contain", "a", "update", "method", ".", "The", "update", "method", "must", "also", "process", "the", "set", "of", "properties", "supported", "by", "MetadataParser", "for", "the", "conversion", "to", "have", "any", "affect", ".", ":", "param", "parser", ":", "the", "parser", "(", "or", "content", "or", "parser", "type", ")", "to", "convert", "to", "new_type", ":", "param", "parser_or_type", ":", "a", "parser", "(", "or", "content", ")", "or", "type", "of", "parser", "to", "return", ":", "see", ":", "get_metadata_parser", "(", "metadata_container", ")", "for", "more", "on", "how", "parser_or_type", "is", "treated" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L30-L48
train
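A hedged usage sketch for convert_parser_to: the input file name is hypothetical, and 'title' is assumed to be among the supported properties copied across.

from gis_metadata.metadata_parser import convert_parser_to
from gis_metadata.iso_metadata_parser import IsoParser

with open('fgdc_record.xml') as xml_file:  # hypothetical input file
    iso = convert_parser_to(xml_file, IsoParser)
print(iso.title)  # same title, now backed by an ISO-19115 tree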
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
get_metadata_parser
def get_metadata_parser(metadata_container, **metadata_defaults): """ Takes a metadata_container, which may be a type or instance of a parser, a dict, string, or file. :return: a new instance of a parser corresponding to the standard represented by metadata_container :see: get_parsed_content(metdata_content) for more on types of content that can be parsed """ parser_type = None if isinstance(metadata_container, MetadataParser): parser_type = type(metadata_container) elif isinstance(metadata_container, type): parser_type = metadata_container metadata_container = metadata_container().update(**metadata_defaults) xml_root, xml_tree = get_parsed_content(metadata_container) # The get_parsed_content method ensures only these roots will be returned parser = None if parser_type is not None: parser = parser_type(xml_tree, **metadata_defaults) elif xml_root in ISO_ROOTS: parser = IsoParser(xml_tree, **metadata_defaults) else: has_arcgis_data = any(element_exists(xml_tree, e) for e in ARCGIS_NODES) if xml_root == FGDC_ROOT and not has_arcgis_data: parser = FgdcParser(xml_tree, **metadata_defaults) elif xml_root in ARCGIS_ROOTS: parser = ArcGISParser(xml_tree, **metadata_defaults) return parser
python
def get_metadata_parser(metadata_container, **metadata_defaults): """ Takes a metadata_container, which may be a type or instance of a parser, a dict, string, or file. :return: a new instance of a parser corresponding to the standard represented by metadata_container :see: get_parsed_content(metdata_content) for more on types of content that can be parsed """ parser_type = None if isinstance(metadata_container, MetadataParser): parser_type = type(metadata_container) elif isinstance(metadata_container, type): parser_type = metadata_container metadata_container = metadata_container().update(**metadata_defaults) xml_root, xml_tree = get_parsed_content(metadata_container) # The get_parsed_content method ensures only these roots will be returned parser = None if parser_type is not None: parser = parser_type(xml_tree, **metadata_defaults) elif xml_root in ISO_ROOTS: parser = IsoParser(xml_tree, **metadata_defaults) else: has_arcgis_data = any(element_exists(xml_tree, e) for e in ARCGIS_NODES) if xml_root == FGDC_ROOT and not has_arcgis_data: parser = FgdcParser(xml_tree, **metadata_defaults) elif xml_root in ARCGIS_ROOTS: parser = ArcGISParser(xml_tree, **metadata_defaults) return parser
[ "def", "get_metadata_parser", "(", "metadata_container", ",", "*", "*", "metadata_defaults", ")", ":", "parser_type", "=", "None", "if", "isinstance", "(", "metadata_container", ",", "MetadataParser", ")", ":", "parser_type", "=", "type", "(", "metadata_container", ")", "elif", "isinstance", "(", "metadata_container", ",", "type", ")", ":", "parser_type", "=", "metadata_container", "metadata_container", "=", "metadata_container", "(", ")", ".", "update", "(", "*", "*", "metadata_defaults", ")", "xml_root", ",", "xml_tree", "=", "get_parsed_content", "(", "metadata_container", ")", "# The get_parsed_content method ensures only these roots will be returned", "parser", "=", "None", "if", "parser_type", "is", "not", "None", ":", "parser", "=", "parser_type", "(", "xml_tree", ",", "*", "*", "metadata_defaults", ")", "elif", "xml_root", "in", "ISO_ROOTS", ":", "parser", "=", "IsoParser", "(", "xml_tree", ",", "*", "*", "metadata_defaults", ")", "else", ":", "has_arcgis_data", "=", "any", "(", "element_exists", "(", "xml_tree", ",", "e", ")", "for", "e", "in", "ARCGIS_NODES", ")", "if", "xml_root", "==", "FGDC_ROOT", "and", "not", "has_arcgis_data", ":", "parser", "=", "FgdcParser", "(", "xml_tree", ",", "*", "*", "metadata_defaults", ")", "elif", "xml_root", "in", "ARCGIS_ROOTS", ":", "parser", "=", "ArcGISParser", "(", "xml_tree", ",", "*", "*", "metadata_defaults", ")", "return", "parser" ]
Takes a metadata_container, which may be a type or instance of a parser, a dict, string, or file. :return: a new instance of a parser corresponding to the standard represented by metadata_container :see: get_parsed_content(metadata_content) for more on types of content that can be parsed
[ "Takes", "a", "metadata_container", "which", "may", "be", "a", "type", "or", "instance", "of", "a", "parser", "a", "dict", "string", "or", "file", ".", ":", "return", ":", "a", "new", "instance", "of", "a", "parser", "corresponding", "to", "the", "standard", "represented", "by", "metadata_container", ":", "see", ":", "get_parsed_content", "(", "metdata_content", ")", "for", "more", "on", "types", "of", "content", "that", "can", "be", "parsed" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L51-L85
train
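A short sketch of the dispatch described above; the stub below has the FGDC root element and no ArcGIS nodes, so it should route to FgdcParser:

from gis_metadata.metadata_parser import get_metadata_parser

fgdc_stub = '<metadata><idinfo/></metadata>'  # minimal FGDC-rooted stub
parser = get_metadata_parser(fgdc_stub)
print(type(parser).__name__)  # FgdcParser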
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
get_parsed_content
def get_parsed_content(metadata_content): """ Parses any of the following types of content: 1. XML string or file object: parses XML content 2. MetadataParser instance: deep copies xml_tree 3. Dictionary with nested objects containing: - name (required): the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements :raises InvalidContent: if the XML is invalid or does not conform to a supported metadata standard :raises NoContent: If the content passed in is null or otherwise empty :return: the XML root along with an XML Tree parsed by and compatible with element_utils """ _import_parsers() # Prevents circular dependencies between modules xml_tree = None if metadata_content is None: raise NoContent('Metadata has no data') else: if isinstance(metadata_content, MetadataParser): xml_tree = deepcopy(metadata_content._xml_tree) elif isinstance(metadata_content, dict): xml_tree = get_element_tree(metadata_content) else: try: # Strip name spaces from file or XML content xml_tree = get_element_tree(metadata_content) except Exception: xml_tree = None # Several exceptions possible, outcome is the same if xml_tree is None: raise InvalidContent( 'Cannot instantiate a {parser_type} parser with invalid content to parse', parser_type=type(metadata_content).__name__ ) xml_root = get_element_name(xml_tree) if xml_root is None: raise NoContent('Metadata contains no data') elif xml_root not in VALID_ROOTS: content = type(metadata_content).__name__ raise InvalidContent('Invalid root element for {content}: {xml_root}', content=content, xml_root=xml_root) return xml_root, xml_tree
python
def get_parsed_content(metadata_content): """ Parses any of the following types of content: 1. XML string or file object: parses XML content 2. MetadataParser instance: deep copies xml_tree 3. Dictionary with nested objects containing: - name (required): the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements :raises InvalidContent: if the XML is invalid or does not conform to a supported metadata standard :raises NoContent: If the content passed in is null or otherwise empty :return: the XML root along with an XML Tree parsed by and compatible with element_utils """ _import_parsers() # Prevents circular dependencies between modules xml_tree = None if metadata_content is None: raise NoContent('Metadata has no data') else: if isinstance(metadata_content, MetadataParser): xml_tree = deepcopy(metadata_content._xml_tree) elif isinstance(metadata_content, dict): xml_tree = get_element_tree(metadata_content) else: try: # Strip name spaces from file or XML content xml_tree = get_element_tree(metadata_content) except Exception: xml_tree = None # Several exceptions possible, outcome is the same if xml_tree is None: raise InvalidContent( 'Cannot instantiate a {parser_type} parser with invalid content to parse', parser_type=type(metadata_content).__name__ ) xml_root = get_element_name(xml_tree) if xml_root is None: raise NoContent('Metadata contains no data') elif xml_root not in VALID_ROOTS: content = type(metadata_content).__name__ raise InvalidContent('Invalid root element for {content}: {xml_root}', content=content, xml_root=xml_root) return xml_root, xml_tree
[ "def", "get_parsed_content", "(", "metadata_content", ")", ":", "_import_parsers", "(", ")", "# Prevents circular dependencies between modules", "xml_tree", "=", "None", "if", "metadata_content", "is", "None", ":", "raise", "NoContent", "(", "'Metadata has no data'", ")", "else", ":", "if", "isinstance", "(", "metadata_content", ",", "MetadataParser", ")", ":", "xml_tree", "=", "deepcopy", "(", "metadata_content", ".", "_xml_tree", ")", "elif", "isinstance", "(", "metadata_content", ",", "dict", ")", ":", "xml_tree", "=", "get_element_tree", "(", "metadata_content", ")", "else", ":", "try", ":", "# Strip name spaces from file or XML content", "xml_tree", "=", "get_element_tree", "(", "metadata_content", ")", "except", "Exception", ":", "xml_tree", "=", "None", "# Several exceptions possible, outcome is the same", "if", "xml_tree", "is", "None", ":", "raise", "InvalidContent", "(", "'Cannot instantiate a {parser_type} parser with invalid content to parse'", ",", "parser_type", "=", "type", "(", "metadata_content", ")", ".", "__name__", ")", "xml_root", "=", "get_element_name", "(", "xml_tree", ")", "if", "xml_root", "is", "None", ":", "raise", "NoContent", "(", "'Metadata contains no data'", ")", "elif", "xml_root", "not", "in", "VALID_ROOTS", ":", "content", "=", "type", "(", "metadata_content", ")", ".", "__name__", "raise", "InvalidContent", "(", "'Invalid root element for {content}: {xml_root}'", ",", "content", "=", "content", ",", "xml_root", "=", "xml_root", ")", "return", "xml_root", ",", "xml_tree" ]
Parses any of the following types of content: 1. XML string or file object: parses XML content 2. MetadataParser instance: deep copies xml_tree 3. Dictionary with nested objects containing: - name (required): the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements :raises InvalidContent: if the XML is invalid or does not conform to a supported metadata standard :raises NoContent: If the content passed in is null or otherwise empty :return: the XML root along with an XML Tree parsed by and compatible with element_utils
[ "Parses", "any", "of", "the", "following", "types", "of", "content", ":", "1", ".", "XML", "string", "or", "file", "object", ":", "parses", "XML", "content", "2", ".", "MetadataParser", "instance", ":", "deep", "copies", "xml_tree", "3", ".", "Dictionary", "with", "nested", "objects", "containing", ":", "-", "name", "(", "required", ")", ":", "the", "name", "of", "the", "element", "tag", "-", "text", ":", "the", "text", "contained", "by", "element", "-", "tail", ":", "text", "immediately", "following", "the", "element", "-", "attributes", ":", "a", "Dictionary", "containing", "element", "attributes", "-", "children", ":", "a", "List", "of", "converted", "child", "elements" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L88-L138
train
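The dictionary form documented above can be exercised directly; a minimal sketch using only the required 'name' key plus 'children':

from gis_metadata.metadata_parser import get_parsed_content

content = {'name': 'metadata', 'children': [{'name': 'idinfo'}]}
xml_root, xml_tree = get_parsed_content(content)
print(xml_root)  # 'metadata'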
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
_import_parsers
def _import_parsers(): """ Lazy imports to prevent circular dependencies between this module and utils """ global ARCGIS_NODES global ARCGIS_ROOTS global ArcGISParser global FGDC_ROOT global FgdcParser global ISO_ROOTS global IsoParser global VALID_ROOTS if ARCGIS_NODES is None or ARCGIS_ROOTS is None or ArcGISParser is None: from gis_metadata.arcgis_metadata_parser import ARCGIS_NODES from gis_metadata.arcgis_metadata_parser import ARCGIS_ROOTS from gis_metadata.arcgis_metadata_parser import ArcGISParser if FGDC_ROOT is None or FgdcParser is None: from gis_metadata.fgdc_metadata_parser import FGDC_ROOT from gis_metadata.fgdc_metadata_parser import FgdcParser if ISO_ROOTS is None or IsoParser is None: from gis_metadata.iso_metadata_parser import ISO_ROOTS from gis_metadata.iso_metadata_parser import IsoParser if VALID_ROOTS is None: VALID_ROOTS = {FGDC_ROOT}.union(ARCGIS_ROOTS + ISO_ROOTS)
python
def _import_parsers(): """ Lazy imports to prevent circular dependencies between this module and utils """ global ARCGIS_NODES global ARCGIS_ROOTS global ArcGISParser global FGDC_ROOT global FgdcParser global ISO_ROOTS global IsoParser global VALID_ROOTS if ARCGIS_NODES is None or ARCGIS_ROOTS is None or ArcGISParser is None: from gis_metadata.arcgis_metadata_parser import ARCGIS_NODES from gis_metadata.arcgis_metadata_parser import ARCGIS_ROOTS from gis_metadata.arcgis_metadata_parser import ArcGISParser if FGDC_ROOT is None or FgdcParser is None: from gis_metadata.fgdc_metadata_parser import FGDC_ROOT from gis_metadata.fgdc_metadata_parser import FgdcParser if ISO_ROOTS is None or IsoParser is None: from gis_metadata.iso_metadata_parser import ISO_ROOTS from gis_metadata.iso_metadata_parser import IsoParser if VALID_ROOTS is None: VALID_ROOTS = {FGDC_ROOT}.union(ARCGIS_ROOTS + ISO_ROOTS)
[ "def", "_import_parsers", "(", ")", ":", "global", "ARCGIS_NODES", "global", "ARCGIS_ROOTS", "global", "ArcGISParser", "global", "FGDC_ROOT", "global", "FgdcParser", "global", "ISO_ROOTS", "global", "IsoParser", "global", "VALID_ROOTS", "if", "ARCGIS_NODES", "is", "None", "or", "ARCGIS_ROOTS", "is", "None", "or", "ArcGISParser", "is", "None", ":", "from", "gis_metadata", ".", "arcgis_metadata_parser", "import", "ARCGIS_NODES", "from", "gis_metadata", ".", "arcgis_metadata_parser", "import", "ARCGIS_ROOTS", "from", "gis_metadata", ".", "arcgis_metadata_parser", "import", "ArcGISParser", "if", "FGDC_ROOT", "is", "None", "or", "FgdcParser", "is", "None", ":", "from", "gis_metadata", ".", "fgdc_metadata_parser", "import", "FGDC_ROOT", "from", "gis_metadata", ".", "fgdc_metadata_parser", "import", "FgdcParser", "if", "ISO_ROOTS", "is", "None", "or", "IsoParser", "is", "None", ":", "from", "gis_metadata", ".", "iso_metadata_parser", "import", "ISO_ROOTS", "from", "gis_metadata", ".", "iso_metadata_parser", "import", "IsoParser", "if", "VALID_ROOTS", "is", "None", ":", "VALID_ROOTS", "=", "{", "FGDC_ROOT", "}", ".", "union", "(", "ARCGIS_ROOTS", "+", "ISO_ROOTS", ")" ]
Lazy imports to prevent circular dependencies between this module and utils
[ "Lazy", "imports", "to", "prevent", "circular", "dependencies", "between", "this", "module", "and", "utils" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L141-L170
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._init_metadata
def _init_metadata(self): """ Dynamically sets attributes from a Dictionary passed in by children. The Dictionary will contain the name of each attribute as keys, and either an XPATH mapping to a text value in _xml_tree, or a function that takes no parameters and returns the intended value. """ if self._data_map is None: self._init_data_map() validate_properties(self._data_map, self._metadata_props) # Parse attribute values and assign them: key = parse(val) for prop in self._data_map: setattr(self, prop, parse_property(self._xml_tree, None, self._data_map, prop)) self.has_data = any(getattr(self, prop) for prop in self._data_map)
python
def _init_metadata(self): """ Dynamically sets attributes from a Dictionary passed in by children. The Dictionary will contain the name of each attribute as keys, and either an XPATH mapping to a text value in _xml_tree, or a function that takes no parameters and returns the intended value. """ if self._data_map is None: self._init_data_map() validate_properties(self._data_map, self._metadata_props) # Parse attribute values and assign them: key = parse(val) for prop in self._data_map: setattr(self, prop, parse_property(self._xml_tree, None, self._data_map, prop)) self.has_data = any(getattr(self, prop) for prop in self._data_map)
[ "def", "_init_metadata", "(", "self", ")", ":", "if", "self", ".", "_data_map", "is", "None", ":", "self", ".", "_init_data_map", "(", ")", "validate_properties", "(", "self", ".", "_data_map", ",", "self", ".", "_metadata_props", ")", "# Parse attribute values and assign them: key = parse(val)", "for", "prop", "in", "self", ".", "_data_map", ":", "setattr", "(", "self", ",", "prop", ",", "parse_property", "(", "self", ".", "_xml_tree", ",", "None", ",", "self", ".", "_data_map", ",", "prop", ")", ")", "self", ".", "has_data", "=", "any", "(", "getattr", "(", "self", ",", "prop", ")", "for", "prop", "in", "self", ".", "_data_map", ")" ]
Dynamically sets attributes from a Dictionary passed in by children. The Dictionary will contain the name of each attribute as keys, and either an XPATH mapping to a text value in _xml_tree, or a function that takes no parameters and returns the intended value.
[ "Dynamically", "sets", "attributes", "from", "a", "Dictionary", "passed", "in", "by", "children", ".", "The", "Dictionary", "will", "contain", "the", "name", "of", "each", "attribute", "as", "keys", "and", "either", "an", "XPATH", "mapping", "to", "a", "text", "value", "in", "_xml_tree", "or", "a", "function", "that", "takes", "no", "parameters", "and", "returns", "the", "intended", "value", "." ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L236-L254
train
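The dynamic-attribute pattern used by _init_metadata, reduced to a stand-alone sketch; every name here is illustrative, not the library's:

class PropBag(object):
    def __init__(self, data_map, parse):
        # one attribute per mapped property, as _init_metadata does
        for prop in data_map:
            setattr(self, prop, parse(data_map[prop]))
        self.has_data = any(getattr(self, p) for p in data_map)

bag = PropBag({'title': 'idinfo/citation/title'}, parse=lambda xpath: '')
print(bag.has_data)  # False until some parsed value is truthy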
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._init_data_map
def _init_data_map(self): """ Default data map initialization: MUST be overridden in children """ if self._data_map is None: self._data_map = {'_root': None} self._data_map.update({}.fromkeys(self._metadata_props))
python
def _init_data_map(self): """ Default data map initialization: MUST be overridden in children """ if self._data_map is None: self._data_map = {'_root': None} self._data_map.update({}.fromkeys(self._metadata_props))
[ "def", "_init_data_map", "(", "self", ")", ":", "if", "self", ".", "_data_map", "is", "None", ":", "self", ".", "_data_map", "=", "{", "'_root'", ":", "None", "}", "self", ".", "_data_map", ".", "update", "(", "{", "}", ".", "fromkeys", "(", "self", ".", "_metadata_props", ")", ")" ]
Default data map initialization: MUST be overridden in children
[ "Default", "data", "map", "initialization", ":", "MUST", "be", "overridden", "in", "children" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L256-L261
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._get_template
def _get_template(self, root=None, **metadata_defaults): """ Iterate over items metadata_defaults {prop: val, ...} to populate template """ if root is None: if self._data_map is None: self._init_data_map() root = self._xml_root = self._data_map['_root'] template_tree = self._xml_tree = create_element_tree(root) for prop, val in iteritems(metadata_defaults): path = self._data_map.get(prop) if path and val: setattr(self, prop, val) update_property(template_tree, None, path, prop, val) return template_tree
python
def _get_template(self, root=None, **metadata_defaults): """ Iterate over items metadata_defaults {prop: val, ...} to populate template """ if root is None: if self._data_map is None: self._init_data_map() root = self._xml_root = self._data_map['_root'] template_tree = self._xml_tree = create_element_tree(root) for prop, val in iteritems(metadata_defaults): path = self._data_map.get(prop) if path and val: setattr(self, prop, val) update_property(template_tree, None, path, prop, val) return template_tree
[ "def", "_get_template", "(", "self", ",", "root", "=", "None", ",", "*", "*", "metadata_defaults", ")", ":", "if", "root", "is", "None", ":", "if", "self", ".", "_data_map", "is", "None", ":", "self", ".", "_init_data_map", "(", ")", "root", "=", "self", ".", "_xml_root", "=", "self", ".", "_data_map", "[", "'_root'", "]", "template_tree", "=", "self", ".", "_xml_tree", "=", "create_element_tree", "(", "root", ")", "for", "prop", ",", "val", "in", "iteritems", "(", "metadata_defaults", ")", ":", "path", "=", "self", ".", "_data_map", ".", "get", "(", "prop", ")", "if", "path", "and", "val", ":", "setattr", "(", "self", ",", "prop", ",", "val", ")", "update_property", "(", "template_tree", ",", "None", ",", "path", ",", "prop", ",", "val", ")", "return", "template_tree" ]
Iterate over items metadata_defaults {prop: val, ...} to populate template
[ "Iterate", "over", "items", "metadata_defaults", "{", "prop", ":", "val", "...", "}", "to", "populate", "template" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L263-L280
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._get_xpath_for
def _get_xpath_for(self, prop): """ :return: the configured xpath for a given property """ xpath = self._data_map.get(prop) return getattr(xpath, 'xpath', xpath)
python
def _get_xpath_for(self, prop): """ :return: the configured xpath for a given property """ xpath = self._data_map.get(prop) return getattr(xpath, 'xpath', xpath)
[ "def", "_get_xpath_for", "(", "self", ",", "prop", ")", ":", "xpath", "=", "self", ".", "_data_map", ".", "get", "(", "prop", ")", "return", "getattr", "(", "xpath", ",", "'xpath'", ",", "xpath", ")" ]
:return: the configured xpath for a given property
[ ":", "return", ":", "the", "configured", "xpath", "for", "a", "given", "property" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L282-L286
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._parse_complex
def _parse_complex(self, prop): """ Default parsing operation for a complex struct """ xpath_root = None xpath_map = self._data_structures[prop] return parse_complex(self._xml_tree, xpath_root, xpath_map, prop)
python
def _parse_complex(self, prop): """ Default parsing operation for a complex struct """ xpath_root = None xpath_map = self._data_structures[prop] return parse_complex(self._xml_tree, xpath_root, xpath_map, prop)
[ "def", "_parse_complex", "(", "self", ",", "prop", ")", ":", "xpath_root", "=", "None", "xpath_map", "=", "self", ".", "_data_structures", "[", "prop", "]", "return", "parse_complex", "(", "self", ".", "_xml_tree", ",", "xpath_root", ",", "xpath_map", ",", "prop", ")" ]
Default parsing operation for a complex struct
[ "Default", "parsing", "operation", "for", "a", "complex", "struct" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L293-L299
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._parse_complex_list
def _parse_complex_list(self, prop): """ Default parsing operation for lists of complex structs """ xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
python
def _parse_complex_list(self, prop): """ Default parsing operation for lists of complex structs """ xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
[ "def", "_parse_complex_list", "(", "self", ",", "prop", ")", ":", "xpath_root", "=", "self", ".", "_get_xroot_for", "(", "prop", ")", "xpath_map", "=", "self", ".", "_data_structures", "[", "prop", "]", "return", "parse_complex_list", "(", "self", ".", "_xml_tree", ",", "xpath_root", ",", "xpath_map", ",", "prop", ")" ]
Default parsing operation for lists of complex structs
[ "Default", "parsing", "operation", "for", "lists", "of", "complex", "structs" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L301-L307
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._parse_dates
def _parse_dates(self, prop=DATES): """ Creates and returns a Date Types data structure parsed from the metadata """ return parse_dates(self._xml_tree, self._data_structures[prop])
python
def _parse_dates(self, prop=DATES): """ Creates and returns a Date Types data structure parsed from the metadata """ return parse_dates(self._xml_tree, self._data_structures[prop])
[ "def", "_parse_dates", "(", "self", ",", "prop", "=", "DATES", ")", ":", "return", "parse_dates", "(", "self", ".", "_xml_tree", ",", "self", ".", "_data_structures", "[", "prop", "]", ")" ]
Creates and returns a Date Types data structure parsed from the metadata
[ "Creates", "and", "returns", "a", "Date", "Types", "data", "structure", "parsed", "from", "the", "metadata" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L309-L312
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._update_complex
def _update_complex(self, **update_props): """ Default update operation for a complex struct """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
python
def _update_complex(self, **update_props): """ Default update operation for a complex struct """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
[ "def", "_update_complex", "(", "self", ",", "*", "*", "update_props", ")", ":", "prop", "=", "update_props", "[", "'prop'", "]", "xpath_root", "=", "self", ".", "_get_xroot_for", "(", "prop", ")", "xpath_map", "=", "self", ".", "_data_structures", "[", "prop", "]", "return", "update_complex", "(", "xpath_root", "=", "xpath_root", ",", "xpath_map", "=", "xpath_map", ",", "*", "*", "update_props", ")" ]
Default update operation for a complex struct
[ "Default", "update", "operation", "for", "a", "complex", "struct" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L314-L321
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._update_complex_list
def _update_complex_list(self, **update_props): """ Default update operation for lists of complex structs """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
python
def _update_complex_list(self, **update_props): """ Default update operation for lists of complex structs """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
[ "def", "_update_complex_list", "(", "self", ",", "*", "*", "update_props", ")", ":", "prop", "=", "update_props", "[", "'prop'", "]", "xpath_root", "=", "self", ".", "_get_xroot_for", "(", "prop", ")", "xpath_map", "=", "self", ".", "_data_structures", "[", "prop", "]", "return", "update_complex_list", "(", "xpath_root", "=", "xpath_root", ",", "xpath_map", "=", "xpath_map", ",", "*", "*", "update_props", ")" ]
Default update operation for lists of complex structs
[ "Default", "update", "operation", "for", "lists", "of", "complex", "structs" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L323-L330
train
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser._update_dates
def _update_dates(self, xpath_root=None, **update_props): """ Default update operation for Dates metadata :see: gis_metadata.utils._complex_definitions[DATES] """ tree_to_update = update_props['tree_to_update'] prop = update_props['prop'] values = (update_props['values'] or {}).get(DATE_VALUES) or u'' xpaths = self._data_structures[prop] if not self.dates: date_xpaths = xpath_root elif self.dates[DATE_TYPE] != DATE_TYPE_RANGE: date_xpaths = xpaths.get(self.dates[DATE_TYPE], u'') else: date_xpaths = [ xpaths[DATE_TYPE_RANGE_BEGIN], xpaths[DATE_TYPE_RANGE_END] ] if xpath_root: remove_element(tree_to_update, xpath_root) return update_property(tree_to_update, xpath_root, date_xpaths, prop, values)
python
def _update_dates(self, xpath_root=None, **update_props): """ Default update operation for Dates metadata :see: gis_metadata.utils._complex_definitions[DATES] """ tree_to_update = update_props['tree_to_update'] prop = update_props['prop'] values = (update_props['values'] or {}).get(DATE_VALUES) or u'' xpaths = self._data_structures[prop] if not self.dates: date_xpaths = xpath_root elif self.dates[DATE_TYPE] != DATE_TYPE_RANGE: date_xpaths = xpaths.get(self.dates[DATE_TYPE], u'') else: date_xpaths = [ xpaths[DATE_TYPE_RANGE_BEGIN], xpaths[DATE_TYPE_RANGE_END] ] if xpath_root: remove_element(tree_to_update, xpath_root) return update_property(tree_to_update, xpath_root, date_xpaths, prop, values)
[ "def", "_update_dates", "(", "self", ",", "xpath_root", "=", "None", ",", "*", "*", "update_props", ")", ":", "tree_to_update", "=", "update_props", "[", "'tree_to_update'", "]", "prop", "=", "update_props", "[", "'prop'", "]", "values", "=", "(", "update_props", "[", "'values'", "]", "or", "{", "}", ")", ".", "get", "(", "DATE_VALUES", ")", "or", "u''", "xpaths", "=", "self", ".", "_data_structures", "[", "prop", "]", "if", "not", "self", ".", "dates", ":", "date_xpaths", "=", "xpath_root", "elif", "self", ".", "dates", "[", "DATE_TYPE", "]", "!=", "DATE_TYPE_RANGE", ":", "date_xpaths", "=", "xpaths", ".", "get", "(", "self", ".", "dates", "[", "DATE_TYPE", "]", ",", "u''", ")", "else", ":", "date_xpaths", "=", "[", "xpaths", "[", "DATE_TYPE_RANGE_BEGIN", "]", ",", "xpaths", "[", "DATE_TYPE_RANGE_END", "]", "]", "if", "xpath_root", ":", "remove_element", "(", "tree_to_update", ",", "xpath_root", ")", "return", "update_property", "(", "tree_to_update", ",", "xpath_root", ",", "date_xpaths", ",", "prop", ",", "values", ")" ]
Default update operation for Dates metadata :see: gis_metadata.utils._complex_definitions[DATES]
[ "Default", "update", "operation", "for", "Dates", "metadata", ":", "see", ":", "gis_metadata", ".", "utils", ".", "_complex_definitions", "[", "DATES", "]" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L332-L356
train
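The three branches above imply three shapes for self.dates; a sketch using the constants the code already references (the concrete values and the non-range type key are invented):

parser.dates = {}                                                            # empty: values written at xpath_root
parser.dates = {DATE_TYPE: 'single', DATE_VALUES: ['2019-01-01']}            # 'single' stands in for a non-range type
parser.dates = {DATE_TYPE: DATE_TYPE_RANGE, DATE_VALUES: ['2000', '2010']}   # begin/end xpath pair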
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser.write
def write(self, use_template=False, out_file_or_path=None, encoding=DEFAULT_ENCODING): """ Validates instance properties, updates an XML tree with them, and writes the content to a file. :param use_template: if True, updates a new template XML tree; otherwise the original XML tree :param out_file_or_path: optionally override self.out_file_or_path with a custom file path :param encoding: optionally use another encoding instead of UTF-8 """ if not out_file_or_path: out_file_or_path = self.out_file_or_path if not out_file_or_path: # FileNotFoundError doesn't exist in Python 2 raise IOError('Output file path has not been provided') write_element(self.update(use_template), out_file_or_path, encoding)
python
def write(self, use_template=False, out_file_or_path=None, encoding=DEFAULT_ENCODING): """ Validates instance properties, updates an XML tree with them, and writes the content to a file. :param use_template: if True, updates a new template XML tree; otherwise the original XML tree :param out_file_or_path: optionally override self.out_file_or_path with a custom file path :param encoding: optionally use another encoding instead of UTF-8 """ if not out_file_or_path: out_file_or_path = self.out_file_or_path if not out_file_or_path: # FileNotFoundError doesn't exist in Python 2 raise IOError('Output file path has not been provided') write_element(self.update(use_template), out_file_or_path, encoding)
[ "def", "write", "(", "self", ",", "use_template", "=", "False", ",", "out_file_or_path", "=", "None", ",", "encoding", "=", "DEFAULT_ENCODING", ")", ":", "if", "not", "out_file_or_path", ":", "out_file_or_path", "=", "self", ".", "out_file_or_path", "if", "not", "out_file_or_path", ":", "# FileNotFoundError doesn't exist in Python 2", "raise", "IOError", "(", "'Output file path has not been provided'", ")", "write_element", "(", "self", ".", "update", "(", "use_template", ")", ",", "out_file_or_path", ",", "encoding", ")" ]
Validates instance properties, updates an XML tree with them, and writes the content to a file. :param use_template: if True, updates a new template XML tree; otherwise the original XML tree :param out_file_or_path: optionally override self.out_file_or_path with a custom file path :param encoding: optionally use another encoding instead of UTF-8
[ "Validates", "instance", "properties", "updates", "an", "XML", "tree", "with", "them", "and", "writes", "the", "content", "to", "a", "file", ".", ":", "param", "use_template", ":", "if", "True", "updates", "a", "new", "template", "XML", "tree", ";", "otherwise", "the", "original", "XML", "tree", ":", "param", "out_file_or_path", ":", "optionally", "override", "self", ".", "out_file_or_path", "with", "a", "custom", "file", "path", ":", "param", "encoding", ":", "optionally", "use", "another", "encoding", "instead", "of", "UTF", "-", "8" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L373-L388
train
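A minimal call sketch grounded in the signature above; the parser instance and file name are placeholders:

parser.out_file_or_path = 'metadata_out.xml'          # or pass the path explicitly
parser.write(use_template=False, out_file_or_path='metadata_out.xml')
# with neither path set, the method raises IOError, as the code above shows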
consbio/gis-metadata-parser
gis_metadata/metadata_parser.py
MetadataParser.validate
def validate(self): """ Default validation for updated properties: MAY be overridden in children """ validate_properties(self._data_map, self._metadata_props) for prop in self._data_map: validate_any(prop, getattr(self, prop), self._data_structures.get(prop)) return self
python
def validate(self): """ Default validation for updated properties: MAY be overridden in children """ validate_properties(self._data_map, self._metadata_props) for prop in self._data_map: validate_any(prop, getattr(self, prop), self._data_structures.get(prop)) return self
[ "def", "validate", "(", "self", ")", ":", "validate_properties", "(", "self", ".", "_data_map", ",", "self", ".", "_metadata_props", ")", "for", "prop", "in", "self", ".", "_data_map", ":", "validate_any", "(", "prop", ",", "getattr", "(", "self", ",", "prop", ")", ",", "self", ".", "_data_structures", ".", "get", "(", "prop", ")", ")", "return", "self" ]
Default validation for updated properties: MAY be overridden in children
[ "Default", "validation", "for", "updated", "properties", ":", "MAY", "be", "overridden", "in", "children" ]
59eefb2e51cd4d8cc3e94623a2167499ca9ef70f
https://github.com/consbio/gis-metadata-parser/blob/59eefb2e51cd4d8cc3e94623a2167499ca9ef70f/gis_metadata/metadata_parser.py#L410-L418
train
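Since validate() returns self, it chains naturally with write(); a one-line sketch with a hypothetical parser instance:

parser.validate().write(out_file_or_path='checked.xml')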
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
_search_regex
def _search_regex(ops: dict, regex_pat: str): """ Search order: * specified regexps * operators sorted from longer to shorter """ custom_regexps = list(filter(None, [dic['regex'] for op, dic in ops.items() if 'regex' in dic])) op_names = [op for op, dic in ops.items() if 'regex' not in dic] regex = [regex_pat.format(_ops_regex(op_names))] if len(op_names) > 0 else [] return re.compile('|'.join(custom_regexps + regex))
python
def _search_regex(ops: dict, regex_pat: str): """ Search order: * specified regexps * operators sorted from longer to shorter """ custom_regexps = list(filter(None, [dic['regex'] for op, dic in ops.items() if 'regex' in dic])) op_names = [op for op, dic in ops.items() if 'regex' not in dic] regex = [regex_pat.format(_ops_regex(op_names))] if len(op_names) > 0 else [] return re.compile('|'.join(custom_regexps + regex))
[ "def", "_search_regex", "(", "ops", ":", "dict", ",", "regex_pat", ":", "str", ")", ":", "custom_regexps", "=", "list", "(", "filter", "(", "None", ",", "[", "dic", "[", "'regex'", "]", "for", "op", ",", "dic", "in", "ops", ".", "items", "(", ")", "if", "'regex'", "in", "dic", "]", ")", ")", "op_names", "=", "[", "op", "for", "op", ",", "dic", "in", "ops", ".", "items", "(", ")", "if", "'regex'", "not", "in", "dic", "]", "regex", "=", "[", "regex_pat", ".", "format", "(", "_ops_regex", "(", "op_names", ")", ")", "]", "if", "len", "(", "op_names", ")", ">", "0", "else", "[", "]", "return", "re", ".", "compile", "(", "'|'", ".", "join", "(", "custom_regexps", "+", "regex", ")", ")" ]
Search order: * specified regexps * operators sorted from longer to shorter
[ "Search", "order", ":", "*", "specified", "regexps", "*", "operators", "sorted", "from", "longer", "to", "shorter" ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L165-L174
train
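A toy invocation sketch showing the two sources the docstring lists; the ops mapping and regex_pat are invented, and _ops_regex is the module helper that escapes and length-sorts operator names:

ops = {
    '->': {},                       # no 'regex' key: matched through the shared pattern
    '**': {'regex': r'\*\*(\w+)'},  # per-operator regex, used verbatim
}
pattern = _search_regex(ops, regex_pat=r'({})')
# pattern alternates the custom regex with the escaped-operator pattern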
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
Styles.spec
def spec(self, postf_un_ops: str) -> list: """Return prefix unary operators list""" spec = [(l + op, {'pat': self.pat(pat), 'postf': self.postf(r, postf_un_ops), 'regex': None}) for op, pat in self.styles.items() for l, r in self.brackets] spec[0][1]['regex'] = self.regex_pat.format( _ops_regex(l for l, r in self.brackets), _ops_regex(self.styles.keys()) ) return spec
python
def spec(self, postf_un_ops: str) -> list: """Return prefix unary operators list""" spec = [(l + op, {'pat': self.pat(pat), 'postf': self.postf(r, postf_un_ops), 'regex': None}) for op, pat in self.styles.items() for l, r in self.brackets] spec[0][1]['regex'] = self.regex_pat.format( _ops_regex(l for l, r in self.brackets), _ops_regex(self.styles.keys()) ) return spec
[ "def", "spec", "(", "self", ",", "postf_un_ops", ":", "str", ")", "->", "list", ":", "spec", "=", "[", "(", "l", "+", "op", ",", "{", "'pat'", ":", "self", ".", "pat", "(", "pat", ")", ",", "'postf'", ":", "self", ".", "postf", "(", "r", ",", "postf_un_ops", ")", ",", "'regex'", ":", "None", "}", ")", "for", "op", ",", "pat", "in", "self", ".", "styles", ".", "items", "(", ")", "for", "l", ",", "r", "in", "self", ".", "brackets", "]", "spec", "[", "0", "]", "[", "1", "]", "[", "'regex'", "]", "=", "self", ".", "regex_pat", ".", "format", "(", "_ops_regex", "(", "l", "for", "l", ",", "r", "in", "self", ".", "brackets", ")", ",", "_ops_regex", "(", "self", ".", "styles", ".", "keys", "(", ")", ")", ")", "return", "spec" ]
Return prefix unary operators list
[ "Return", "prefix", "unary", "operators", "list" ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L216-L227
train
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
PrefUnGreedy.spec
def spec(self) -> list: """Returns prefix unary operators list. Sets only one regex for all items in the dict.""" spec = [item for op, pat in self.ops.items() for item in [('{' + op, {'pat': pat, 'postf': self.postf, 'regex': None}), ('˱' + op, {'pat': pat, 'postf': self.postf, 'regex': None})] ] spec[0][1]['regex'] = self.regex_pat.format(_ops_regex(self.ops.keys())) return spec
python
def spec(self) -> list: """Returns prefix unary operators list. Sets only one regex for all items in the dict.""" spec = [item for op, pat in self.ops.items() for item in [('{' + op, {'pat': pat, 'postf': self.postf, 'regex': None}), ('˱' + op, {'pat': pat, 'postf': self.postf, 'regex': None})] ] spec[0][1]['regex'] = self.regex_pat.format(_ops_regex(self.ops.keys())) return spec
[ "def", "spec", "(", "self", ")", "->", "list", ":", "spec", "=", "[", "item", "for", "op", ",", "pat", "in", "self", ".", "ops", ".", "items", "(", ")", "for", "item", "in", "[", "(", "'{'", "+", "op", ",", "{", "'pat'", ":", "pat", ",", "'postf'", ":", "self", ".", "postf", ",", "'regex'", ":", "None", "}", ")", ",", "(", "'˱' ", " ", "p,", " ", "'", "pat':", " ", "at,", " ", "postf':", " ", "elf.", "p", "ostf,", " ", "regex':", " ", "one}", ")", "]", "", "]", "spec", "[", "0", "]", "[", "1", "]", "[", "'regex'", "]", "=", "self", ".", "regex_pat", ".", "format", "(", "_ops_regex", "(", "self", ".", "ops", ".", "keys", "(", ")", ")", ")", "return", "spec" ]
Returns prefix unary operators list. Sets only one regex for all items in the dict.
[ "Returns", "prefix", "unary", "operators", "list", ".", "Sets", "only", "one", "regex", "for", "all", "items", "in", "the", "dict", "." ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L242-L251
train
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
PrefUnOps.fill
def fill(self, postf_un_ops: str): """ Insert: * math styles * other styles * unary prefix operators without brackets * defaults """ for op, dic in self.ops.items(): if 'postf' not in dic: dic['postf'] = self.postf self.ops = OrderedDict( self.styles.spec(postf_un_ops) + self.other_styles.spec(postf_un_ops) + self.pref_un_greedy.spec() + list(self.ops.items()) ) for op, dic in self.ops.items(): dic['postf'] = re.compile(dic['postf']) self.regex = _search_regex(self.ops, self.regex_pat)
python
def fill(self, postf_un_ops: str): """ Insert: * math styles * other styles * unary prefix operators without brackets * defaults """ for op, dic in self.ops.items(): if 'postf' not in dic: dic['postf'] = self.postf self.ops = OrderedDict( self.styles.spec(postf_un_ops) + self.other_styles.spec(postf_un_ops) + self.pref_un_greedy.spec() + list(self.ops.items()) ) for op, dic in self.ops.items(): dic['postf'] = re.compile(dic['postf']) self.regex = _search_regex(self.ops, self.regex_pat)
[ "def", "fill", "(", "self", ",", "postf_un_ops", ":", "str", ")", ":", "for", "op", ",", "dic", "in", "self", ".", "ops", ".", "items", "(", ")", ":", "if", "'postf'", "not", "in", "dic", ":", "dic", "[", "'postf'", "]", "=", "self", ".", "postf", "self", ".", "ops", "=", "OrderedDict", "(", "self", ".", "styles", ".", "spec", "(", "postf_un_ops", ")", "+", "self", ".", "other_styles", ".", "spec", "(", "postf_un_ops", ")", "+", "self", ".", "pref_un_greedy", ".", "spec", "(", ")", "+", "list", "(", "self", ".", "ops", ".", "items", "(", ")", ")", ")", "for", "op", ",", "dic", "in", "self", ".", "ops", ".", "items", "(", ")", ":", "dic", "[", "'postf'", "]", "=", "re", ".", "compile", "(", "dic", "[", "'postf'", "]", ")", "self", ".", "regex", "=", "_search_regex", "(", "self", ".", "ops", ",", "self", ".", "regex_pat", ")" ]
Insert: * math styles * other styles * unary prefix operators without brackets * defaults
[ "Insert", ":", "*", "math", "styles", "*", "other", "styles", "*", "unary", "prefix", "operators", "without", "brackets", "*", "defaults" ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L325-L344
train
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
PostfUnOps.one_symbol_ops_str
def one_symbol_ops_str(self) -> str: """Regex-escaped string with all one-symbol operators""" return re.escape(''.join((key for key in self.ops.keys() if len(key) == 1)))
python
def one_symbol_ops_str(self) -> str: """Regex-escaped string with all one-symbol operators""" return re.escape(''.join((key for key in self.ops.keys() if len(key) == 1)))
[ "def", "one_symbol_ops_str", "(", "self", ")", "->", "str", ":", "return", "re", ".", "escape", "(", "''", ".", "join", "(", "(", "key", "for", "key", "in", "self", ".", "ops", ".", "keys", "(", ")", "if", "len", "(", "key", ")", "==", "1", ")", ")", ")" ]
Regex-escaped string with all one-symbol operators
[ "Regex", "-", "escaped", "string", "with", "all", "one", "-", "symbol", "operators" ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L389-L391
train
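A standalone equivalent of the one-liner above, with an invented ops mapping:

import re
ops = {'!': {}, '¬': {}, 'mod': {}}
single = re.escape(''.join(k for k in ops if len(k) == 1))
# 'mod' is dropped: only one-character operators end up in the character class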
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
SugarTeX._su_scripts_regex
def _su_scripts_regex(self): """ :return: [compiled regex, function] """ sups = re.escape(''.join([k for k in self.superscripts.keys()])) subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' + r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format( su_=subs + sups, sub=subs, sup=sups) su_regex = re.compile(su_regex) def su_replace(m): esc, sub, root_sup, sup = m.groups() if esc is not None: return esc elif sub is not None: return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}' elif root_sup is not None: return ''.join([self.superscripts[c] for c in root_sup]) elif sup is not None: return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}' else: raise TypeError("Regex bug: this should never be reached") return [su_regex, su_replace]
python
def _su_scripts_regex(self): """ :return: [compiled regex, function] """ sups = re.escape(''.join([k for k in self.superscripts.keys()])) subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' + r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format( su_=subs + sups, sub=subs, sup=sups) su_regex = re.compile(su_regex) def su_replace(m): esc, sub, root_sup, sup = m.groups() if esc is not None: return esc elif sub is not None: return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}' elif root_sup is not None: return ''.join([self.superscripts[c] for c in root_sup]) elif sup is not None: return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}' else: raise TypeError("Regex bug: this should never be reached") return [su_regex, su_replace]
[ "def", "_su_scripts_regex", "(", "self", ")", ":", "sups", "=", "re", ".", "escape", "(", "''", ".", "join", "(", "[", "k", "for", "k", "in", "self", ".", "superscripts", ".", "keys", "(", ")", "]", ")", ")", "subs", "=", "re", ".", "escape", "(", "''", ".", "join", "(", "[", "k", "for", "k", "in", "self", ".", "subscripts", ".", "keys", "(", ")", "]", ")", ")", "# language=PythonRegExp", "su_regex", "=", "(", "r'\\\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' +", "", "r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format(", "", "", "", "", "su_", "=", "subs", "+", "sups", ",", "sub", "=", "subs", ",", "sup", "=", "sups", ")", "su_regex", "=", "re", ".", "compile", "(", "su_regex", ")", "def", "su_replace", "(", "m", ")", ":", "esc", ",", "sub", ",", "root_sup", ",", "sup", "=", "m", ".", "groups", "(", ")", "if", "esc", "is", "not", "None", ":", "return", "esc", "elif", "sub", "is", "not", "None", ":", "return", "'_{'", "+", "''", ".", "join", "(", "[", "c", "if", "(", "c", "in", "[", "'‹', ", "'", "', '˹", "'", " '˺'", "]", " els", "e", " ", "elf.", "ubsc", "r", "ipts[c] fo", "r", " ", "c", "in ", "u", "])", "+ '", "}", "'", "", "", "elif", "root_sup", "is", "not", "None", ":", "return", "''", ".", "join", "(", "[", "self", ".", "superscripts", "[", "c", "]", "for", "c", "in", "root_sup", "]", ")", "elif", "sup", "is", "not", "None", ":", "return", "'^{'", "+", "''", ".", "join", "(", "[", "c", "if", "(", "c", "in", "[", "'‹', ", "'", "', '˹", "'", " '˺'", "]", " els", "e", " ", "elf.", "uper", "s", "cripts[c] fo", "r", " ", "c", "in ", "u", "])", "+ '", "}", "'", "", "", "else", ":", "raise", "TypeError", "(", "\"Regex bug: this should never be reached\"", ")", "return", "[", "su_regex", ",", "su_replace", "]" ]
:return: [compiled regex, function]
[ ":", "return", ":", "[", "compiled", "regex", "function", "]" ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L671-L696
train
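A behavioral sketch of the returned [regex, replacer] pair, assuming an instance whose superscripts map has '²' -> '2' and whose subscripts map has 'ᵢ' -> 'i' (illustrative mappings):

su_regex, su_replace = stx._su_scripts_regex()   # stx: hypothetical SugarTeX instance
su_regex.sub(su_replace, 'x² aᵢ')                # would yield 'x^{2} a_{i}' under those maps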
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
SugarTeX._local_map
def _local_map(match, loc: str = 'lr') -> list: """ :param match: :param loc: str "l" or "r" or "lr" turns on/off left/right local area calculation :return: list list of the same size as the string + 2 it's the local map that counted { and } list can contain: None or int>=0 from the left of the operator match: in `b}a` if a:0 then }:0 and b:1 in `b{a` if a:0 then {:0 and b:-1(None) from the right of the operator match: in `a{b` if a:0 then {:0 and b:1 in `a}b` if a:0 then }:0 and b:-1(None) Map for +1 (needed for r'$') and -1 (needed for r'^') characters is also stored: +1 -> +1, -1 -> +2 """ s = match.string map_ = [None] * (len(s) + 2) if loc == 'l' or loc == 'lr': balance = 0 for i in reversed(range(0, match.start())): map_[i] = balance c, prev = s[i], (s[i - 1] if i > 0 else '') if (c == '}' or c == '˲') and prev != '\\': balance += 1 elif (c == '{' or c == '˱') and prev != '\\': balance -= 1 if balance < 0: break map_[-1] = balance if loc == 'r' or loc == 'lr': balance = 0 for i in range(match.end(), len(s)): map_[i] = balance c, prev = s[i], s[i - 1] if (c == '{' or c == '˱') and prev != '\\': balance += 1 elif (c == '}' or c == '˲') and prev != '\\': balance -= 1 if balance < 0: break map_[len(s)] = balance return map_
python
def _local_map(match, loc: str = 'lr') -> list: """ :param match: :param loc: str "l" or "r" or "lr" turns on/off left/right local area calculation :return: list list of the same size as the string + 2 it's the local map that counted { and } list can contain: None or int>=0 from the left of the operator match: in `b}a` if a:0 then }:0 and b:1 in `b{a` if a:0 then {:0 and b:-1(None) from the right of the operator match: in `a{b` if a:0 then {:0 and b:1 in `a}b` if a:0 then }:0 and b:-1(None) Map for +1 (needed for r'$') and -1 (needed for r'^') characters is also stored: +1 -> +1, -1 -> +2 """ s = match.string map_ = [None] * (len(s) + 2) if loc == 'l' or loc == 'lr': balance = 0 for i in reversed(range(0, match.start())): map_[i] = balance c, prev = s[i], (s[i - 1] if i > 0 else '') if (c == '}' or c == '˲') and prev != '\\': balance += 1 elif (c == '{' or c == '˱') and prev != '\\': balance -= 1 if balance < 0: break map_[-1] = balance if loc == 'r' or loc == 'lr': balance = 0 for i in range(match.end(), len(s)): map_[i] = balance c, prev = s[i], s[i - 1] if (c == '{' or c == '˱') and prev != '\\': balance += 1 elif (c == '}' or c == '˲') and prev != '\\': balance -= 1 if balance < 0: break map_[len(s)] = balance return map_
[ "def", "_local_map", "(", "match", ",", "loc", ":", "str", "=", "'lr'", ")", "->", "list", ":", "s", "=", "match", ".", "string", "map_", "=", "[", "None", "]", "*", "(", "len", "(", "s", ")", "+", "2", ")", "if", "loc", "==", "'l'", "or", "loc", "==", "'lr'", ":", "balance", "=", "0", "for", "i", "in", "reversed", "(", "range", "(", "0", ",", "match", ".", "start", "(", ")", ")", ")", ":", "map_", "[", "i", "]", "=", "balance", "c", ",", "prev", "=", "s", "[", "i", "]", ",", "(", "s", "[", "i", "-", "1", "]", "if", "i", ">", "0", "else", "''", ")", "if", "(", "c", "==", "'}'", "or", "c", "==", "'˲')", " ", "nd ", "rev ", "= ", "\\\\':", "", "balance", "+=", "1", "elif", "(", "c", "==", "'{'", "or", "c", "==", "'˱')", " ", "nd ", "rev ", "= ", "\\\\':", "", "balance", "-=", "1", "if", "balance", "<", "0", ":", "break", "map_", "[", "-", "1", "]", "=", "balance", "if", "loc", "==", "'r'", "or", "loc", "==", "'lr'", ":", "balance", "=", "0", "for", "i", "in", "range", "(", "match", ".", "end", "(", ")", ",", "len", "(", "s", ")", ")", ":", "map_", "[", "i", "]", "=", "balance", "c", ",", "prev", "=", "s", "[", "i", "]", ",", "s", "[", "i", "-", "1", "]", "if", "(", "c", "==", "'{'", "or", "c", "==", "'˱')", " ", "nd ", "rev ", "= ", "\\\\':", "", "balance", "+=", "1", "elif", "(", "c", "==", "'}'", "or", "c", "==", "'˲')", " ", "nd ", "rev ", "= ", "\\\\':", "", "balance", "-=", "1", "if", "balance", "<", "0", ":", "break", "map_", "[", "len", "(", "s", ")", "]", "=", "balance", "return", "map_" ]
:param match: :param loc: str "l" or "r" or "lr" turns on/off left/right local area calculation :return: list list of the same size as the string + 2 it's the local map that counted { and } list can contain: None or int>=0 from the left of the operator match: in `b}a` if a:0 then }:0 and b:1 in `b{a` if a:0 then {:0 and b:-1(None) from the right of the operator match: in `a{b` if a:0 then {:0 and b:1 in `a}b` if a:0 then }:0 and b:-1(None) Map for +1 (needed for r'$') and -1 (needed for r'^') characters is also stored: +1 -> +1, -1 -> +2
[ ":", "param", "match", ":", ":", "param", "loc", ":", "str", "l", "or", "r", "or", "lr", "turns", "on", "/", "off", "left", "/", "right", "local", "area", "calculation", ":", "return", ":", "list", "list", "of", "the", "same", "size", "as", "the", "string", "+", "2", "it", "s", "the", "local", "map", "that", "counted", "{", "and", "}", "list", "can", "contain", ":", "None", "or", "int", ">", "=", "0", "from", "the", "left", "of", "the", "operator", "match", ":", "in", "b", "}", "a", "if", "a", ":", "0", "then", "}", ":", "0", "and", "b", ":", "1", "in", "b", "{", "a", "if", "a", ":", "0", "then", "{", ":", "0", "and", "b", ":", "-", "1", "(", "None", ")", "from", "the", "right", "of", "the", "operator", "match", ":", "in", "a", "{", "b", "if", "a", ":", "0", "then", "{", ":", "0", "and", "b", ":", "1", "in", "a", "}", "b", "if", "a", ":", "0", "then", "}", ":", "0", "and", "b", ":", "-", "1", "(", "None", ")", "Map", "for", "+", "1", "(", "needed", "for", "r", "$", ")", "and", "-", "1", "(", "needed", "for", "r", "^", ")", "characters", "is", "also", "stored", ":", "+", "1", "-", ">", "+", "1", "-", "1", "-", ">", "+", "2" ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L708-L753
train
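A worked sketch of the balance map on a hypothetical match of '/' inside '{a/b}'; the static-style call assumes SugarTeX is imported and _local_map is a staticmethod, which its (match, loc) signature suggests:

import re
m = re.search('/', '{a/b}')
lm = SugarTeX._local_map(m, 'lr')
# 'a' and 'b' map to 0 (same scope as the operator); each scan stops at the
# enclosing brace with balance -1, recorded in the list's two extra slots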
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
SugarTeX._operators_replace
def _operators_replace(self, string: str) -> str: """ Searches for first unary or binary operator (via self.op_regex that has only one group that contain operator) then replaces it (or escapes it if brackets do not match). Everything until: * space ' ' * begin/end of the string * bracket from outer scope (like '{a/b}': term1=a term2=b) is considered a term (contents of matching brackets '{}' are ignored). Attributes ---------- string: str string to replace """ # noinspection PyShadowingNames def replace(string: str, start: int, end: int, substring: str) -> str: return string[0:start] + substring + string[end:len(string)] # noinspection PyShadowingNames def sub_pat(pat: Callable[[list], str] or str, terms: list) -> str: if isinstance(pat, str): return pat.format(*terms) else: return pat(terms) count = 0 def check(): nonlocal count count += 1 if count > self.max_while: raise RuntimeError('Presumably while loop is stuck') # noinspection PyShadowingNames def null_replace(match) -> str: regex_terms = [gr for gr in match.groups() if gr is not None] op = regex_terms[0] terms = regex_terms[1:] return sub_pat(self.null_ops.ops[op]['pat'], terms) string = self.null_ops.regex.sub(null_replace, string) for ops, loc in [(self.pref_un_ops, 'r'), (self.postf_un_ops, 'l'), (self.bin_centr_ops, 'lr')]: count = 0 match = ops.regex.search(string) while match: check() regex_terms = [gr for gr in match.groups() if gr is not None] op = regex_terms[0] loc_map = self._local_map(match, loc) lmatch, rmatch = None, None if loc == 'l' or loc == 'lr': for m in ops.ops[op]['pref'].finditer(string): if m.end() <= match.start() and loc_map[m.end() - 1] == 0: lmatch = m if lmatch is None: string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op)) match = ops.regex.search(string) continue else: term1 = string[lmatch.end():match.start()] if loc == 'r' or loc == 'lr': for m in ops.ops[op]['postf'].finditer(string): if m.start() >= match.end() and loc_map[m.start()] == 0: rmatch = m break if rmatch is None: string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op)) match = ops.regex.search(string) continue else: term2 = string[match.end():rmatch.start()] if loc == 'l': # noinspection PyUnboundLocalVariable terms = list(lmatch.groups()) + [term1] + regex_terms[1:] start, end = lmatch.start(), match.end() elif loc == 'r': # noinspection PyUnboundLocalVariable terms = regex_terms[1:] + [term2] + list(rmatch.groups()) start, end = match.start(), rmatch.end() elif loc == 'lr': terms = list(lmatch.groups()) + [term1] + regex_terms[1:] + [term2] + list(rmatch.groups()) start, end = lmatch.start(), rmatch.end() else: # this never happen terms = regex_terms[1:] start, end = match.start(), match.end() string = replace(string, start, end, sub_pat(ops.ops[op]['pat'], terms)) match = ops.regex.search(string) return string
python
def _operators_replace(self, string: str) -> str: """ Searches for first unary or binary operator (via self.op_regex that has only one group that contain operator) then replaces it (or escapes it if brackets do not match). Everything until: * space ' ' * begin/end of the string * bracket from outer scope (like '{a/b}': term1=a term2=b) is considered a term (contents of matching brackets '{}' are ignored). Attributes ---------- string: str string to replace """ # noinspection PyShadowingNames def replace(string: str, start: int, end: int, substring: str) -> str: return string[0:start] + substring + string[end:len(string)] # noinspection PyShadowingNames def sub_pat(pat: Callable[[list], str] or str, terms: list) -> str: if isinstance(pat, str): return pat.format(*terms) else: return pat(terms) count = 0 def check(): nonlocal count count += 1 if count > self.max_while: raise RuntimeError('Presumably while loop is stuck') # noinspection PyShadowingNames def null_replace(match) -> str: regex_terms = [gr for gr in match.groups() if gr is not None] op = regex_terms[0] terms = regex_terms[1:] return sub_pat(self.null_ops.ops[op]['pat'], terms) string = self.null_ops.regex.sub(null_replace, string) for ops, loc in [(self.pref_un_ops, 'r'), (self.postf_un_ops, 'l'), (self.bin_centr_ops, 'lr')]: count = 0 match = ops.regex.search(string) while match: check() regex_terms = [gr for gr in match.groups() if gr is not None] op = regex_terms[0] loc_map = self._local_map(match, loc) lmatch, rmatch = None, None if loc == 'l' or loc == 'lr': for m in ops.ops[op]['pref'].finditer(string): if m.end() <= match.start() and loc_map[m.end() - 1] == 0: lmatch = m if lmatch is None: string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op)) match = ops.regex.search(string) continue else: term1 = string[lmatch.end():match.start()] if loc == 'r' or loc == 'lr': for m in ops.ops[op]['postf'].finditer(string): if m.start() >= match.end() and loc_map[m.start()] == 0: rmatch = m break if rmatch is None: string = replace(string, match.start(), match.end(), match.group(0).replace(op, '\\' + op)) match = ops.regex.search(string) continue else: term2 = string[match.end():rmatch.start()] if loc == 'l': # noinspection PyUnboundLocalVariable terms = list(lmatch.groups()) + [term1] + regex_terms[1:] start, end = lmatch.start(), match.end() elif loc == 'r': # noinspection PyUnboundLocalVariable terms = regex_terms[1:] + [term2] + list(rmatch.groups()) start, end = match.start(), rmatch.end() elif loc == 'lr': terms = list(lmatch.groups()) + [term1] + regex_terms[1:] + [term2] + list(rmatch.groups()) start, end = lmatch.start(), rmatch.end() else: # this never happen terms = regex_terms[1:] start, end = match.start(), match.end() string = replace(string, start, end, sub_pat(ops.ops[op]['pat'], terms)) match = ops.regex.search(string) return string
[ "def", "_operators_replace", "(", "self", ",", "string", ":", "str", ")", "->", "str", ":", "# noinspection PyShadowingNames", "def", "replace", "(", "string", ":", "str", ",", "start", ":", "int", ",", "end", ":", "int", ",", "substring", ":", "str", ")", "->", "str", ":", "return", "string", "[", "0", ":", "start", "]", "+", "substring", "+", "string", "[", "end", ":", "len", "(", "string", ")", "]", "# noinspection PyShadowingNames", "def", "sub_pat", "(", "pat", ":", "Callable", "[", "[", "list", "]", ",", "str", "]", "or", "str", ",", "terms", ":", "list", ")", "->", "str", ":", "if", "isinstance", "(", "pat", ",", "str", ")", ":", "return", "pat", ".", "format", "(", "*", "terms", ")", "else", ":", "return", "pat", "(", "terms", ")", "count", "=", "0", "def", "check", "(", ")", ":", "nonlocal", "count", "count", "+=", "1", "if", "count", ">", "self", ".", "max_while", ":", "raise", "RuntimeError", "(", "'Presumably while loop is stuck'", ")", "# noinspection PyShadowingNames", "def", "null_replace", "(", "match", ")", "->", "str", ":", "regex_terms", "=", "[", "gr", "for", "gr", "in", "match", ".", "groups", "(", ")", "if", "gr", "is", "not", "None", "]", "op", "=", "regex_terms", "[", "0", "]", "terms", "=", "regex_terms", "[", "1", ":", "]", "return", "sub_pat", "(", "self", ".", "null_ops", ".", "ops", "[", "op", "]", "[", "'pat'", "]", ",", "terms", ")", "string", "=", "self", ".", "null_ops", ".", "regex", ".", "sub", "(", "null_replace", ",", "string", ")", "for", "ops", ",", "loc", "in", "[", "(", "self", ".", "pref_un_ops", ",", "'r'", ")", ",", "(", "self", ".", "postf_un_ops", ",", "'l'", ")", ",", "(", "self", ".", "bin_centr_ops", ",", "'lr'", ")", "]", ":", "count", "=", "0", "match", "=", "ops", ".", "regex", ".", "search", "(", "string", ")", "while", "match", ":", "check", "(", ")", "regex_terms", "=", "[", "gr", "for", "gr", "in", "match", ".", "groups", "(", ")", "if", "gr", "is", "not", "None", "]", "op", "=", "regex_terms", "[", "0", "]", "loc_map", "=", "self", ".", "_local_map", "(", "match", ",", "loc", ")", "lmatch", ",", "rmatch", "=", "None", ",", "None", "if", "loc", "==", "'l'", "or", "loc", "==", "'lr'", ":", "for", "m", "in", "ops", ".", "ops", "[", "op", "]", "[", "'pref'", "]", ".", "finditer", "(", "string", ")", ":", "if", "m", ".", "end", "(", ")", "<=", "match", ".", "start", "(", ")", "and", "loc_map", "[", "m", ".", "end", "(", ")", "-", "1", "]", "==", "0", ":", "lmatch", "=", "m", "if", "lmatch", "is", "None", ":", "string", "=", "replace", "(", "string", ",", "match", ".", "start", "(", ")", ",", "match", ".", "end", "(", ")", ",", "match", ".", "group", "(", "0", ")", ".", "replace", "(", "op", ",", "'\\\\'", "+", "op", ")", ")", "match", "=", "ops", ".", "regex", ".", "search", "(", "string", ")", "continue", "else", ":", "term1", "=", "string", "[", "lmatch", ".", "end", "(", ")", ":", "match", ".", "start", "(", ")", "]", "if", "loc", "==", "'r'", "or", "loc", "==", "'lr'", ":", "for", "m", "in", "ops", ".", "ops", "[", "op", "]", "[", "'postf'", "]", ".", "finditer", "(", "string", ")", ":", "if", "m", ".", "start", "(", ")", ">=", "match", ".", "end", "(", ")", "and", "loc_map", "[", "m", ".", "start", "(", ")", "]", "==", "0", ":", "rmatch", "=", "m", "break", "if", "rmatch", "is", "None", ":", "string", "=", "replace", "(", "string", ",", "match", ".", "start", "(", ")", ",", "match", ".", "end", "(", ")", ",", "match", ".", "group", "(", "0", ")", ".", "replace", "(", "op", ",", "'\\\\'", "+", "op", ")", ")", "match", "=", "ops", 
".", "regex", ".", "search", "(", "string", ")", "continue", "else", ":", "term2", "=", "string", "[", "match", ".", "end", "(", ")", ":", "rmatch", ".", "start", "(", ")", "]", "if", "loc", "==", "'l'", ":", "# noinspection PyUnboundLocalVariable", "terms", "=", "list", "(", "lmatch", ".", "groups", "(", ")", ")", "+", "[", "term1", "]", "+", "regex_terms", "[", "1", ":", "]", "start", ",", "end", "=", "lmatch", ".", "start", "(", ")", ",", "match", ".", "end", "(", ")", "elif", "loc", "==", "'r'", ":", "# noinspection PyUnboundLocalVariable", "terms", "=", "regex_terms", "[", "1", ":", "]", "+", "[", "term2", "]", "+", "list", "(", "rmatch", ".", "groups", "(", ")", ")", "start", ",", "end", "=", "match", ".", "start", "(", ")", ",", "rmatch", ".", "end", "(", ")", "elif", "loc", "==", "'lr'", ":", "terms", "=", "list", "(", "lmatch", ".", "groups", "(", ")", ")", "+", "[", "term1", "]", "+", "regex_terms", "[", "1", ":", "]", "+", "[", "term2", "]", "+", "list", "(", "rmatch", ".", "groups", "(", ")", ")", "start", ",", "end", "=", "lmatch", ".", "start", "(", ")", ",", "rmatch", ".", "end", "(", ")", "else", ":", "# this never happen", "terms", "=", "regex_terms", "[", "1", ":", "]", "start", ",", "end", "=", "match", ".", "start", "(", ")", ",", "match", ".", "end", "(", ")", "string", "=", "replace", "(", "string", ",", "start", ",", "end", ",", "sub_pat", "(", "ops", ".", "ops", "[", "op", "]", "[", "'pat'", "]", ",", "terms", ")", ")", "match", "=", "ops", ".", "regex", ".", "search", "(", "string", ")", "return", "string" ]
Searches for first unary or binary operator (via self.op_regex that has only one group that contain operator) then replaces it (or escapes it if brackets do not match). Everything until: * space ' ' * begin/end of the string * bracket from outer scope (like '{a/b}': term1=a term2=b) is considered a term (contents of matching brackets '{}' are ignored). Attributes ---------- string: str string to replace
[ "Searches", "for", "first", "unary", "or", "binary", "operator", "(", "via", "self", ".", "op_regex", "that", "has", "only", "one", "group", "that", "contain", "operator", ")", "then", "replaces", "it", "(", "or", "escapes", "it", "if", "brackets", "do", "not", "match", ")", ".", "Everything", "until", ":", "*", "space", "*", "begin", "/", "end", "of", "the", "string", "*", "bracket", "from", "outer", "scope", "(", "like", "{", "a", "/", "b", "}", ":", "term1", "=", "a", "term2", "=", "b", ")", "is", "considered", "a", "term", "(", "contents", "of", "matching", "brackets", "{}", "are", "ignored", ")", "." ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L755-L849
train
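A sketch of the escape-on-mismatch behavior described in the docstring: when no valid term is found on a required side, the operator is re-emitted with a backslash so the search moves on, e.g. conceptually:

# binary centred operator '/' with no right-hand term:
#   'a /'  ->  'a \/'   (escaped in place, then re-searched)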
kiwi0fruit/sugartex
sugartex/sugartex_filter.py
SugarTeX.replace
def replace(self, src: str) -> str: """ Extends LaTeX syntax via regex preprocess :param src: str LaTeX string :return: str New LaTeX string """ if not self.readied: self.ready() # Brackets + simple pre replacements: src = self._dict_replace(self.simple_pre, src) # Superscripts and subscripts + pre regexps: for regex, replace in self.regex_pre: src = regex.sub(replace, src) # Unary and binary operators: src = self._operators_replace(src) # Loop regexps: src_prev = src for i in range(self.max_iter): for regex, replace in self.loop_regexps: src = regex.sub(replace, src) if src_prev == src: break else: src_prev = src # Post regexps: for regex, replace in self.regex_post: src = regex.sub(replace, src) # Simple post replacements: src = self._dict_replace(self.simple_post, src) # Escape characters: src = self.escapes_regex.sub(r'\1', src) return src
python
def replace(self, src: str) -> str: """ Extends LaTeX syntax via regex preprocess :param src: str LaTeX string :return: str New LaTeX string """ if not self.readied: self.ready() # Brackets + simple pre replacements: src = self._dict_replace(self.simple_pre, src) # Superscripts and subscripts + pre regexps: for regex, replace in self.regex_pre: src = regex.sub(replace, src) # Unary and binary operators: src = self._operators_replace(src) # Loop regexps: src_prev = src for i in range(self.max_iter): for regex, replace in self.loop_regexps: src = regex.sub(replace, src) if src_prev == src: break else: src_prev = src # Post regexps: for regex, replace in self.regex_post: src = regex.sub(replace, src) # Simple post replacements: src = self._dict_replace(self.simple_post, src) # Escape characters: src = self.escapes_regex.sub(r'\1', src) return src
[ "def", "replace", "(", "self", ",", "src", ":", "str", ")", "->", "str", ":", "if", "not", "self", ".", "readied", ":", "self", ".", "ready", "(", ")", "# Brackets + simple pre replacements:", "src", "=", "self", ".", "_dict_replace", "(", "self", ".", "simple_pre", ",", "src", ")", "# Superscripts and subscripts + pre regexps:", "for", "regex", ",", "replace", "in", "self", ".", "regex_pre", ":", "src", "=", "regex", ".", "sub", "(", "replace", ",", "src", ")", "# Unary and binary operators:", "src", "=", "self", ".", "_operators_replace", "(", "src", ")", "# Loop regexps:", "src_prev", "=", "src", "for", "i", "in", "range", "(", "self", ".", "max_iter", ")", ":", "for", "regex", ",", "replace", "in", "self", ".", "loop_regexps", ":", "src", "=", "regex", ".", "sub", "(", "replace", ",", "src", ")", "if", "src_prev", "==", "src", ":", "break", "else", ":", "src_prev", "=", "src", "# Post regexps:", "for", "regex", ",", "replace", "in", "self", ".", "regex_post", ":", "src", "=", "regex", ".", "sub", "(", "replace", ",", "src", ")", "# Simple post replacements:", "src", "=", "self", ".", "_dict_replace", "(", "self", ".", "simple_post", ",", "src", ")", "# Escape characters:", "src", "=", "self", ".", "escapes_regex", ".", "sub", "(", "r'\\1'", ",", "src", ")", "return", "src" ]
Extends LaTeX syntax via regex preprocess :param src: str LaTeX string :return: str New LaTeX string
[ "Extends", "LaTeX", "syntax", "via", "regex", "preprocess", ":", "param", "src", ":", "str", "LaTeX", "string", ":", "return", ":", "str", "New", "LaTeX", "string" ]
9eb13703cb02d3e2163c9c5f29df280f6bf49cec
https://github.com/kiwi0fruit/sugartex/blob/9eb13703cb02d3e2163c9c5f29df280f6bf49cec/sugartex/sugartex_filter.py#L863-L904
train
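A minimal end-to-end sketch; the import path follows this record's path field, and the default constructor plus input string are assumptions:

from sugartex.sugartex_filter import SugarTeX
stx = SugarTeX()
stx.ready()               # optional: replace() calls it lazily via self.readied
latex = stx.replace('x²') # extended-syntax input in, plain LaTeX out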
christophertbrown/bioscripts
ctbBio/strip_align.py
plot_gaps
def plot_gaps(plot, columns): """ plot % of gaps at each position """ from plot_window import window_plot_convolve as plot_window # plot_window([columns], len(columns)*.01, plot) plot_window([[100 - i for i in columns]], len(columns)*.01, plot)
python
def plot_gaps(plot, columns): """ plot % of gaps at each position """ from plot_window import window_plot_convolve as plot_window # plot_window([columns], len(columns)*.01, plot) plot_window([[100 - i for i in columns]], len(columns)*.01, plot)
[ "def", "plot_gaps", "(", "plot", ",", "columns", ")", ":", "from", "plot_window", "import", "window_plot_convolve", "as", "plot_window", "#\tplot_window([columns], len(columns)*.01, plot)", "plot_window", "(", "[", "[", "100", "-", "i", "for", "i", "in", "columns", "]", "]", ",", "len", "(", "columns", ")", "*", ".01", ",", "plot", ")" ]
plot % of gaps at each position
[ "plot", "%", "of", "gaps", "at", "each", "position" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_align.py#L11-L17
train
christophertbrown/bioscripts
ctbBio/strip_align.py
strip_msa_100
def strip_msa_100(msa, threshold, plot = False): """ strip out columns of a MSA that represent gaps for X percent (threshold) of sequences """ msa = [seq for seq in parse_fasta(msa)] columns = [[0, 0] for pos in msa[0][1]] # [[#bases, #gaps], [#bases, #gaps], ...] for seq in msa: for position, base in enumerate(seq[1]): if base == '-' or base == '.': columns[position][1] += 1 else: columns[position][0] += 1 columns = [float(float(g)/float(g+b)*100) for b, g in columns] # convert to percent gaps for seq in msa: stripped = [] for position, base in enumerate(seq[1]): if columns[position] < threshold: stripped.append(base) yield [seq[0], ''.join(stripped)] if plot is not False: plot_gaps(plot, columns)
python
def strip_msa_100(msa, threshold, plot = False): """ strip out columns of a MSA that represent gaps for X percent (threshold) of sequences """ msa = [seq for seq in parse_fasta(msa)] columns = [[0, 0] for pos in msa[0][1]] # [[#bases, #gaps], [#bases, #gaps], ...] for seq in msa: for position, base in enumerate(seq[1]): if base == '-' or base == '.': columns[position][1] += 1 else: columns[position][0] += 1 columns = [float(float(g)/float(g+b)*100) for b, g in columns] # convert to percent gaps for seq in msa: stripped = [] for position, base in enumerate(seq[1]): if columns[position] < threshold: stripped.append(base) yield [seq[0], ''.join(stripped)] if plot is not False: plot_gaps(plot, columns)
[ "def", "strip_msa_100", "(", "msa", ",", "threshold", ",", "plot", "=", "False", ")", ":", "msa", "=", "[", "seq", "for", "seq", "in", "parse_fasta", "(", "msa", ")", "]", "columns", "=", "[", "[", "0", ",", "0", "]", "for", "pos", "in", "msa", "[", "0", "]", "[", "1", "]", "]", "# [[#bases, #gaps], [#bases, #gaps], ...]", "for", "seq", "in", "msa", ":", "for", "position", ",", "base", "in", "enumerate", "(", "seq", "[", "1", "]", ")", ":", "if", "base", "==", "'-'", "or", "base", "==", "'.'", ":", "columns", "[", "position", "]", "[", "1", "]", "+=", "1", "else", ":", "columns", "[", "position", "]", "[", "0", "]", "+=", "1", "columns", "=", "[", "float", "(", "float", "(", "g", ")", "/", "float", "(", "g", "+", "b", ")", "*", "100", ")", "for", "b", ",", "g", "in", "columns", "]", "# convert to percent gaps", "for", "seq", "in", "msa", ":", "stripped", "=", "[", "]", "for", "position", ",", "base", "in", "enumerate", "(", "seq", "[", "1", "]", ")", ":", "if", "columns", "[", "position", "]", "<", "threshold", ":", "stripped", ".", "append", "(", "base", ")", "yield", "[", "seq", "[", "0", "]", ",", "''", ".", "join", "(", "stripped", ")", "]", "if", "plot", "is", "not", "False", ":", "plot_gaps", "(", "plot", ",", "columns", ")" ]
strip out columns of a MSA that represent gaps for X percent (threshold) of sequences
[ "strip", "out", "columns", "of", "a", "MSA", "that", "represent", "gaps", "for", "X", "percent", "(", "threshold", ")", "of", "sequences" ]
83b2566b3a5745437ec651cd6cafddd056846240
https://github.com/christophertbrown/bioscripts/blob/83b2566b3a5745437ec651cd6cafddd056846240/ctbBio/strip_align.py#L19-L39
train
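A call sketch (file names invented; the yielded pairs are [header, stripped_sequence], with the exact header format depending on parse_fasta):

with open('aln.stripped.fa', 'w') as out:
    for header, seq in strip_msa_100('aln.fa', threshold=50.0):
        out.write('%s\n%s\n' % (header, seq))
# a column survives only while its percent-gap value stays below the threshold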
smdabdoub/phylotoast
bin/extract_shared_or_unique_otuids.py
sample_group
def sample_group(sid, groups): """ Iterate through all categories in an OrderedDict and return category name if SampleID present in that category. :type sid: str :param sid: SampleID from dataset. :type groups: OrderedDict :param groups: Returned dict from phylotoast.util.gather_categories() function. :return type: str :return: Category name used to classify `sid`. """ for name in groups: if sid in groups[name].sids: return name
python
def sample_group(sid, groups): """ Iterate through all categories in an OrderedDict and return category name if SampleID present in that category. :type sid: str :param sid: SampleID from dataset. :type groups: OrderedDict :param groups: Returned dict from phylotoast.util.gather_categories() function. :return type: str :return: Category name used to classify `sid`. """ for name in groups: if sid in groups[name].sids: return name
[ "def", "sample_group", "(", "sid", ",", "groups", ")", ":", "for", "name", "in", "groups", ":", "if", "sid", "in", "groups", "[", "name", "]", ".", "sids", ":", "return", "name" ]
Iterate through all categories in an OrderedDict and return category name if SampleID present in that category. :type sid: str :param sid: SampleID from dataset. :type groups: OrderedDict :param groups: Returned dict from phylotoast.util.gather_categories() function. :return type: str :return: Category name used to classify `sid`.
[ "Iterate", "through", "all", "categories", "in", "an", "OrderedDict", "and", "return", "category", "name", "if", "SampleID", "present", "in", "that", "category", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L22-L38
train
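A toy run; the group objects fake only the .sids attribute that gather_categories() normally provides:

from collections import OrderedDict, namedtuple
Grp = namedtuple('Grp', 'sids')
groups = OrderedDict([('Control', Grp({'S1', 'S2'})), ('Case', Grp({'S3'}))])
sample_group('S3', groups)   # -> 'Case'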
smdabdoub/phylotoast
bin/extract_shared_or_unique_otuids.py
combine_sets
def combine_sets(*sets): """ Combine multiple sets to create a single larger set. """ combined = set() for s in sets: combined.update(s) return combined
python
def combine_sets(*sets): """ Combine multiple sets to create a single larger set. """ combined = set() for s in sets: combined.update(s) return combined
[ "def", "combine_sets", "(", "*", "sets", ")", ":", "combined", "=", "set", "(", ")", "for", "s", "in", "sets", ":", "combined", ".", "update", "(", "s", ")", "return", "combined" ]
Combine multiple sets to create a single larger set.
[ "Combine", "multiple", "sets", "to", "create", "a", "single", "larger", "set", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L41-L48
train
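A one-line check of the union semantics:

combine_sets({1, 2}, {2, 3}, {4})   # -> {1, 2, 3, 4}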
smdabdoub/phylotoast
bin/extract_shared_or_unique_otuids.py
unique_otuids
def unique_otuids(groups): """ Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values. """ uniques = {key: set() for key in groups} for i, group in enumerate(groups): to_combine = groups.values()[:i]+groups.values()[i+1:] combined = combine_sets(*to_combine) uniques[group] = groups[group].difference(combined) return uniques
python
def unique_otuids(groups): """ Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values. """ uniques = {key: set() for key in groups} for i, group in enumerate(groups): to_combine = groups.values()[:i]+groups.values()[i+1:] combined = combine_sets(*to_combine) uniques[group] = groups[group].difference(combined) return uniques
[ "def", "unique_otuids", "(", "groups", ")", ":", "uniques", "=", "{", "key", ":", "set", "(", ")", "for", "key", "in", "groups", "}", "for", "i", ",", "group", "in", "enumerate", "(", "groups", ")", ":", "to_combine", "=", "groups", ".", "values", "(", ")", "[", ":", "i", "]", "+", "groups", ".", "values", "(", ")", "[", "i", "+", "1", ":", "]", "combined", "=", "combine_sets", "(", "*", "to_combine", ")", "uniques", "[", "group", "]", "=", "groups", "[", "group", "]", ".", "difference", "(", "combined", ")", "return", "uniques" ]
Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values.
[ "Get", "unique", "OTUIDs", "of", "each", "category", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L51-L66
train
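A toy expectation; note that groups.values()[:i] above slices the result of dict.values(), so the record's code is Python 2 (a Python 3 dict view would raise TypeError):

groups = {'A': {'otu1', 'otu2'}, 'B': {'otu2', 'otu3'}}
# unique_otuids(groups) -> {'A': {'otu1'}, 'B': {'otu3'}}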
smdabdoub/phylotoast
bin/extract_shared_or_unique_otuids.py
shared_otuids
def shared_otuids(groups): """ Get shared OTUIDs between all unique combinations of groups. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on group combination and their shared OTUIDs as values. """ for g in sorted(groups): print("Number of OTUs in {0}: {1}".format(g, len(groups[g].results["otuids"]))) number_of_categories = len(groups) shared = defaultdict() for i in range(2, number_of_categories+1): for j in combinations(sorted(groups), i): combo_name = " & ".join(list(j)) for grp in j: # initialize combo values shared[combo_name] = groups[j[0]].results["otuids"].copy() """iterate through all groups and keep updating combo OTUIDs with set intersection_update""" for grp in j[1:]: shared[combo_name].intersection_update(groups[grp].results["otuids"]) return shared
python
def shared_otuids(groups): """ Get shared OTUIDs between all unique combinations of groups. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on group combination and their shared OTUIDs as values. """ for g in sorted(groups): print("Number of OTUs in {0}: {1}".format(g, len(groups[g].results["otuids"]))) number_of_categories = len(groups) shared = defaultdict() for i in range(2, number_of_categories+1): for j in combinations(sorted(groups), i): combo_name = " & ".join(list(j)) for grp in j: # initialize combo values shared[combo_name] = groups[j[0]].results["otuids"].copy() """iterate through all groups and keep updating combo OTUIDs with set intersection_update""" for grp in j[1:]: shared[combo_name].intersection_update(groups[grp].results["otuids"]) return shared
[ "def", "shared_otuids", "(", "groups", ")", ":", "for", "g", "in", "sorted", "(", "groups", ")", ":", "print", "(", "\"Number of OTUs in {0}: {1}\"", ".", "format", "(", "g", ",", "len", "(", "groups", "[", "g", "]", ".", "results", "[", "\"otuids\"", "]", ")", ")", ")", "number_of_categories", "=", "len", "(", "groups", ")", "shared", "=", "defaultdict", "(", ")", "for", "i", "in", "range", "(", "2", ",", "number_of_categories", "+", "1", ")", ":", "for", "j", "in", "combinations", "(", "sorted", "(", "groups", ")", ",", "i", ")", ":", "combo_name", "=", "\" & \"", ".", "join", "(", "list", "(", "j", ")", ")", "for", "grp", "in", "j", ":", "# initialize combo values", "shared", "[", "combo_name", "]", "=", "groups", "[", "j", "[", "0", "]", "]", ".", "results", "[", "\"otuids\"", "]", ".", "copy", "(", ")", "\"\"\"iterate through all groups and keep updating combo OTUIDs with set\n intersection_update\"\"\"", "for", "grp", "in", "j", "[", "1", ":", "]", ":", "shared", "[", "combo_name", "]", ".", "intersection_update", "(", "groups", "[", "grp", "]", ".", "results", "[", "\"otuids\"", "]", ")", "return", "shared" ]
Get shared OTUIDs between all unique combinations of groups. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on group combination and their shared OTUIDs as values.
[ "Get", "shared", "OTUIDs", "between", "all", "unique", "combinations", "of", "groups", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L69-L93
train
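A toy run; the class below fakes only the .results['otuids'] attribute the code reads:

class G(object):
    def __init__(self, otuids):
        self.results = {'otuids': set(otuids)}
groups = {'A': G({'x', 'y'}), 'B': G({'y', 'z'})}
shared = shared_otuids(groups)   # prints per-group counts first
# shared['A & B'] -> {'y'}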
smdabdoub/phylotoast
bin/extract_shared_or_unique_otuids.py
write_uniques
def write_uniques(path, prefix, uniques): """ Given a path, the method writes out one file for each group name in the uniques dictionary with the file name in the pattern PATH/prefix_group.txt with each file containing the unique OTUIDs found when comparing that group to all the other groups in uniques. :type path: str :param path: Output files will be saved in this PATH. :type prefix: str :param prefix: Prefix name added in front of output filename. :type uniques: dict :param uniques: Output from unique_otus() function. """ for group in uniques: fp = osp.join(path, "{}_{}.txt".format(prefix, group)) with open(fp, "w") as outf: outf.write("\n".join(uniques[group]))
python
def write_uniques(path, prefix, uniques): """ Given a path, the method writes out one file for each group name in the uniques dictionary with the file name in the pattern PATH/prefix_group.txt with each file containing the unique OTUIDs found when comparing that group to all the other groups in uniques. :type path: str :param path: Output files will be saved in this PATH. :type prefix: str :param prefix: Prefix name added in front of output filename. :type uniques: dict :param uniques: Output from unique_otus() function. """ for group in uniques: fp = osp.join(path, "{}_{}.txt".format(prefix, group)) with open(fp, "w") as outf: outf.write("\n".join(uniques[group]))
[ "def", "write_uniques", "(", "path", ",", "prefix", ",", "uniques", ")", ":", "for", "group", "in", "uniques", ":", "fp", "=", "osp", ".", "join", "(", "path", ",", "\"{}_{}.txt\"", ".", "format", "(", "prefix", ",", "group", ")", ")", "with", "open", "(", "fp", ",", "\"w\"", ")", "as", "outf", ":", "outf", ".", "write", "(", "\"\\n\"", ".", "join", "(", "uniques", "[", "group", "]", ")", ")" ]
Given a path, the method writes out one file for each group name in the uniques dictionary with the file name in the pattern PATH/prefix_group.txt with each file containing the unique OTUIDs found when comparing that group to all the other groups in uniques. :type path: str :param path: Output files will be saved in this PATH. :type prefix: str :param prefix: Prefix name added in front of output filename. :type uniques: dict :param uniques: Output from unique_otus() function.
[ "Given", "a", "path", "the", "method", "writes", "out", "one", "file", "for", "each", "group", "name", "in", "the", "uniques", "dictionary", "with", "the", "file", "name", "in", "the", "pattern" ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/bin/extract_shared_or_unique_otuids.py#L96-L118
train
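A call sketch continuing the toy data above (path and prefix invented):

write_uniques('.', 'uniq', {'A': {'otu1'}, 'B': {'otu3'}})
# writes ./uniq_A.txt and ./uniq_B.txt, one OTUID per line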
smdabdoub/phylotoast
phylotoast/util.py
storeFASTA
def storeFASTA(fastaFNH): """ Parse the records in a FASTA-format file by first reading the entire file into memory. :type source: path to FASTA file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data. """ fasta = file_handle(fastaFNH).read() return [FASTARecord(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:])) for rec in (x.strip().split("\n") for x in fasta.split(">")[1:])]
python
def storeFASTA(fastaFNH): """ Parse the records in a FASTA-format file by first reading the entire file into memory. :type source: path to FASTA file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data. """ fasta = file_handle(fastaFNH).read() return [FASTARecord(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:])) for rec in (x.strip().split("\n") for x in fasta.split(">")[1:])]
[ "def", "storeFASTA", "(", "fastaFNH", ")", ":", "fasta", "=", "file_handle", "(", "fastaFNH", ")", ".", "read", "(", ")", "return", "[", "FASTARecord", "(", "rec", "[", "0", "]", ".", "split", "(", ")", "[", "0", "]", ",", "rec", "[", "0", "]", ".", "split", "(", "None", ",", "1", ")", "[", "1", "]", ",", "\"\"", ".", "join", "(", "rec", "[", "1", ":", "]", ")", ")", "for", "rec", "in", "(", "x", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", "for", "x", "in", "fasta", ".", "split", "(", "\">\"", ")", "[", "1", ":", "]", ")", "]" ]
Parse the records in a FASTA-format file by first reading the entire file into memory. :type source: path to FASTA file or open file handle :param source: The data source from which to parse the FASTA records. Expects the input to resolve to a collection that can be iterated through, such as an open file handle. :rtype: tuple :return: FASTA records containing entries for id, description and data.
[ "Parse", "the", "records", "in", "a", "FASTA", "-", "format", "file", "by", "first", "reading", "the", "entire", "file", "into", "memory", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L20-L34
train
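A short sketch of how storeFASTA might be used, based directly on the code above. Two properties grounded in the implementation are worth noting: the whole file is read into memory at once, and every header line is assumed to carry a description after the ID (a header with only an ID would raise an IndexError from rec[0].split(None, 1)[1]). The file name is illustrative.

from phylotoast.util import storeFASTA

records = storeFASTA("seqs.fasta")  # a path or an open file handle both work via file_handle()
for rec in records:
    seq_id, descr, seq = rec  # FASTARecord fields: ID, description, sequence data
    print("{}\t{}".format(seq_id, len(seq)))  # sequence ID and sequence length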
smdabdoub/phylotoast
phylotoast/util.py
parseFASTA
def parseFASTA(fastaFNH): """ Parse the records in a FASTA-format file keeping the file open, and reading through one line at a time. :type fastaFNH: str or file handle :param fastaFNH: Path to a FASTA file or an open file handle from which to parse the FASTA records. The input is iterated through one line at a time. :rtype: list :return: A list of FASTA records, each containing entries for id, description and data. """ recs = [] seq = [] seqID = "" descr = "" for line in file_handle(fastaFNH): line = line.strip() if line[0] == ";": continue if line[0] == ">": # conclude previous record if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) seq = [] # start new record line = line[1:].split(None, 1) seqID, descr = line[0], line[1] else: seq.append(line) # catch last seq in file if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) return recs
python
def parseFASTA(fastaFNH): """ Parse the records in a FASTA-format file keeping the file open, and reading through one line at a time. :type fastaFNH: str or file handle :param fastaFNH: Path to a FASTA file or an open file handle from which to parse the FASTA records. The input is iterated through one line at a time. :rtype: list :return: A list of FASTA records, each containing entries for id, description and data. """ recs = [] seq = [] seqID = "" descr = "" for line in file_handle(fastaFNH): line = line.strip() if line[0] == ";": continue if line[0] == ">": # conclude previous record if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) seq = [] # start new record line = line[1:].split(None, 1) seqID, descr = line[0], line[1] else: seq.append(line) # catch last seq in file if seq: recs.append(FASTARecord(seqID, descr, "".join(seq))) return recs
[ "def", "parseFASTA", "(", "fastaFNH", ")", ":", "recs", "=", "[", "]", "seq", "=", "[", "]", "seqID", "=", "\"\"", "descr", "=", "\"\"", "for", "line", "in", "file_handle", "(", "fastaFNH", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "line", "[", "0", "]", "==", "\";\"", ":", "continue", "if", "line", "[", "0", "]", "==", "\">\"", ":", "# conclude previous record", "if", "seq", ":", "recs", ".", "append", "(", "FASTARecord", "(", "seqID", ",", "descr", ",", "\"\"", ".", "join", "(", "seq", ")", ")", ")", "seq", "=", "[", "]", "# start new record", "line", "=", "line", "[", "1", ":", "]", ".", "split", "(", "None", ",", "1", ")", "seqID", ",", "descr", "=", "line", "[", "0", "]", ",", "line", "[", "1", "]", "else", ":", "seq", ".", "append", "(", "line", ")", "# catch last seq in file", "if", "seq", ":", "recs", ".", "append", "(", "FASTARecord", "(", "seqID", ",", "descr", ",", "\"\"", ".", "join", "(", "seq", ")", ")", ")", "return", "recs" ]
Parse the records in a FASTA-format file keeping the file open, and reading through one line at a time. :type fastaFNH: str or file handle :param fastaFNH: Path to a FASTA file or an open file handle from which to parse the FASTA records. The input is iterated through one line at a time. :rtype: list :return: A list of FASTA records, each containing entries for id, description and data.
[ "Parse", "the", "records", "in", "a", "FASTA", "-", "format", "file", "keeping", "the", "file", "open", "and", "reading", "through", "one", "line", "at", "a", "time", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L37-L73
train
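parseFASTA is the streaming counterpart to storeFASTA: it iterates line by line instead of reading the whole file, skips comment lines starting with ";", and flushes a record each time a new ">" header appears (plus once more at end of file). Two caveats visible in the code: a completely blank line would raise an IndexError at line[0], and, as with storeFASTA, each header is expected to carry a description, so this sketch assumes well-formed input.

from phylotoast.util import parseFASTA

# Same record layout as storeFASTA, but better suited to large files.
for rec in parseFASTA("large_seqs.fasta"):  # file name is illustrative
    print("{}\t{}".format(rec[0], rec[1]))  # sequence ID and description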
smdabdoub/phylotoast
phylotoast/util.py
parse_map_file
def parse_map_file(mapFNH): """ Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID (the first field of each line). The only required fields are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be the final field). :type mapFNH: str or file handle :param mapFNH: Either the full path to the map file or an open file handle :rtype: tuple :return: A tuple of the header line for the mapping file and a map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID). An OrderedDict is used for mapping so the returned map is guaranteed to have the same order as the input file. Example data: #SampleID BarcodeSequence LinkerPrimerSequence State Description 11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral """ m = OrderedDict() map_header = None with file_handle(mapFNH) as mapF: for line in mapF: if line.startswith("#SampleID"): map_header = line.strip().split("\t") if line.startswith("#") or not line: continue line = line.strip().split("\t") m[line[0]] = line return map_header, m
python
def parse_map_file(mapFNH): """ Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID (the first field of each line). The only required fields are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be the final field). :type mapFNH: str or file handle :param mapFNH: Either the full path to the map file or an open file handle :rtype: tuple :return: A tuple of the header line for the mapping file and a map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID). An OrderedDict is used for mapping so the returned map is guaranteed to have the same order as the input file. Example data: #SampleID BarcodeSequence LinkerPrimerSequence State Description 11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral """ m = OrderedDict() map_header = None with file_handle(mapFNH) as mapF: for line in mapF: if line.startswith("#SampleID"): map_header = line.strip().split("\t") if line.startswith("#") or not line: continue line = line.strip().split("\t") m[line[0]] = line return map_header, m
[ "def", "parse_map_file", "(", "mapFNH", ")", ":", "m", "=", "OrderedDict", "(", ")", "map_header", "=", "None", "with", "file_handle", "(", "mapFNH", ")", "as", "mapF", ":", "for", "line", "in", "mapF", ":", "if", "line", ".", "startswith", "(", "\"#SampleID\"", ")", ":", "map_header", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "if", "line", ".", "startswith", "(", "\"#\"", ")", "or", "not", "line", ":", "continue", "line", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "m", "[", "line", "[", "0", "]", "]", "=", "line", "return", "map_header", ",", "m" ]
Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID (the first field of each line). The only required fields are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be the final field). :type mapFNH: str or file handle :param mapFNH: Either the full path to the map file or an open file handle :rtype: tuple :return: A tuple of the header line for the mapping file and a map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID). An OrderedDict is used for mapping so the returned map is guaranteed to have the same order as the input file. Example data: #SampleID BarcodeSequence LinkerPrimerSequence State Description 11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral
[ "Opens", "a", "QIIME", "mapping", "file", "and", "stores", "the", "contents", "in", "a", "dictionary", "keyed", "on", "SampleID", "(", "default", ")", "or", "a", "user", "-", "supplied", "one", ".", "The", "only", "required", "fields", "are", "SampleID", "BarcodeSequence", "LinkerPrimerSequence", "(", "in", "that", "order", ")", "and", "Description", "(", "which", "must", "be", "the", "final", "field", ")", "." ]
0b74ef171e6a84761710548501dfac71285a58a3
https://github.com/smdabdoub/phylotoast/blob/0b74ef171e6a84761710548501dfac71285a58a3/phylotoast/util.py#L76-L108
train
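A usage sketch for parse_map_file, assuming a QIIME-style mapping file like the example shown in the docstring above; the file name is illustrative. The returned header is the "#SampleID" line split on tabs, so column indices can be looked up by header name, and the OrderedDict preserves the sample order of the input file.

from phylotoast.util import parse_map_file

header, sample_map = parse_map_file("mapping.txt")
state_idx = header.index("State")  # find a metadata column by its header name
for sample_id, row in sample_map.items():
    print("{}\t{}".format(sample_id, row[state_idx]))  # e.g. 11.V13    Disease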