commit
stringlengths 40
40
| old_file
stringlengths 4
106
| new_file
stringlengths 4
106
| old_contents
stringlengths 10
2.94k
| new_contents
stringlengths 21
2.95k
| subject
stringlengths 16
444
| message
stringlengths 17
2.63k
| lang
stringclasses 1
value | license
stringclasses 13
values | repos
stringlengths 7
43k
| ndiff
stringlengths 52
3.31k
| instruction
stringlengths 16
444
| content
stringlengths 133
4.32k
| diff
stringlengths 49
3.61k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6064db3000f2aeec66a775345d22b8a2b421497f
|
astropy/utils/tests/test_gzip.py
|
astropy/utils/tests/test_gzip.py
|
import io
import os
from ...tests.helper import pytest
from .. import gzip
def test_gzip(tmpdir):
fd = gzip.GzipFile(str(tmpdir.join("test.gz")), 'wb')
fd = io.TextIOWrapper(fd, encoding='utf8')
|
import io
import os
from ...tests.helper import pytest
from .. import gzip
pytestmark = pytest.mark.skipif("sys.version_info < (3,0)")
def test_gzip(tmpdir):
fd = gzip.GzipFile(str(tmpdir.join("test.gz")), 'wb')
fd = io.TextIOWrapper(fd, encoding='utf8')
|
Fix gzip test for Python 2.6
|
Fix gzip test for Python 2.6
|
Python
|
bsd-3-clause
|
tbabej/astropy,bsipocz/astropy,lpsinger/astropy,MSeifert04/astropy,StuartLittlefair/astropy,larrybradley/astropy,DougBurke/astropy,stargaser/astropy,pllim/astropy,stargaser/astropy,MSeifert04/astropy,tbabej/astropy,lpsinger/astropy,joergdietrich/astropy,astropy/astropy,joergdietrich/astropy,dhomeier/astropy,kelle/astropy,kelle/astropy,saimn/astropy,MSeifert04/astropy,bsipocz/astropy,funbaker/astropy,kelle/astropy,pllim/astropy,dhomeier/astropy,StuartLittlefair/astropy,pllim/astropy,dhomeier/astropy,MSeifert04/astropy,larrybradley/astropy,mhvk/astropy,funbaker/astropy,StuartLittlefair/astropy,stargaser/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,astropy/astropy,AustereCuriosity/astropy,tbabej/astropy,DougBurke/astropy,AustereCuriosity/astropy,funbaker/astropy,pllim/astropy,lpsinger/astropy,larrybradley/astropy,funbaker/astropy,dhomeier/astropy,mhvk/astropy,joergdietrich/astropy,saimn/astropy,kelle/astropy,lpsinger/astropy,stargaser/astropy,mhvk/astropy,bsipocz/astropy,StuartLittlefair/astropy,dhomeier/astropy,saimn/astropy,mhvk/astropy,joergdietrich/astropy,astropy/astropy,AustereCuriosity/astropy,AustereCuriosity/astropy,astropy/astropy,aleksandr-bakanov/astropy,joergdietrich/astropy,mhvk/astropy,aleksandr-bakanov/astropy,saimn/astropy,larrybradley/astropy,larrybradley/astropy,tbabej/astropy,pllim/astropy,bsipocz/astropy,aleksandr-bakanov/astropy,DougBurke/astropy,astropy/astropy,kelle/astropy,lpsinger/astropy,AustereCuriosity/astropy,tbabej/astropy,saimn/astropy,DougBurke/astropy
|
import io
import os
from ...tests.helper import pytest
from .. import gzip
+ pytestmark = pytest.mark.skipif("sys.version_info < (3,0)")
+
def test_gzip(tmpdir):
fd = gzip.GzipFile(str(tmpdir.join("test.gz")), 'wb')
fd = io.TextIOWrapper(fd, encoding='utf8')
|
Fix gzip test for Python 2.6
|
## Code Before:
import io
import os
from ...tests.helper import pytest
from .. import gzip
def test_gzip(tmpdir):
fd = gzip.GzipFile(str(tmpdir.join("test.gz")), 'wb')
fd = io.TextIOWrapper(fd, encoding='utf8')
## Instruction:
Fix gzip test for Python 2.6
## Code After:
import io
import os
from ...tests.helper import pytest
from .. import gzip
pytestmark = pytest.mark.skipif("sys.version_info < (3,0)")
def test_gzip(tmpdir):
fd = gzip.GzipFile(str(tmpdir.join("test.gz")), 'wb')
fd = io.TextIOWrapper(fd, encoding='utf8')
|
import io
import os
from ...tests.helper import pytest
from .. import gzip
+ pytestmark = pytest.mark.skipif("sys.version_info < (3,0)")
+
def test_gzip(tmpdir):
fd = gzip.GzipFile(str(tmpdir.join("test.gz")), 'wb')
fd = io.TextIOWrapper(fd, encoding='utf8')
|
58ae075463518e477185816094eb83f42ce5b77c
|
gcloud/bigquery/__init__.py
|
gcloud/bigquery/__init__.py
|
from gcloud.bigquery.client import Client
from gcloud.bigquery.connection import SCOPE
from gcloud.bigquery.dataset import Dataset
|
from gcloud.bigquery.client import Client
from gcloud.bigquery.connection import SCOPE
from gcloud.bigquery.dataset import Dataset
from gcloud.bigquery.table import SchemaField
from gcloud.bigquery.table import Table
|
Add public API entties from 'bigquery.table'.
|
Add public API entties from 'bigquery.table'.
|
Python
|
apache-2.0
|
CyrusBiotechnology/gcloud-python,tseaver/google-cloud-python,Fkawala/gcloud-python,waprin/gcloud-python,jonparrott/gcloud-python,EugenePig/gcloud-python,dhermes/google-cloud-python,tswast/google-cloud-python,thesandlord/gcloud-python,tswast/google-cloud-python,EugenePig/gcloud-python,jbuberel/gcloud-python,dhermes/google-cloud-python,calpeyser/google-cloud-python,tseaver/gcloud-python,dhermes/gcloud-python,dhermes/google-cloud-python,tseaver/google-cloud-python,jgeewax/gcloud-python,tswast/google-cloud-python,waprin/google-cloud-python,GoogleCloudPlatform/gcloud-python,jonparrott/google-cloud-python,tseaver/google-cloud-python,vj-ug/gcloud-python,quom/google-cloud-python,googleapis/google-cloud-python,tseaver/gcloud-python,tartavull/google-cloud-python,daspecster/google-cloud-python,quom/google-cloud-python,dhermes/gcloud-python,jonparrott/google-cloud-python,googleapis/google-cloud-python,Fkawala/gcloud-python,jbuberel/gcloud-python,elibixby/gcloud-python,VitalLabs/gcloud-python,waprin/gcloud-python,GoogleCloudPlatform/gcloud-python,CyrusBiotechnology/gcloud-python,jonparrott/gcloud-python,jgeewax/gcloud-python,vj-ug/gcloud-python,VitalLabs/gcloud-python,waprin/google-cloud-python,thesandlord/gcloud-python,daspecster/google-cloud-python,elibixby/gcloud-python,tartavull/google-cloud-python,calpeyser/google-cloud-python
|
from gcloud.bigquery.client import Client
from gcloud.bigquery.connection import SCOPE
from gcloud.bigquery.dataset import Dataset
+ from gcloud.bigquery.table import SchemaField
+ from gcloud.bigquery.table import Table
|
Add public API entties from 'bigquery.table'.
|
## Code Before:
from gcloud.bigquery.client import Client
from gcloud.bigquery.connection import SCOPE
from gcloud.bigquery.dataset import Dataset
## Instruction:
Add public API entties from 'bigquery.table'.
## Code After:
from gcloud.bigquery.client import Client
from gcloud.bigquery.connection import SCOPE
from gcloud.bigquery.dataset import Dataset
from gcloud.bigquery.table import SchemaField
from gcloud.bigquery.table import Table
|
from gcloud.bigquery.client import Client
from gcloud.bigquery.connection import SCOPE
from gcloud.bigquery.dataset import Dataset
+ from gcloud.bigquery.table import SchemaField
+ from gcloud.bigquery.table import Table
|
6fe06b2a2b504c28bc35ef2f429d72dc8082efca
|
cmsplugin_zinnia/placeholder.py
|
cmsplugin_zinnia/placeholder.py
|
"""Placeholder model for Zinnia"""
import inspect
from cms.models.fields import PlaceholderField
from cms.plugin_rendering import render_placeholder
from zinnia.models.entry import EntryAbstractClass
class EntryPlaceholder(EntryAbstractClass):
"""Entry with a Placeholder to edit content"""
content_placeholder = PlaceholderField('content')
def acquire_context(self):
"""
Inspect the stack to acquire the current context used,
to render the placeholder. I'm really sorry for this,
but if you have a better way, you are welcome !
"""
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
args, varargs, keywords, alocals = inspect.getargvalues(frame)
if 'context' in args:
return alocals['context']
finally:
del frame
@property
def html_content(self):
"""
Render the content_placeholder field dynamicly.
https://github.com/Fantomas42/cmsplugin-zinnia/issues/3
"""
context = self.acquire_context()
return render_placeholder(self.content_placeholder, context)
class Meta(EntryAbstractClass.Meta):
"""EntryPlaceholder's Meta"""
abstract = True
|
"""Placeholder model for Zinnia"""
import inspect
from cms.models.fields import PlaceholderField
from cms.plugin_rendering import render_placeholder
from zinnia.models_bases.entry import AbstractEntry
class EntryPlaceholder(AbstractEntry):
"""Entry with a Placeholder to edit content"""
content_placeholder = PlaceholderField('content')
def acquire_context(self):
"""
Inspect the stack to acquire the current context used,
to render the placeholder. I'm really sorry for this,
but if you have a better way, you are welcome !
"""
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
args, varargs, keywords, alocals = inspect.getargvalues(frame)
if 'context' in args:
return alocals['context']
finally:
del frame
@property
def html_content(self):
"""
Render the content_placeholder field dynamicly.
https://github.com/Fantomas42/cmsplugin-zinnia/issues/3
"""
context = self.acquire_context()
return render_placeholder(self.content_placeholder, context)
class Meta(AbstractEntry.Meta):
"""EntryPlaceholder's Meta"""
abstract = True
|
Use AbstractEntry instead of EntryAbstractClass
|
Use AbstractEntry instead of EntryAbstractClass
|
Python
|
bsd-3-clause
|
bittner/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia,bittner/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia,bittner/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia
|
"""Placeholder model for Zinnia"""
import inspect
from cms.models.fields import PlaceholderField
from cms.plugin_rendering import render_placeholder
- from zinnia.models.entry import EntryAbstractClass
+ from zinnia.models_bases.entry import AbstractEntry
- class EntryPlaceholder(EntryAbstractClass):
+ class EntryPlaceholder(AbstractEntry):
"""Entry with a Placeholder to edit content"""
content_placeholder = PlaceholderField('content')
def acquire_context(self):
"""
Inspect the stack to acquire the current context used,
to render the placeholder. I'm really sorry for this,
but if you have a better way, you are welcome !
"""
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
args, varargs, keywords, alocals = inspect.getargvalues(frame)
if 'context' in args:
return alocals['context']
finally:
del frame
@property
def html_content(self):
"""
Render the content_placeholder field dynamicly.
https://github.com/Fantomas42/cmsplugin-zinnia/issues/3
"""
context = self.acquire_context()
return render_placeholder(self.content_placeholder, context)
- class Meta(EntryAbstractClass.Meta):
+ class Meta(AbstractEntry.Meta):
"""EntryPlaceholder's Meta"""
abstract = True
|
Use AbstractEntry instead of EntryAbstractClass
|
## Code Before:
"""Placeholder model for Zinnia"""
import inspect
from cms.models.fields import PlaceholderField
from cms.plugin_rendering import render_placeholder
from zinnia.models.entry import EntryAbstractClass
class EntryPlaceholder(EntryAbstractClass):
"""Entry with a Placeholder to edit content"""
content_placeholder = PlaceholderField('content')
def acquire_context(self):
"""
Inspect the stack to acquire the current context used,
to render the placeholder. I'm really sorry for this,
but if you have a better way, you are welcome !
"""
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
args, varargs, keywords, alocals = inspect.getargvalues(frame)
if 'context' in args:
return alocals['context']
finally:
del frame
@property
def html_content(self):
"""
Render the content_placeholder field dynamicly.
https://github.com/Fantomas42/cmsplugin-zinnia/issues/3
"""
context = self.acquire_context()
return render_placeholder(self.content_placeholder, context)
class Meta(EntryAbstractClass.Meta):
"""EntryPlaceholder's Meta"""
abstract = True
## Instruction:
Use AbstractEntry instead of EntryAbstractClass
## Code After:
"""Placeholder model for Zinnia"""
import inspect
from cms.models.fields import PlaceholderField
from cms.plugin_rendering import render_placeholder
from zinnia.models_bases.entry import AbstractEntry
class EntryPlaceholder(AbstractEntry):
"""Entry with a Placeholder to edit content"""
content_placeholder = PlaceholderField('content')
def acquire_context(self):
"""
Inspect the stack to acquire the current context used,
to render the placeholder. I'm really sorry for this,
but if you have a better way, you are welcome !
"""
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
args, varargs, keywords, alocals = inspect.getargvalues(frame)
if 'context' in args:
return alocals['context']
finally:
del frame
@property
def html_content(self):
"""
Render the content_placeholder field dynamicly.
https://github.com/Fantomas42/cmsplugin-zinnia/issues/3
"""
context = self.acquire_context()
return render_placeholder(self.content_placeholder, context)
class Meta(AbstractEntry.Meta):
"""EntryPlaceholder's Meta"""
abstract = True
|
"""Placeholder model for Zinnia"""
import inspect
from cms.models.fields import PlaceholderField
from cms.plugin_rendering import render_placeholder
- from zinnia.models.entry import EntryAbstractClass
? ----- ^^^^^
+ from zinnia.models_bases.entry import AbstractEntry
? ++++++ ^^^^^
- class EntryPlaceholder(EntryAbstractClass):
? ----- ^^^^^
+ class EntryPlaceholder(AbstractEntry):
? ^^^^^
"""Entry with a Placeholder to edit content"""
content_placeholder = PlaceholderField('content')
def acquire_context(self):
"""
Inspect the stack to acquire the current context used,
to render the placeholder. I'm really sorry for this,
but if you have a better way, you are welcome !
"""
frame = None
try:
for f in inspect.stack()[1:]:
frame = f[0]
args, varargs, keywords, alocals = inspect.getargvalues(frame)
if 'context' in args:
return alocals['context']
finally:
del frame
@property
def html_content(self):
"""
Render the content_placeholder field dynamicly.
https://github.com/Fantomas42/cmsplugin-zinnia/issues/3
"""
context = self.acquire_context()
return render_placeholder(self.content_placeholder, context)
- class Meta(EntryAbstractClass.Meta):
? ----- ^^^^^
+ class Meta(AbstractEntry.Meta):
? ^^^^^
"""EntryPlaceholder's Meta"""
abstract = True
|
072bc480cbc489cd89d03405026f152934893b7e
|
go/routers/keyword/view_definition.py
|
go/routers/keyword/view_definition.py
|
from django import forms
from go.router.view_definition import RouterViewDefinitionBase, EditRouterView
class KeywordForm(forms.Form):
keyword = forms.CharField()
target_endpoint = forms.CharField()
class BaseKeywordFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
return [{'keyword': k, 'target_endpoint': v}
for k, v in sorted(data.items())]
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
if not form.is_valid():
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint']
keyword_endpoint_mapping[keyword] = target_endpoint
return keyword_endpoint_mapping
KeywordFormSet = forms.formsets.formset_factory(
KeywordForm, can_delete=True, extra=1, formset=BaseKeywordFormSet)
class EditKeywordView(EditRouterView):
edit_forms = (
('keyword_endpoint_mapping', KeywordFormSet),
)
class RouterViewDefinition(RouterViewDefinitionBase):
edit_view = EditKeywordView
|
from django import forms
from go.router.view_definition import RouterViewDefinitionBase, EditRouterView
class KeywordForm(forms.Form):
keyword = forms.CharField()
target_endpoint = forms.CharField()
class BaseKeywordFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
return [{'keyword': k, 'target_endpoint': v}
for k, v in sorted(data.items())]
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
if (not form.is_valid()) or form.cleaned_data['DELETE']:
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint']
keyword_endpoint_mapping[keyword] = target_endpoint
return keyword_endpoint_mapping
KeywordFormSet = forms.formsets.formset_factory(
KeywordForm, can_delete=True, extra=1, formset=BaseKeywordFormSet)
class EditKeywordView(EditRouterView):
edit_forms = (
('keyword_endpoint_mapping', KeywordFormSet),
)
class RouterViewDefinition(RouterViewDefinitionBase):
edit_view = EditKeywordView
|
Revert "Remove unnecessary and broken DELETE check."
|
Revert "Remove unnecessary and broken DELETE check."
This reverts commit 7906153b4718f34ed31c193a8e80b171e567209c.
Reverting commit accidentally commited straight to develop.
|
Python
|
bsd-3-clause
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
from django import forms
from go.router.view_definition import RouterViewDefinitionBase, EditRouterView
class KeywordForm(forms.Form):
keyword = forms.CharField()
target_endpoint = forms.CharField()
class BaseKeywordFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
return [{'keyword': k, 'target_endpoint': v}
for k, v in sorted(data.items())]
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
- if not form.is_valid():
+ if (not form.is_valid()) or form.cleaned_data['DELETE']:
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint']
keyword_endpoint_mapping[keyword] = target_endpoint
return keyword_endpoint_mapping
KeywordFormSet = forms.formsets.formset_factory(
KeywordForm, can_delete=True, extra=1, formset=BaseKeywordFormSet)
class EditKeywordView(EditRouterView):
edit_forms = (
('keyword_endpoint_mapping', KeywordFormSet),
)
class RouterViewDefinition(RouterViewDefinitionBase):
edit_view = EditKeywordView
|
Revert "Remove unnecessary and broken DELETE check."
|
## Code Before:
from django import forms
from go.router.view_definition import RouterViewDefinitionBase, EditRouterView
class KeywordForm(forms.Form):
keyword = forms.CharField()
target_endpoint = forms.CharField()
class BaseKeywordFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
return [{'keyword': k, 'target_endpoint': v}
for k, v in sorted(data.items())]
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
if not form.is_valid():
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint']
keyword_endpoint_mapping[keyword] = target_endpoint
return keyword_endpoint_mapping
KeywordFormSet = forms.formsets.formset_factory(
KeywordForm, can_delete=True, extra=1, formset=BaseKeywordFormSet)
class EditKeywordView(EditRouterView):
edit_forms = (
('keyword_endpoint_mapping', KeywordFormSet),
)
class RouterViewDefinition(RouterViewDefinitionBase):
edit_view = EditKeywordView
## Instruction:
Revert "Remove unnecessary and broken DELETE check."
## Code After:
from django import forms
from go.router.view_definition import RouterViewDefinitionBase, EditRouterView
class KeywordForm(forms.Form):
keyword = forms.CharField()
target_endpoint = forms.CharField()
class BaseKeywordFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
return [{'keyword': k, 'target_endpoint': v}
for k, v in sorted(data.items())]
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
if (not form.is_valid()) or form.cleaned_data['DELETE']:
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint']
keyword_endpoint_mapping[keyword] = target_endpoint
return keyword_endpoint_mapping
KeywordFormSet = forms.formsets.formset_factory(
KeywordForm, can_delete=True, extra=1, formset=BaseKeywordFormSet)
class EditKeywordView(EditRouterView):
edit_forms = (
('keyword_endpoint_mapping', KeywordFormSet),
)
class RouterViewDefinition(RouterViewDefinitionBase):
edit_view = EditKeywordView
|
from django import forms
from go.router.view_definition import RouterViewDefinitionBase, EditRouterView
class KeywordForm(forms.Form):
keyword = forms.CharField()
target_endpoint = forms.CharField()
class BaseKeywordFormSet(forms.formsets.BaseFormSet):
@staticmethod
def initial_from_config(data):
return [{'keyword': k, 'target_endpoint': v}
for k, v in sorted(data.items())]
def to_config(self):
keyword_endpoint_mapping = {}
for form in self:
- if not form.is_valid():
+ if (not form.is_valid()) or form.cleaned_data['DELETE']:
continue
keyword = form.cleaned_data['keyword']
target_endpoint = form.cleaned_data['target_endpoint']
keyword_endpoint_mapping[keyword] = target_endpoint
return keyword_endpoint_mapping
KeywordFormSet = forms.formsets.formset_factory(
KeywordForm, can_delete=True, extra=1, formset=BaseKeywordFormSet)
class EditKeywordView(EditRouterView):
edit_forms = (
('keyword_endpoint_mapping', KeywordFormSet),
)
class RouterViewDefinition(RouterViewDefinitionBase):
edit_view = EditKeywordView
|
9a5aee262b5a89e5a22e9e1390e23898a5373627
|
byceps/util/jobqueue.py
|
byceps/util/jobqueue.py
|
from contextlib import contextmanager
from rq import Connection, Queue
from byceps.redis import redis
@contextmanager
def connection():
with Connection(redis.client):
yield
def get_queue(app):
is_async = app.config['JOBS_ASYNC']
return Queue(is_async=is_async)
def enqueue(*args, **kwargs):
"""Add the function call to the queue as a job."""
with connection():
queue = get_queue()
queue.enqueue(*args, **kwargs)
|
from contextlib import contextmanager
from flask import current_app
from rq import Connection, Queue
from byceps.redis import redis
@contextmanager
def connection():
with Connection(redis.client):
yield
def get_queue(app):
is_async = app.config['JOBS_ASYNC']
return Queue(is_async=is_async)
def enqueue(*args, **kwargs):
"""Add the function call to the queue as a job."""
with connection():
queue = get_queue(current_app)
queue.enqueue(*args, **kwargs)
|
Fix `get_queue` call in `enqueue`
|
Fix `get_queue` call in `enqueue`
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps
|
from contextlib import contextmanager
+ from flask import current_app
from rq import Connection, Queue
from byceps.redis import redis
@contextmanager
def connection():
with Connection(redis.client):
yield
def get_queue(app):
is_async = app.config['JOBS_ASYNC']
return Queue(is_async=is_async)
def enqueue(*args, **kwargs):
"""Add the function call to the queue as a job."""
with connection():
- queue = get_queue()
+ queue = get_queue(current_app)
queue.enqueue(*args, **kwargs)
|
Fix `get_queue` call in `enqueue`
|
## Code Before:
from contextlib import contextmanager
from rq import Connection, Queue
from byceps.redis import redis
@contextmanager
def connection():
with Connection(redis.client):
yield
def get_queue(app):
is_async = app.config['JOBS_ASYNC']
return Queue(is_async=is_async)
def enqueue(*args, **kwargs):
"""Add the function call to the queue as a job."""
with connection():
queue = get_queue()
queue.enqueue(*args, **kwargs)
## Instruction:
Fix `get_queue` call in `enqueue`
## Code After:
from contextlib import contextmanager
from flask import current_app
from rq import Connection, Queue
from byceps.redis import redis
@contextmanager
def connection():
with Connection(redis.client):
yield
def get_queue(app):
is_async = app.config['JOBS_ASYNC']
return Queue(is_async=is_async)
def enqueue(*args, **kwargs):
"""Add the function call to the queue as a job."""
with connection():
queue = get_queue(current_app)
queue.enqueue(*args, **kwargs)
|
from contextlib import contextmanager
+ from flask import current_app
from rq import Connection, Queue
from byceps.redis import redis
@contextmanager
def connection():
with Connection(redis.client):
yield
def get_queue(app):
is_async = app.config['JOBS_ASYNC']
return Queue(is_async=is_async)
def enqueue(*args, **kwargs):
"""Add the function call to the queue as a job."""
with connection():
- queue = get_queue()
+ queue = get_queue(current_app)
? +++++++++++
queue.enqueue(*args, **kwargs)
|
d52c4340a62802bcd0fcbd68516c5ac66fb10436
|
ftfy/streamtester/__init__.py
|
ftfy/streamtester/__init__.py
|
from __future__ import print_function, unicode_literals
from ftfy.fixes import fix_text_encoding
from ftfy.chardata import possible_encoding
class StreamTester:
"""
Take in a sequence of texts, and show the ones that will be changed by
ftfy. This will also periodically show updates, such as the proportion of
texts that changed.
"""
def __init__(self):
self.num_fixed = 0
self.count = 0
def check_ftfy(self, text):
"""
Given a single text input, check whether `ftfy.fix_text_encoding`
would change it. If so, display the change.
"""
self.count += 1
if not possible_encoding(text, 'ascii'):
fixed = fix_text_encoding(text)
if text != fixed:
# possibly filter common bots before printing
print(u'\nText:\t{text}\nFixed:\t{fixed}\n'.format(
text=text, fixed=fixed
))
self.num_fixed += 1
# Print status updates once in a while
if self.count % 100 == 0:
print('.', end='', flush=True)
if self.count % 10000 == 0:
print('\n%d/%d fixed' % (self.num_fixed, self.count))
|
from __future__ import print_function, unicode_literals
from ftfy.fixes import fix_encoding
from ftfy.chardata import possible_encoding
class StreamTester:
"""
Take in a sequence of texts, and show the ones that will be changed by
ftfy. This will also periodically show updates, such as the proportion of
texts that changed.
"""
def __init__(self):
self.num_fixed = 0
self.count = 0
def check_ftfy(self, text):
"""
Given a single text input, check whether `ftfy.fix_text_encoding`
would change it. If so, display the change.
"""
self.count += 1
if not possible_encoding(text, 'ascii'):
fixed = fix_encoding(text)
if text != fixed:
# possibly filter common bots before printing
print(u'\nText:\t{text}\nFixed:\t{fixed}\n'.format(
text=text, fixed=fixed
))
self.num_fixed += 1
# Print status updates once in a while
if self.count % 100 == 0:
print('.', end='', flush=True)
if self.count % 10000 == 0:
print('\n%d/%d fixed' % (self.num_fixed, self.count))
|
Update function name used in the streamtester
|
Update function name used in the streamtester
|
Python
|
mit
|
LuminosoInsight/python-ftfy
|
from __future__ import print_function, unicode_literals
- from ftfy.fixes import fix_text_encoding
+ from ftfy.fixes import fix_encoding
from ftfy.chardata import possible_encoding
class StreamTester:
"""
Take in a sequence of texts, and show the ones that will be changed by
ftfy. This will also periodically show updates, such as the proportion of
texts that changed.
"""
def __init__(self):
self.num_fixed = 0
self.count = 0
def check_ftfy(self, text):
"""
Given a single text input, check whether `ftfy.fix_text_encoding`
would change it. If so, display the change.
"""
self.count += 1
if not possible_encoding(text, 'ascii'):
- fixed = fix_text_encoding(text)
+ fixed = fix_encoding(text)
if text != fixed:
# possibly filter common bots before printing
print(u'\nText:\t{text}\nFixed:\t{fixed}\n'.format(
text=text, fixed=fixed
))
self.num_fixed += 1
# Print status updates once in a while
if self.count % 100 == 0:
print('.', end='', flush=True)
if self.count % 10000 == 0:
print('\n%d/%d fixed' % (self.num_fixed, self.count))
|
Update function name used in the streamtester
|
## Code Before:
from __future__ import print_function, unicode_literals
from ftfy.fixes import fix_text_encoding
from ftfy.chardata import possible_encoding
class StreamTester:
"""
Take in a sequence of texts, and show the ones that will be changed by
ftfy. This will also periodically show updates, such as the proportion of
texts that changed.
"""
def __init__(self):
self.num_fixed = 0
self.count = 0
def check_ftfy(self, text):
"""
Given a single text input, check whether `ftfy.fix_text_encoding`
would change it. If so, display the change.
"""
self.count += 1
if not possible_encoding(text, 'ascii'):
fixed = fix_text_encoding(text)
if text != fixed:
# possibly filter common bots before printing
print(u'\nText:\t{text}\nFixed:\t{fixed}\n'.format(
text=text, fixed=fixed
))
self.num_fixed += 1
# Print status updates once in a while
if self.count % 100 == 0:
print('.', end='', flush=True)
if self.count % 10000 == 0:
print('\n%d/%d fixed' % (self.num_fixed, self.count))
## Instruction:
Update function name used in the streamtester
## Code After:
from __future__ import print_function, unicode_literals
from ftfy.fixes import fix_encoding
from ftfy.chardata import possible_encoding
class StreamTester:
"""
Take in a sequence of texts, and show the ones that will be changed by
ftfy. This will also periodically show updates, such as the proportion of
texts that changed.
"""
def __init__(self):
self.num_fixed = 0
self.count = 0
def check_ftfy(self, text):
"""
Given a single text input, check whether `ftfy.fix_text_encoding`
would change it. If so, display the change.
"""
self.count += 1
if not possible_encoding(text, 'ascii'):
fixed = fix_encoding(text)
if text != fixed:
# possibly filter common bots before printing
print(u'\nText:\t{text}\nFixed:\t{fixed}\n'.format(
text=text, fixed=fixed
))
self.num_fixed += 1
# Print status updates once in a while
if self.count % 100 == 0:
print('.', end='', flush=True)
if self.count % 10000 == 0:
print('\n%d/%d fixed' % (self.num_fixed, self.count))
|
from __future__ import print_function, unicode_literals
- from ftfy.fixes import fix_text_encoding
? -----
+ from ftfy.fixes import fix_encoding
from ftfy.chardata import possible_encoding
class StreamTester:
"""
Take in a sequence of texts, and show the ones that will be changed by
ftfy. This will also periodically show updates, such as the proportion of
texts that changed.
"""
def __init__(self):
self.num_fixed = 0
self.count = 0
def check_ftfy(self, text):
"""
Given a single text input, check whether `ftfy.fix_text_encoding`
would change it. If so, display the change.
"""
self.count += 1
if not possible_encoding(text, 'ascii'):
- fixed = fix_text_encoding(text)
? -----
+ fixed = fix_encoding(text)
if text != fixed:
# possibly filter common bots before printing
print(u'\nText:\t{text}\nFixed:\t{fixed}\n'.format(
text=text, fixed=fixed
))
self.num_fixed += 1
# Print status updates once in a while
if self.count % 100 == 0:
print('.', end='', flush=True)
if self.count % 10000 == 0:
print('\n%d/%d fixed' % (self.num_fixed, self.count))
|
33e1c781b0e430cb1e0df19d02ed06a193f9d202
|
waterbutler/identity.py
|
waterbutler/identity.py
|
import asyncio
from waterbutler import settings
@asyncio.coroutine
def fetch_rest_identity(params):
response = yield from aiohttp.request(
'get',
settings.IDENTITY_API_URL,
params=params,
headers={'Content-Type': 'application/json'},
)
# TOOD Handle Errors nicely
if response.status != 200:
data = yield from response.read()
raise web.HTTPError(response.status)
data = yield from response.json()
return data
IDENTITY_METHODS = {
'rest': fetch_rest_identity
}
get_identity = IDENTITY_METHODS[settings.IDENTITY_METHOD]
|
import asyncio
import aiohttp
from waterbutler import settings
IDENTITY_METHODS = {}
def get_identity_func(name):
try:
return IDENTITY_METHODS[name]
except KeyError:
raise NotImplementedError('No identity getter for {0}'.format(name))
def register_identity(name):
def _register_identity(func):
IDENTITY_METHODS[name] = func
return func
return _register_identity
def get_identity(name, **kwargs):
return get_identity_func(name)(**kwargs)
@register_identity('rest')
@asyncio.coroutine
def fetch_rest_identity(**params):
response = yield from aiohttp.request(
'get',
settings.IDENTITY_API_URL,
params=params,
headers={'Content-Type': 'application/json'},
)
# TOOD Handle Errors nicely
if response.status != 200:
data = yield from response.read()
raise web.HTTPError(response.status)
data = yield from response.json()
return data
|
Make use of a register decorator
|
Make use of a register decorator
|
Python
|
apache-2.0
|
CenterForOpenScience/waterbutler,kwierman/waterbutler,TomBaxter/waterbutler,rafaeldelucena/waterbutler,Ghalko/waterbutler,RCOSDP/waterbutler,hmoco/waterbutler,felliott/waterbutler,rdhyee/waterbutler,Johnetordoff/waterbutler,icereval/waterbutler,chrisseto/waterbutler,cosenal/waterbutler
|
import asyncio
+
+ import aiohttp
from waterbutler import settings
+ IDENTITY_METHODS = {}
+
+
+ def get_identity_func(name):
+ try:
+ return IDENTITY_METHODS[name]
+ except KeyError:
+ raise NotImplementedError('No identity getter for {0}'.format(name))
+
+
+ def register_identity(name):
+ def _register_identity(func):
+ IDENTITY_METHODS[name] = func
+ return func
+ return _register_identity
+
+
+ def get_identity(name, **kwargs):
+ return get_identity_func(name)(**kwargs)
+
+
+ @register_identity('rest')
@asyncio.coroutine
- def fetch_rest_identity(params):
+ def fetch_rest_identity(**params):
response = yield from aiohttp.request(
'get',
settings.IDENTITY_API_URL,
params=params,
headers={'Content-Type': 'application/json'},
)
# TOOD Handle Errors nicely
if response.status != 200:
data = yield from response.read()
raise web.HTTPError(response.status)
data = yield from response.json()
return data
- IDENTITY_METHODS = {
- 'rest': fetch_rest_identity
- }
-
- get_identity = IDENTITY_METHODS[settings.IDENTITY_METHOD]
-
|
Make use of a register decorator
|
## Code Before:
import asyncio
from waterbutler import settings
@asyncio.coroutine
def fetch_rest_identity(params):
response = yield from aiohttp.request(
'get',
settings.IDENTITY_API_URL,
params=params,
headers={'Content-Type': 'application/json'},
)
# TOOD Handle Errors nicely
if response.status != 200:
data = yield from response.read()
raise web.HTTPError(response.status)
data = yield from response.json()
return data
IDENTITY_METHODS = {
'rest': fetch_rest_identity
}
get_identity = IDENTITY_METHODS[settings.IDENTITY_METHOD]
## Instruction:
Make use of a register decorator
## Code After:
import asyncio
import aiohttp
from waterbutler import settings
IDENTITY_METHODS = {}
def get_identity_func(name):
try:
return IDENTITY_METHODS[name]
except KeyError:
raise NotImplementedError('No identity getter for {0}'.format(name))
def register_identity(name):
def _register_identity(func):
IDENTITY_METHODS[name] = func
return func
return _register_identity
def get_identity(name, **kwargs):
return get_identity_func(name)(**kwargs)
@register_identity('rest')
@asyncio.coroutine
def fetch_rest_identity(**params):
response = yield from aiohttp.request(
'get',
settings.IDENTITY_API_URL,
params=params,
headers={'Content-Type': 'application/json'},
)
# TOOD Handle Errors nicely
if response.status != 200:
data = yield from response.read()
raise web.HTTPError(response.status)
data = yield from response.json()
return data
|
import asyncio
+
+ import aiohttp
from waterbutler import settings
+ IDENTITY_METHODS = {}
+
+
+ def get_identity_func(name):
+ try:
+ return IDENTITY_METHODS[name]
+ except KeyError:
+ raise NotImplementedError('No identity getter for {0}'.format(name))
+
+
+ def register_identity(name):
+ def _register_identity(func):
+ IDENTITY_METHODS[name] = func
+ return func
+ return _register_identity
+
+
+ def get_identity(name, **kwargs):
+ return get_identity_func(name)(**kwargs)
+
+
+ @register_identity('rest')
@asyncio.coroutine
- def fetch_rest_identity(params):
+ def fetch_rest_identity(**params):
? ++
response = yield from aiohttp.request(
'get',
settings.IDENTITY_API_URL,
params=params,
headers={'Content-Type': 'application/json'},
)
# TOOD Handle Errors nicely
if response.status != 200:
data = yield from response.read()
raise web.HTTPError(response.status)
data = yield from response.json()
return data
-
- IDENTITY_METHODS = {
- 'rest': fetch_rest_identity
- }
-
- get_identity = IDENTITY_METHODS[settings.IDENTITY_METHOD]
|
cc08fcbb513224aafe6c04143a150d1019c032ef
|
setup_py2exe.py
|
setup_py2exe.py
|
from distutils.core import setup
from glob import glob
import os
import py2exe
from setup import SSLYZE_SETUP
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
# Trust Stores
plugin_data_path = 'plugins\\data\\trust_stores'
plugin_data_files = []
for file in os.listdir(plugin_data_path):
file = os.path.join(plugin_data_path, file)
if os.path.isfile(file): # skip directories
plugin_data_files.append( file)
data_files.append((plugin_data_path, plugin_data_files))
sslyze_setup_py2exe = SSLYZE_SETUP.copy()
sslyze_setup_py2exe.update(
{
'console' : ['sslyze.py'],
'data_files' : data_files,
'zipfile' : None,
'options' : {'py2exe':{
#'skip_archive': True,
'bundle_files': 1,
}}
}
)
setup(**sslyze_setup_py2exe)
|
from distutils.core import setup
from glob import glob
import os
import py2exe
from setup import SSLYZE_SETUP
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
# Trust Stores
plugin_data_files = []
for file in os.listdir('plugins\\data\\trust_stores'):
file = os.path.join('plugins\\data\\trust_stores', file)
if os.path.isfile(file): # skip directories
plugin_data_files.append( file)
data_files.append(('data\\trust_stores', plugin_data_files))
sslyze_setup_py2exe = SSLYZE_SETUP.copy()
sslyze_setup_py2exe.update(
{
'console' : ['sslyze.py'],
'data_files' : data_files,
'zipfile' : None,
'options' : {'py2exe':{
#'skip_archive': True,
'bundle_files': 1,
}}
}
)
setup(**sslyze_setup_py2exe)
|
Fix trust stores paths for py2exe builds
|
Fix trust stores paths for py2exe builds
|
Python
|
agpl-3.0
|
nabla-c0d3/sslyze
|
from distutils.core import setup
from glob import glob
import os
import py2exe
from setup import SSLYZE_SETUP
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
# Trust Stores
- plugin_data_path = 'plugins\\data\\trust_stores'
plugin_data_files = []
- for file in os.listdir(plugin_data_path):
+ for file in os.listdir('plugins\\data\\trust_stores'):
- file = os.path.join(plugin_data_path, file)
+ file = os.path.join('plugins\\data\\trust_stores', file)
if os.path.isfile(file): # skip directories
plugin_data_files.append( file)
- data_files.append((plugin_data_path, plugin_data_files))
+ data_files.append(('data\\trust_stores', plugin_data_files))
sslyze_setup_py2exe = SSLYZE_SETUP.copy()
sslyze_setup_py2exe.update(
{
'console' : ['sslyze.py'],
'data_files' : data_files,
'zipfile' : None,
'options' : {'py2exe':{
#'skip_archive': True,
'bundle_files': 1,
}}
}
)
setup(**sslyze_setup_py2exe)
|
Fix trust stores paths for py2exe builds
|
## Code Before:
from distutils.core import setup
from glob import glob
import os
import py2exe
from setup import SSLYZE_SETUP
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
# Trust Stores
plugin_data_path = 'plugins\\data\\trust_stores'
plugin_data_files = []
for file in os.listdir(plugin_data_path):
file = os.path.join(plugin_data_path, file)
if os.path.isfile(file): # skip directories
plugin_data_files.append( file)
data_files.append((plugin_data_path, plugin_data_files))
sslyze_setup_py2exe = SSLYZE_SETUP.copy()
sslyze_setup_py2exe.update(
{
'console' : ['sslyze.py'],
'data_files' : data_files,
'zipfile' : None,
'options' : {'py2exe':{
#'skip_archive': True,
'bundle_files': 1,
}}
}
)
setup(**sslyze_setup_py2exe)
## Instruction:
Fix trust stores paths for py2exe builds
## Code After:
from distutils.core import setup
from glob import glob
import os
import py2exe
from setup import SSLYZE_SETUP
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
# Trust Stores
plugin_data_files = []
for file in os.listdir('plugins\\data\\trust_stores'):
file = os.path.join('plugins\\data\\trust_stores', file)
if os.path.isfile(file): # skip directories
plugin_data_files.append( file)
data_files.append(('data\\trust_stores', plugin_data_files))
sslyze_setup_py2exe = SSLYZE_SETUP.copy()
sslyze_setup_py2exe.update(
{
'console' : ['sslyze.py'],
'data_files' : data_files,
'zipfile' : None,
'options' : {'py2exe':{
#'skip_archive': True,
'bundle_files': 1,
}}
}
)
setup(**sslyze_setup_py2exe)
|
from distutils.core import setup
from glob import glob
import os
import py2exe
from setup import SSLYZE_SETUP
data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))]
# Trust Stores
- plugin_data_path = 'plugins\\data\\trust_stores'
plugin_data_files = []
- for file in os.listdir(plugin_data_path):
? ^ ^^ ^
+ for file in os.listdir('plugins\\data\\trust_stores'):
? + ^^^ +++++++ ^ ^^^^^
- file = os.path.join(plugin_data_path, file)
? ^ ^^ ^
+ file = os.path.join('plugins\\data\\trust_stores', file)
? + ^^^ +++++++ ^ ^^^^^
if os.path.isfile(file): # skip directories
plugin_data_files.append( file)
- data_files.append((plugin_data_path, plugin_data_files))
? ^^^^^^^ ^^ ^
+ data_files.append(('data\\trust_stores', plugin_data_files))
? ^ +++++++ ^ ^^^^^
sslyze_setup_py2exe = SSLYZE_SETUP.copy()
sslyze_setup_py2exe.update(
{
'console' : ['sslyze.py'],
'data_files' : data_files,
'zipfile' : None,
'options' : {'py2exe':{
#'skip_archive': True,
'bundle_files': 1,
}}
}
)
setup(**sslyze_setup_py2exe)
|
dae9d7d67aaf2ab8d39b232d243d860d9597bbd2
|
django_excel_tools/exceptions.py
|
django_excel_tools/exceptions.py
|
class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
|
class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
class SerializerConfigError(BaseExcelError):
pass
|
Add error when serializer setup has error
|
Add error when serializer setup has error
|
Python
|
mit
|
NorakGithub/django-excel-tools
|
class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
+
+ class SerializerConfigError(BaseExcelError):
+ pass
+
|
Add error when serializer setup has error
|
## Code Before:
class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
## Instruction:
Add error when serializer setup has error
## Code After:
class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
class SerializerConfigError(BaseExcelError):
pass
|
class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
+
+
+ class SerializerConfigError(BaseExcelError):
+ pass
|
56661432ea78f193346fe8bcf33bd19a2e1787bc
|
tests/test_manager.py
|
tests/test_manager.py
|
def test_ensure_authority(manager_transaction):
authority = manager_transaction.ensure_authority(
name='Test Authority',
rank=0,
cardinality=1234
)
assert authority.name == 'Test Authority'
assert authority.rank == 0
assert authority.cardinality == 1234
|
def test_ensure_authority(manager_transaction):
authority1 = manager_transaction.ensure_authority(
name='Test Authority',
rank=0,
cardinality=1234
)
assert authority1.name == 'Test Authority'
assert authority1.rank == 0
assert authority1.cardinality == 1234
authority2 = manager_transaction.ensure_authority(
name='Test Authority',
rank=1,
cardinality=2345
)
assert authority1 is authority2
assert authority2.name == 'Test Authority'
assert authority2.rank == 1
assert authority2.cardinality == 2345
|
Test ensure_authority for both nonexistent and already existing Authority records.
|
Test ensure_authority for both nonexistent and already existing Authority records.
|
Python
|
mit
|
scolby33/OCSPdash,scolby33/OCSPdash,scolby33/OCSPdash
|
def test_ensure_authority(manager_transaction):
- authority = manager_transaction.ensure_authority(
+ authority1 = manager_transaction.ensure_authority(
name='Test Authority',
rank=0,
cardinality=1234
)
+ assert authority1.name == 'Test Authority'
+ assert authority1.rank == 0
+ assert authority1.cardinality == 1234
+ authority2 = manager_transaction.ensure_authority(
+ name='Test Authority',
+ rank=1,
+ cardinality=2345
+ )
+ assert authority1 is authority2
- assert authority.name == 'Test Authority'
+ assert authority2.name == 'Test Authority'
- assert authority.rank == 0
+ assert authority2.rank == 1
- assert authority.cardinality == 1234
+ assert authority2.cardinality == 2345
|
Test ensure_authority for both nonexistent and already existing Authority records.
|
## Code Before:
def test_ensure_authority(manager_transaction):
authority = manager_transaction.ensure_authority(
name='Test Authority',
rank=0,
cardinality=1234
)
assert authority.name == 'Test Authority'
assert authority.rank == 0
assert authority.cardinality == 1234
## Instruction:
Test ensure_authority for both nonexistent and already existing Authority records.
## Code After:
def test_ensure_authority(manager_transaction):
authority1 = manager_transaction.ensure_authority(
name='Test Authority',
rank=0,
cardinality=1234
)
assert authority1.name == 'Test Authority'
assert authority1.rank == 0
assert authority1.cardinality == 1234
authority2 = manager_transaction.ensure_authority(
name='Test Authority',
rank=1,
cardinality=2345
)
assert authority1 is authority2
assert authority2.name == 'Test Authority'
assert authority2.rank == 1
assert authority2.cardinality == 2345
|
def test_ensure_authority(manager_transaction):
- authority = manager_transaction.ensure_authority(
+ authority1 = manager_transaction.ensure_authority(
? +
name='Test Authority',
rank=0,
cardinality=1234
)
+ assert authority1.name == 'Test Authority'
+ assert authority1.rank == 0
+ assert authority1.cardinality == 1234
+ authority2 = manager_transaction.ensure_authority(
+ name='Test Authority',
+ rank=1,
+ cardinality=2345
+ )
+ assert authority1 is authority2
- assert authority.name == 'Test Authority'
+ assert authority2.name == 'Test Authority'
? +
- assert authority.rank == 0
? ^
+ assert authority2.rank == 1
? + ^
- assert authority.cardinality == 1234
? -
+ assert authority2.cardinality == 2345
? + +
|
f2a88e4849876970c29b568b897dff88ffe09306
|
djrichtextfield/urls.py
|
djrichtextfield/urls.py
|
from django.conf.urls import url
from djrichtextfield.views import InitView
urlpatterns = [
url('^init.js$', InitView.as_view(), name='djrichtextfield_init')
]
|
from django.urls import path
from djrichtextfield.views import InitView
urlpatterns = [
path('init.js', InitView.as_view(), name='djrichtextfield_init')
]
|
Use path instead of soon to be deprecated url
|
Use path instead of soon to be deprecated url
|
Python
|
mit
|
jaap3/django-richtextfield,jaap3/django-richtextfield
|
- from django.conf.urls import url
+ from django.urls import path
from djrichtextfield.views import InitView
urlpatterns = [
- url('^init.js$', InitView.as_view(), name='djrichtextfield_init')
+ path('init.js', InitView.as_view(), name='djrichtextfield_init')
]
|
Use path instead of soon to be deprecated url
|
## Code Before:
from django.conf.urls import url
from djrichtextfield.views import InitView
urlpatterns = [
url('^init.js$', InitView.as_view(), name='djrichtextfield_init')
]
## Instruction:
Use path instead of soon to be deprecated url
## Code After:
from django.urls import path
from djrichtextfield.views import InitView
urlpatterns = [
path('init.js', InitView.as_view(), name='djrichtextfield_init')
]
|
- from django.conf.urls import url
? ----- ^^^
+ from django.urls import path
? ^^^^
from djrichtextfield.views import InitView
urlpatterns = [
- url('^init.js$', InitView.as_view(), name='djrichtextfield_init')
? ^^^ - -
+ path('init.js', InitView.as_view(), name='djrichtextfield_init')
? ^^^^
]
|
257134bdaea7c250d5956c4095adf0b917b65aa6
|
database/dict_converters/event_details_converter.py
|
database/dict_converters/event_details_converter.py
|
from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
'alliances': event_details.alliance_selections,
'district_points': event_details.district_points,
'rankings': event_details.renderable_rankings,
'stats': event_details.matchstats,
}
return event_details_dict
|
from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
'alliances': event_details.alliance_selections if event_details else None,
'district_points': event_details.district_points if event_details else None,
'rankings': event_details.renderable_rankings if event_details else None,
'stats': event_details.matchstats if event_details else None,
}
return event_details_dict
|
Fix null case for event details
|
Fix null case for event details
|
Python
|
mit
|
verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,nwalters512/the-blue-alliance,nwalters512/the-blue-alliance,the-blue-alliance/the-blue-alliance,tsteward/the-blue-alliance,nwalters512/the-blue-alliance,phil-lopreiato/the-blue-alliance,nwalters512/the-blue-alliance,tsteward/the-blue-alliance,jaredhasenklein/the-blue-alliance,phil-lopreiato/the-blue-alliance,verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,the-blue-alliance/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,phil-lopreiato/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,tsteward/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,tsteward/the-blue-alliance,the-blue-alliance/the-blue-alliance,tsteward/the-blue-alliance,nwalters512/the-blue-alliance,verycumbersome/the-blue-alliance,fangeugene/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,nwalters512/the-blue-alliance,verycumbersome/the-blue-alliance,tsteward/the-blue-alliance,phil-lopreiato/the-blue-alliance,fangeugene/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,the-blue-alliance/the-blue-alliance,phil-lopreiato/the-blue-alliance,phil-lopreiato/the-blue-alliance,bdaroz/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,fangeugene/the-blue-alliance,bdaroz/the-blue-alliance
|
from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
- 'alliances': event_details.alliance_selections,
+ 'alliances': event_details.alliance_selections if event_details else None,
- 'district_points': event_details.district_points,
+ 'district_points': event_details.district_points if event_details else None,
- 'rankings': event_details.renderable_rankings,
+ 'rankings': event_details.renderable_rankings if event_details else None,
- 'stats': event_details.matchstats,
+ 'stats': event_details.matchstats if event_details else None,
}
return event_details_dict
|
Fix null case for event details
|
## Code Before:
from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
'alliances': event_details.alliance_selections,
'district_points': event_details.district_points,
'rankings': event_details.renderable_rankings,
'stats': event_details.matchstats,
}
return event_details_dict
## Instruction:
Fix null case for event details
## Code After:
from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
'alliances': event_details.alliance_selections if event_details else None,
'district_points': event_details.district_points if event_details else None,
'rankings': event_details.renderable_rankings if event_details else None,
'stats': event_details.matchstats if event_details else None,
}
return event_details_dict
|
from database.dict_converters.converter_base import ConverterBase
class EventDetailsConverter(ConverterBase):
SUBVERSIONS = { # Increment every time a change to the dict is made
3: 0,
}
@classmethod
def convert(cls, event_details, dict_version):
CONVERTERS = {
3: cls.eventDetailsConverter_v3,
}
return CONVERTERS[dict_version](event_details)
@classmethod
def eventDetailsConverter_v3(cls, event_details):
event_details_dict = {
- 'alliances': event_details.alliance_selections,
+ 'alliances': event_details.alliance_selections if event_details else None,
? +++++++++++++++++++++++++++
- 'district_points': event_details.district_points,
+ 'district_points': event_details.district_points if event_details else None,
? +++++++++++++++++++++++++++
- 'rankings': event_details.renderable_rankings,
+ 'rankings': event_details.renderable_rankings if event_details else None,
? +++++++++++++++++++++++++++
- 'stats': event_details.matchstats,
+ 'stats': event_details.matchstats if event_details else None,
? +++++++++++++++++++++++++++
}
return event_details_dict
|
eb1fdf3419bdfd1d5920d73a877f707162b783b0
|
cfgrib/__init__.py
|
cfgrib/__init__.py
|
__version__ = "0.9.9.2.dev0"
# cfgrib core API depends on the ECMWF ecCodes C-library only
from .cfmessage import CfMessage
from .dataset import (
Dataset,
DatasetBuildError,
open_container,
open_file,
open_fileindex,
open_from_index,
)
from .messages import FileStream, Message
# NOTE: xarray is not a hard dependency, but let's provide helpers if it is available.
try:
from .xarray_store import open_dataset, open_datasets
except ImportError:
pass
|
__version__ = "0.9.9.2.dev0"
# cfgrib core API depends on the ECMWF ecCodes C-library only
from .cfmessage import CfMessage
from .dataset import Dataset, DatasetBuildError, open_container, open_file, open_from_index
from .messages import FileStream, Message
# NOTE: xarray is not a hard dependency, but let's provide helpers if it is available.
try:
from .xarray_store import open_dataset, open_datasets
except ImportError:
pass
|
Drop unused and dangerous entrypoint `open_fileindex`
|
Drop unused and dangerous entrypoint `open_fileindex`
|
Python
|
apache-2.0
|
ecmwf/cfgrib
|
__version__ = "0.9.9.2.dev0"
# cfgrib core API depends on the ECMWF ecCodes C-library only
from .cfmessage import CfMessage
+ from .dataset import Dataset, DatasetBuildError, open_container, open_file, open_from_index
- from .dataset import (
- Dataset,
- DatasetBuildError,
- open_container,
- open_file,
- open_fileindex,
- open_from_index,
- )
from .messages import FileStream, Message
# NOTE: xarray is not a hard dependency, but let's provide helpers if it is available.
try:
from .xarray_store import open_dataset, open_datasets
except ImportError:
pass
|
Drop unused and dangerous entrypoint `open_fileindex`
|
## Code Before:
__version__ = "0.9.9.2.dev0"
# cfgrib core API depends on the ECMWF ecCodes C-library only
from .cfmessage import CfMessage
from .dataset import (
Dataset,
DatasetBuildError,
open_container,
open_file,
open_fileindex,
open_from_index,
)
from .messages import FileStream, Message
# NOTE: xarray is not a hard dependency, but let's provide helpers if it is available.
try:
from .xarray_store import open_dataset, open_datasets
except ImportError:
pass
## Instruction:
Drop unused and dangerous entrypoint `open_fileindex`
## Code After:
__version__ = "0.9.9.2.dev0"
# cfgrib core API depends on the ECMWF ecCodes C-library only
from .cfmessage import CfMessage
from .dataset import Dataset, DatasetBuildError, open_container, open_file, open_from_index
from .messages import FileStream, Message
# NOTE: xarray is not a hard dependency, but let's provide helpers if it is available.
try:
from .xarray_store import open_dataset, open_datasets
except ImportError:
pass
|
__version__ = "0.9.9.2.dev0"
# cfgrib core API depends on the ECMWF ecCodes C-library only
from .cfmessage import CfMessage
+ from .dataset import Dataset, DatasetBuildError, open_container, open_file, open_from_index
- from .dataset import (
- Dataset,
- DatasetBuildError,
- open_container,
- open_file,
- open_fileindex,
- open_from_index,
- )
from .messages import FileStream, Message
# NOTE: xarray is not a hard dependency, but let's provide helpers if it is available.
try:
from .xarray_store import open_dataset, open_datasets
except ImportError:
pass
|
1636757f52a553c99fb40059f4461e97485d2199
|
fits/make_fit_feedmes.py
|
fits/make_fit_feedmes.py
|
from glob import glob
import os
import re
def make_feedmes():
# Used to convert all the fit*.galfit files to fit*.diff
ids = glob('*/')
for id in ids:
os.chdir(id)
feedmes = glob('fit*diff')
# output starting models
for f in feedmes:
template = r'fit(.*)(\d)(n|m){0,1}([ugrizYJHK]{0,1})([abcde]{0,1})'
matchobj = re.match(template, f)
if matchobj.group(1) != 'A' or matchobj.group(5) != '':
cmd = matchobj.expand('patch -o \g<0>.galfit ../A\g<2>/'
'fitA\g<2>\g<4>.galfit \g<0>.diff')
os.system(cmd)
os.chdir('..')
if __name__ =='__main__':
make_feedmes()
|
from glob import glob
import os
import re
def make_feedmes():
# Used to convert all the fit*.galfit files to fit*.diff
ids = glob('*/')
for id in ids:
feedmes = glob(id+'fit*diff')
# output starting models
for f in feedmes:
template = r'.*fit(.*)(\d)(n|m){0,1}([ugrizYJHK]{0,1})([abcde]{0,1})'
matchobj = re.match(template, f)
if matchobj.group(1) != 'A' or matchobj.group(5) != '':
cmd = matchobj.expand('patch -o \g<0>.galfit A\g<2>/'
'fitA\g<2>\g<4>.galfit \g<0>.diff')
os.system(cmd)
if __name__ =='__main__':
make_feedmes()
|
Fix to work with new patch
|
Fix to work with new patch
|
Python
|
mit
|
MegaMorph/galfitm-illustrations,MegaMorph/galfitm-illustrations
|
from glob import glob
import os
import re
def make_feedmes():
# Used to convert all the fit*.galfit files to fit*.diff
ids = glob('*/')
for id in ids:
- os.chdir(id)
- feedmes = glob('fit*diff')
+ feedmes = glob(id+'fit*diff')
# output starting models
for f in feedmes:
- template = r'fit(.*)(\d)(n|m){0,1}([ugrizYJHK]{0,1})([abcde]{0,1})'
+ template = r'.*fit(.*)(\d)(n|m){0,1}([ugrizYJHK]{0,1})([abcde]{0,1})'
matchobj = re.match(template, f)
if matchobj.group(1) != 'A' or matchobj.group(5) != '':
- cmd = matchobj.expand('patch -o \g<0>.galfit ../A\g<2>/'
+ cmd = matchobj.expand('patch -o \g<0>.galfit A\g<2>/'
'fitA\g<2>\g<4>.galfit \g<0>.diff')
os.system(cmd)
- os.chdir('..')
if __name__ =='__main__':
make_feedmes()
|
Fix to work with new patch
|
## Code Before:
from glob import glob
import os
import re
def make_feedmes():
# Used to convert all the fit*.galfit files to fit*.diff
ids = glob('*/')
for id in ids:
os.chdir(id)
feedmes = glob('fit*diff')
# output starting models
for f in feedmes:
template = r'fit(.*)(\d)(n|m){0,1}([ugrizYJHK]{0,1})([abcde]{0,1})'
matchobj = re.match(template, f)
if matchobj.group(1) != 'A' or matchobj.group(5) != '':
cmd = matchobj.expand('patch -o \g<0>.galfit ../A\g<2>/'
'fitA\g<2>\g<4>.galfit \g<0>.diff')
os.system(cmd)
os.chdir('..')
if __name__ =='__main__':
make_feedmes()
## Instruction:
Fix to work with new patch
## Code After:
from glob import glob
import os
import re
def make_feedmes():
# Used to convert all the fit*.galfit files to fit*.diff
ids = glob('*/')
for id in ids:
feedmes = glob(id+'fit*diff')
# output starting models
for f in feedmes:
template = r'.*fit(.*)(\d)(n|m){0,1}([ugrizYJHK]{0,1})([abcde]{0,1})'
matchobj = re.match(template, f)
if matchobj.group(1) != 'A' or matchobj.group(5) != '':
cmd = matchobj.expand('patch -o \g<0>.galfit A\g<2>/'
'fitA\g<2>\g<4>.galfit \g<0>.diff')
os.system(cmd)
if __name__ =='__main__':
make_feedmes()
|
from glob import glob
import os
import re
def make_feedmes():
# Used to convert all the fit*.galfit files to fit*.diff
ids = glob('*/')
for id in ids:
- os.chdir(id)
- feedmes = glob('fit*diff')
+ feedmes = glob(id+'fit*diff')
? +++
# output starting models
for f in feedmes:
- template = r'fit(.*)(\d)(n|m){0,1}([ugrizYJHK]{0,1})([abcde]{0,1})'
+ template = r'.*fit(.*)(\d)(n|m){0,1}([ugrizYJHK]{0,1})([abcde]{0,1})'
? ++
matchobj = re.match(template, f)
if matchobj.group(1) != 'A' or matchobj.group(5) != '':
- cmd = matchobj.expand('patch -o \g<0>.galfit ../A\g<2>/'
? ---
+ cmd = matchobj.expand('patch -o \g<0>.galfit A\g<2>/'
'fitA\g<2>\g<4>.galfit \g<0>.diff')
os.system(cmd)
- os.chdir('..')
if __name__ =='__main__':
make_feedmes()
|
1cf7b11cdb12a135f2dfa99d7e625eb160b0d7c2
|
apps/orders/models.py
|
apps/orders/models.py
|
from django.db import models
# Create your models here.
|
from django.db import models
from ..shop.models import Product
class Order(models.Model):
first_name = models.CharField(verbose_name="Ім,я", max_length=50)
last_name = models.CharField(verbose_name="Прізвище", max_length=50)
email = models.EmailField(verbose_name="Email")
address = models.CharField(verbose_name="Адреса", max_length=250)
postal_code = models.CharField(verbose_name="Поштовий код", max_length=20)
city = models.CharField(verbose_name="Місто", max_length=100)
created = models.DateTimeField(verbose_name="Створене", auto_now_add=True)
updated = models.DateTimeField(verbose_name="Оновлене", auto_now=True)
paid = models.BooleanField(verbose_name="Оплачене", default=False)
class Meta:
ordering = ('-created', )
verbose_name = "Замовлення"
verbose_name_plural = "Замовлення"
def __str__(self):
return "Замовлення: {}".format(self.id)
def get_total_cost(self):
return sum(item.get_cost() for item in self.items.all())
class OrderItem(models.Model):
order = models.ForeignKey(Order, related_name="items")
product = models.ForeignKey(Product, related_name="order_items")
price = models.DecimalField(verbose_name="Ціна", max_digits=10,
decimal_places=2)
quantity = models.PositiveIntegerField(verbose_name="К-сть", default=1)
def __str__(self):
return '{}'.format(self.id)
def get_cost(self):
return self.price * self.quantity
|
Create Order and OrderItem Models
|
Create Order and OrderItem Models
|
Python
|
mit
|
samitnuk/online_shop,samitnuk/online_shop,samitnuk/online_shop
|
from django.db import models
- # Create your models here.
+ from ..shop.models import Product
+
+ class Order(models.Model):
+ first_name = models.CharField(verbose_name="Ім,я", max_length=50)
+ last_name = models.CharField(verbose_name="Прізвище", max_length=50)
+ email = models.EmailField(verbose_name="Email")
+ address = models.CharField(verbose_name="Адреса", max_length=250)
+ postal_code = models.CharField(verbose_name="Поштовий код", max_length=20)
+ city = models.CharField(verbose_name="Місто", max_length=100)
+ created = models.DateTimeField(verbose_name="Створене", auto_now_add=True)
+ updated = models.DateTimeField(verbose_name="Оновлене", auto_now=True)
+ paid = models.BooleanField(verbose_name="Оплачене", default=False)
+
+ class Meta:
+ ordering = ('-created', )
+ verbose_name = "Замовлення"
+ verbose_name_plural = "Замовлення"
+
+ def __str__(self):
+ return "Замовлення: {}".format(self.id)
+
+ def get_total_cost(self):
+ return sum(item.get_cost() for item in self.items.all())
+
+
+ class OrderItem(models.Model):
+ order = models.ForeignKey(Order, related_name="items")
+ product = models.ForeignKey(Product, related_name="order_items")
+ price = models.DecimalField(verbose_name="Ціна", max_digits=10,
+ decimal_places=2)
+ quantity = models.PositiveIntegerField(verbose_name="К-сть", default=1)
+
+ def __str__(self):
+ return '{}'.format(self.id)
+
+ def get_cost(self):
+ return self.price * self.quantity
+
|
Create Order and OrderItem Models
|
## Code Before:
from django.db import models
# Create your models here.
## Instruction:
Create Order and OrderItem Models
## Code After:
from django.db import models
from ..shop.models import Product
class Order(models.Model):
first_name = models.CharField(verbose_name="Ім,я", max_length=50)
last_name = models.CharField(verbose_name="Прізвище", max_length=50)
email = models.EmailField(verbose_name="Email")
address = models.CharField(verbose_name="Адреса", max_length=250)
postal_code = models.CharField(verbose_name="Поштовий код", max_length=20)
city = models.CharField(verbose_name="Місто", max_length=100)
created = models.DateTimeField(verbose_name="Створене", auto_now_add=True)
updated = models.DateTimeField(verbose_name="Оновлене", auto_now=True)
paid = models.BooleanField(verbose_name="Оплачене", default=False)
class Meta:
ordering = ('-created', )
verbose_name = "Замовлення"
verbose_name_plural = "Замовлення"
def __str__(self):
return "Замовлення: {}".format(self.id)
def get_total_cost(self):
return sum(item.get_cost() for item in self.items.all())
class OrderItem(models.Model):
order = models.ForeignKey(Order, related_name="items")
product = models.ForeignKey(Product, related_name="order_items")
price = models.DecimalField(verbose_name="Ціна", max_digits=10,
decimal_places=2)
quantity = models.PositiveIntegerField(verbose_name="К-сть", default=1)
def __str__(self):
return '{}'.format(self.id)
def get_cost(self):
return self.price * self.quantity
|
from django.db import models
- # Create your models here.
+ from ..shop.models import Product
+
+
+ class Order(models.Model):
+ first_name = models.CharField(verbose_name="Ім,я", max_length=50)
+ last_name = models.CharField(verbose_name="Прізвище", max_length=50)
+ email = models.EmailField(verbose_name="Email")
+ address = models.CharField(verbose_name="Адреса", max_length=250)
+ postal_code = models.CharField(verbose_name="Поштовий код", max_length=20)
+ city = models.CharField(verbose_name="Місто", max_length=100)
+ created = models.DateTimeField(verbose_name="Створене", auto_now_add=True)
+ updated = models.DateTimeField(verbose_name="Оновлене", auto_now=True)
+ paid = models.BooleanField(verbose_name="Оплачене", default=False)
+
+ class Meta:
+ ordering = ('-created', )
+ verbose_name = "Замовлення"
+ verbose_name_plural = "Замовлення"
+
+ def __str__(self):
+ return "Замовлення: {}".format(self.id)
+
+ def get_total_cost(self):
+ return sum(item.get_cost() for item in self.items.all())
+
+
+ class OrderItem(models.Model):
+ order = models.ForeignKey(Order, related_name="items")
+ product = models.ForeignKey(Product, related_name="order_items")
+ price = models.DecimalField(verbose_name="Ціна", max_digits=10,
+ decimal_places=2)
+ quantity = models.PositiveIntegerField(verbose_name="К-сть", default=1)
+
+ def __str__(self):
+ return '{}'.format(self.id)
+
+ def get_cost(self):
+ return self.price * self.quantity
|
96dc9e590b81926fddb83a85a1352039c10c1509
|
links/mlp.py
|
links/mlp.py
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import super
from builtins import range
from future import standard_library
standard_library.install_aliases()
import random
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import cuda
class MLP(chainer.Chain):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
layers = {}
if hidden_sizes:
hidden_layers = []
hidden_layers.append(L.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
hidden_layers.append(L.Linear(hin, hout))
layers['hidden_layers'] = chainer.ChainList(*hidden_layers)
layers['output'] = L.Linear(hidden_sizes[-1], out_size)
else:
layers['output'] = L.Linear(in_size, out_size)
super().__init__(**layers)
def __call__(self, x, test=False):
h = x
for l in self.hidden_layers:
h = F.relu(l(h))
return self.output(h)
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import super
from builtins import range
from future import standard_library
standard_library.install_aliases()
import random
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import cuda
class MLP(chainer.Chain):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
layers = {}
if hidden_sizes:
hidden_layers = []
hidden_layers.append(L.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
hidden_layers.append(L.Linear(hin, hout))
layers['hidden_layers'] = chainer.ChainList(*hidden_layers)
layers['output'] = L.Linear(hidden_sizes[-1], out_size)
else:
layers['output'] = L.Linear(in_size, out_size)
super().__init__(**layers)
def __call__(self, x, test=False):
h = x
if self.hidden_sizes:
for l in self.hidden_layers:
h = F.relu(l(h))
return self.output(h)
|
Support configuration with no hidden-layer
|
Support configuration with no hidden-layer
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import super
from builtins import range
from future import standard_library
standard_library.install_aliases()
import random
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import cuda
class MLP(chainer.Chain):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
layers = {}
if hidden_sizes:
hidden_layers = []
hidden_layers.append(L.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
hidden_layers.append(L.Linear(hin, hout))
layers['hidden_layers'] = chainer.ChainList(*hidden_layers)
layers['output'] = L.Linear(hidden_sizes[-1], out_size)
else:
layers['output'] = L.Linear(in_size, out_size)
super().__init__(**layers)
def __call__(self, x, test=False):
h = x
+ if self.hidden_sizes:
- for l in self.hidden_layers:
+ for l in self.hidden_layers:
- h = F.relu(l(h))
+ h = F.relu(l(h))
return self.output(h)
|
Support configuration with no hidden-layer
|
## Code Before:
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import super
from builtins import range
from future import standard_library
standard_library.install_aliases()
import random
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import cuda
class MLP(chainer.Chain):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
layers = {}
if hidden_sizes:
hidden_layers = []
hidden_layers.append(L.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
hidden_layers.append(L.Linear(hin, hout))
layers['hidden_layers'] = chainer.ChainList(*hidden_layers)
layers['output'] = L.Linear(hidden_sizes[-1], out_size)
else:
layers['output'] = L.Linear(in_size, out_size)
super().__init__(**layers)
def __call__(self, x, test=False):
h = x
for l in self.hidden_layers:
h = F.relu(l(h))
return self.output(h)
## Instruction:
Support configuration with no hidden-layer
## Code After:
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import super
from builtins import range
from future import standard_library
standard_library.install_aliases()
import random
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import cuda
class MLP(chainer.Chain):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
layers = {}
if hidden_sizes:
hidden_layers = []
hidden_layers.append(L.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
hidden_layers.append(L.Linear(hin, hout))
layers['hidden_layers'] = chainer.ChainList(*hidden_layers)
layers['output'] = L.Linear(hidden_sizes[-1], out_size)
else:
layers['output'] = L.Linear(in_size, out_size)
super().__init__(**layers)
def __call__(self, x, test=False):
h = x
if self.hidden_sizes:
for l in self.hidden_layers:
h = F.relu(l(h))
return self.output(h)
|
from __future__ import division
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from builtins import super
from builtins import range
from future import standard_library
standard_library.install_aliases()
import random
import numpy as np
import chainer
from chainer import functions as F
from chainer import links as L
from chainer import cuda
class MLP(chainer.Chain):
"""Multi-Layer Perceptron"""
def __init__(self, in_size, out_size, hidden_sizes):
self.in_size = in_size
self.out_size = out_size
self.hidden_sizes = hidden_sizes
layers = {}
if hidden_sizes:
hidden_layers = []
hidden_layers.append(L.Linear(in_size, hidden_sizes[0]))
for hin, hout in zip(hidden_sizes, hidden_sizes[1:]):
hidden_layers.append(L.Linear(hin, hout))
layers['hidden_layers'] = chainer.ChainList(*hidden_layers)
layers['output'] = L.Linear(hidden_sizes[-1], out_size)
else:
layers['output'] = L.Linear(in_size, out_size)
super().__init__(**layers)
def __call__(self, x, test=False):
h = x
+ if self.hidden_sizes:
- for l in self.hidden_layers:
+ for l in self.hidden_layers:
? ++++
- h = F.relu(l(h))
+ h = F.relu(l(h))
? ++++
return self.output(h)
|
0298ace270749a6de89595a5bb566739dc63b16e
|
jsk_apc2016_common/scripts/install_trained_data.py
|
jsk_apc2016_common/scripts/install_trained_data.py
|
from jsk_data import download_data
def main():
PKG = 'jsk_apc2016_common'
download_data(
pkg_name=PKG,
path='trained_data/vgg16_96000.chainermodel',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vOTdzOGlJcGM1N00',
md5='3c993d333cf554684b5162c9f69b20cf',
)
if __name__ == '__main__':
main()
|
from jsk_data import download_data
def main():
PKG = 'jsk_apc2016_common'
download_data(
pkg_name=PKG,
path='trained_data/vgg16_96000.chainermodel',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vOTdzOGlJcGM1N00',
md5='3c993d333cf554684b5162c9f69b20cf',
)
download_data(
pkg_name=PKG,
path='trained_data/vgg16_rotation_translation_brightness_372000.chainermodel',
url='https://drive.google.com/open?id=0B9P1L--7Wd2veHZKRkFwZjRiZDQ',
md5='58a0e819ba141a34b1d68cc5e972615b',
)
if __name__ == '__main__':
main()
|
Add vgg16 trained_data to download
|
Add vgg16 trained_data to download
|
Python
|
bsd-3-clause
|
pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc,pazeshun/jsk_apc
|
from jsk_data import download_data
def main():
PKG = 'jsk_apc2016_common'
download_data(
pkg_name=PKG,
path='trained_data/vgg16_96000.chainermodel',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vOTdzOGlJcGM1N00',
md5='3c993d333cf554684b5162c9f69b20cf',
)
+ download_data(
+ pkg_name=PKG,
+ path='trained_data/vgg16_rotation_translation_brightness_372000.chainermodel',
+ url='https://drive.google.com/open?id=0B9P1L--7Wd2veHZKRkFwZjRiZDQ',
+ md5='58a0e819ba141a34b1d68cc5e972615b',
+ )
+
if __name__ == '__main__':
main()
|
Add vgg16 trained_data to download
|
## Code Before:
from jsk_data import download_data
def main():
PKG = 'jsk_apc2016_common'
download_data(
pkg_name=PKG,
path='trained_data/vgg16_96000.chainermodel',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vOTdzOGlJcGM1N00',
md5='3c993d333cf554684b5162c9f69b20cf',
)
if __name__ == '__main__':
main()
## Instruction:
Add vgg16 trained_data to download
## Code After:
from jsk_data import download_data
def main():
PKG = 'jsk_apc2016_common'
download_data(
pkg_name=PKG,
path='trained_data/vgg16_96000.chainermodel',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vOTdzOGlJcGM1N00',
md5='3c993d333cf554684b5162c9f69b20cf',
)
download_data(
pkg_name=PKG,
path='trained_data/vgg16_rotation_translation_brightness_372000.chainermodel',
url='https://drive.google.com/open?id=0B9P1L--7Wd2veHZKRkFwZjRiZDQ',
md5='58a0e819ba141a34b1d68cc5e972615b',
)
if __name__ == '__main__':
main()
|
from jsk_data import download_data
def main():
PKG = 'jsk_apc2016_common'
download_data(
pkg_name=PKG,
path='trained_data/vgg16_96000.chainermodel',
url='https://drive.google.com/uc?id=0B9P1L--7Wd2vOTdzOGlJcGM1N00',
md5='3c993d333cf554684b5162c9f69b20cf',
)
+ download_data(
+ pkg_name=PKG,
+ path='trained_data/vgg16_rotation_translation_brightness_372000.chainermodel',
+ url='https://drive.google.com/open?id=0B9P1L--7Wd2veHZKRkFwZjRiZDQ',
+ md5='58a0e819ba141a34b1d68cc5e972615b',
+ )
+
if __name__ == '__main__':
main()
|
1dca7eeb036423d1d5889e5ec084f9f91f90eb74
|
spacy/tests/regression/test_issue957.py
|
spacy/tests/regression/test_issue957.py
|
import pytest
from ... import load as load_spacy
def test_issue913(en_tokenizer):
'''Test that spaCy doesn't hang on many periods.'''
string = '0'
for i in range(1, 100):
string += '.%d' % i
doc = en_tokenizer(string)
# Don't want tests to fail if they haven't installed pytest-timeout plugin
try:
test_issue913 = pytest.mark.timeout(5)(test_issue913)
except NameError:
pass
|
from __future__ import unicode_literals
import pytest
from ... import load as load_spacy
def test_issue957(en_tokenizer):
'''Test that spaCy doesn't hang on many periods.'''
string = '0'
for i in range(1, 100):
string += '.%d' % i
doc = en_tokenizer(string)
# Don't want tests to fail if they haven't installed pytest-timeout plugin
try:
test_issue913 = pytest.mark.timeout(5)(test_issue913)
except NameError:
pass
|
Add unicode declaration on new regression test
|
Add unicode declaration on new regression test
|
Python
|
mit
|
honnibal/spaCy,raphael0202/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,honnibal/spaCy,honnibal/spaCy,aikramer2/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy,raphael0202/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,recognai/spaCy,recognai/spaCy,explosion/spaCy,Gregory-Howard/spaCy,oroszgy/spaCy.hu,recognai/spaCy,Gregory-Howard/spaCy,explosion/spaCy,spacy-io/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,aikramer2/spaCy,honnibal/spaCy,oroszgy/spaCy.hu,oroszgy/spaCy.hu,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,spacy-io/spaCy,aikramer2/spaCy,aikramer2/spaCy,explosion/spaCy,Gregory-Howard/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,spacy-io/spaCy,raphael0202/spaCy,explosion/spaCy,recognai/spaCy,spacy-io/spaCy
|
+ from __future__ import unicode_literals
+
import pytest
from ... import load as load_spacy
- def test_issue913(en_tokenizer):
+ def test_issue957(en_tokenizer):
'''Test that spaCy doesn't hang on many periods.'''
string = '0'
for i in range(1, 100):
string += '.%d' % i
doc = en_tokenizer(string)
# Don't want tests to fail if they haven't installed pytest-timeout plugin
try:
test_issue913 = pytest.mark.timeout(5)(test_issue913)
except NameError:
pass
|
Add unicode declaration on new regression test
|
## Code Before:
import pytest
from ... import load as load_spacy
def test_issue913(en_tokenizer):
'''Test that spaCy doesn't hang on many periods.'''
string = '0'
for i in range(1, 100):
string += '.%d' % i
doc = en_tokenizer(string)
# Don't want tests to fail if they haven't installed pytest-timeout plugin
try:
test_issue913 = pytest.mark.timeout(5)(test_issue913)
except NameError:
pass
## Instruction:
Add unicode declaration on new regression test
## Code After:
from __future__ import unicode_literals
import pytest
from ... import load as load_spacy
def test_issue957(en_tokenizer):
'''Test that spaCy doesn't hang on many periods.'''
string = '0'
for i in range(1, 100):
string += '.%d' % i
doc = en_tokenizer(string)
# Don't want tests to fail if they haven't installed pytest-timeout plugin
try:
test_issue913 = pytest.mark.timeout(5)(test_issue913)
except NameError:
pass
|
+ from __future__ import unicode_literals
+
import pytest
from ... import load as load_spacy
- def test_issue913(en_tokenizer):
? ^^
+ def test_issue957(en_tokenizer):
? ^^
'''Test that spaCy doesn't hang on many periods.'''
string = '0'
for i in range(1, 100):
string += '.%d' % i
doc = en_tokenizer(string)
# Don't want tests to fail if they haven't installed pytest-timeout plugin
try:
test_issue913 = pytest.mark.timeout(5)(test_issue913)
except NameError:
pass
|
c2ae6fb563b1ecc20b11ec6d693bad8a7f9e8945
|
scrapple/utils/exceptions.py
|
scrapple/utils/exceptions.py
|
import re
def handle_exceptions(args):
"""
Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None
"""
projectname_re = re.compile(r'[^a-zA-Z0-9_]')
if args['genconfig']:
if args['--type'] not in ['scraper', 'crawler']:
raise Exception("--type has to be 'scraper' or 'crawler'")
if args['--selector'] not in ['xpath', 'css']:
raise Exception("--selector has to be 'xpath' or 'css'")
if args['generate'] or args['run']:
if args['--output_type'] not in ['json', 'csv']:
raise Exception("--output_type has to be 'json' or 'csv'")
if args['genconfig'] or args['generate'] or args['run']:
if projectname_re.search(args['<projectname>']) is not None:
raise Exception("<projectname> should consist of letters, digits or _")
return
|
import re
def handle_exceptions(args):
"""
Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None
"""
projectname_re = re.compile(r'[^a-zA-Z0-9_]')
if args['genconfig']:
if args['--type'] not in ['scraper', 'crawler']:
raise Exception("--type has to be 'scraper' or 'crawler'")
if args['--selector'] not in ['xpath', 'css']:
raise Exception("--selector has to be 'xpath' or 'css'")
if args['generate'] or args['run']:
if args['--output_type'] not in ['json', 'csv']:
raise Exception("--output_type has to be 'json' or 'csv'")
if args['genconfig'] or args['generate'] or args['run']:
if projectname_re.search(args['<projectname>']) is not None:
raise Exception("<projectname> should consist of letters, digits or _")
if int(args['--levels']) < 1:
raise Exception("--levels should be greater than, or equal to 1")
return
|
Update exception handling for levels argument
|
Update exception handling for levels argument
|
Python
|
mit
|
scrappleapp/scrapple,AlexMathew/scrapple,AlexMathew/scrapple,scrappleapp/scrapple,AlexMathew/scrapple
|
import re
def handle_exceptions(args):
"""
Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None
"""
projectname_re = re.compile(r'[^a-zA-Z0-9_]')
if args['genconfig']:
if args['--type'] not in ['scraper', 'crawler']:
raise Exception("--type has to be 'scraper' or 'crawler'")
if args['--selector'] not in ['xpath', 'css']:
raise Exception("--selector has to be 'xpath' or 'css'")
if args['generate'] or args['run']:
if args['--output_type'] not in ['json', 'csv']:
raise Exception("--output_type has to be 'json' or 'csv'")
if args['genconfig'] or args['generate'] or args['run']:
if projectname_re.search(args['<projectname>']) is not None:
raise Exception("<projectname> should consist of letters, digits or _")
+ if int(args['--levels']) < 1:
+ raise Exception("--levels should be greater than, or equal to 1")
return
|
Update exception handling for levels argument
|
## Code Before:
import re
def handle_exceptions(args):
"""
Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None
"""
projectname_re = re.compile(r'[^a-zA-Z0-9_]')
if args['genconfig']:
if args['--type'] not in ['scraper', 'crawler']:
raise Exception("--type has to be 'scraper' or 'crawler'")
if args['--selector'] not in ['xpath', 'css']:
raise Exception("--selector has to be 'xpath' or 'css'")
if args['generate'] or args['run']:
if args['--output_type'] not in ['json', 'csv']:
raise Exception("--output_type has to be 'json' or 'csv'")
if args['genconfig'] or args['generate'] or args['run']:
if projectname_re.search(args['<projectname>']) is not None:
raise Exception("<projectname> should consist of letters, digits or _")
return
## Instruction:
Update exception handling for levels argument
## Code After:
import re
def handle_exceptions(args):
"""
Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None
"""
projectname_re = re.compile(r'[^a-zA-Z0-9_]')
if args['genconfig']:
if args['--type'] not in ['scraper', 'crawler']:
raise Exception("--type has to be 'scraper' or 'crawler'")
if args['--selector'] not in ['xpath', 'css']:
raise Exception("--selector has to be 'xpath' or 'css'")
if args['generate'] or args['run']:
if args['--output_type'] not in ['json', 'csv']:
raise Exception("--output_type has to be 'json' or 'csv'")
if args['genconfig'] or args['generate'] or args['run']:
if projectname_re.search(args['<projectname>']) is not None:
raise Exception("<projectname> should consist of letters, digits or _")
if int(args['--levels']) < 1:
raise Exception("--levels should be greater than, or equal to 1")
return
|
import re
def handle_exceptions(args):
"""
Validates the arguments passed through the CLI commands.
:param args: The arguments passed in the CLI, parsed by the docopt module
:return: None
"""
projectname_re = re.compile(r'[^a-zA-Z0-9_]')
if args['genconfig']:
if args['--type'] not in ['scraper', 'crawler']:
raise Exception("--type has to be 'scraper' or 'crawler'")
if args['--selector'] not in ['xpath', 'css']:
raise Exception("--selector has to be 'xpath' or 'css'")
if args['generate'] or args['run']:
if args['--output_type'] not in ['json', 'csv']:
raise Exception("--output_type has to be 'json' or 'csv'")
if args['genconfig'] or args['generate'] or args['run']:
if projectname_re.search(args['<projectname>']) is not None:
raise Exception("<projectname> should consist of letters, digits or _")
+ if int(args['--levels']) < 1:
+ raise Exception("--levels should be greater than, or equal to 1")
return
|
ffb42ba8e9b0a5d7a269ee9d13a5347f4ffee563
|
mama_cas/tests/test_callbacks.py
|
mama_cas/tests/test_callbacks.py
|
from django.test import TestCase
from .factories import UserFactory
from mama_cas.callbacks import user_model_attributes
from mama_cas.callbacks import user_name_attributes
class CallbacksTests(TestCase):
url = 'http://www.example.com/'
def setUp(self):
self.user = UserFactory()
def test_user_name(self):
"""
The callback should return a username and full_name
attribute.
"""
attributes = user_name_attributes(self.user, self.url)
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
self.assertIn('full_name', attributes)
self.assertEqual(attributes['full_name'], 'Ellen Cohen')
def test_user_model_attributes(self):
"""
The callback should return at least a username attribute.
"""
attributes = user_model_attributes(self.user, self.url)
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
|
from django.test import TestCase
from .factories import UserFactory
from mama_cas.callbacks import user_model_attributes
from mama_cas.callbacks import user_name_attributes
class CallbacksTests(TestCase):
def setUp(self):
self.user = UserFactory()
def test_user_name_attributes(self):
"""
The callback should return a username, full_name and
short_name attribute.
"""
attributes = user_name_attributes(self.user, 'http://www.example.com/')
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
self.assertIn('full_name', attributes)
self.assertEqual(attributes['full_name'], 'Ellen Cohen')
self.assertIn('short_name', attributes)
self.assertEqual(attributes['short_name'], 'Ellen')
def test_user_model_attributes(self):
"""The callback should return at least a username attribute."""
attributes = user_model_attributes(self.user, 'http://www.example.com/')
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
|
Test short_name for user name attributes callback
|
Test short_name for user name attributes callback
|
Python
|
bsd-3-clause
|
jbittel/django-mama-cas,orbitvu/django-mama-cas,orbitvu/django-mama-cas,jbittel/django-mama-cas
|
from django.test import TestCase
from .factories import UserFactory
from mama_cas.callbacks import user_model_attributes
from mama_cas.callbacks import user_name_attributes
class CallbacksTests(TestCase):
- url = 'http://www.example.com/'
-
def setUp(self):
self.user = UserFactory()
- def test_user_name(self):
+ def test_user_name_attributes(self):
"""
- The callback should return a username and full_name
+ The callback should return a username, full_name and
- attribute.
+ short_name attribute.
"""
- attributes = user_name_attributes(self.user, self.url)
+ attributes = user_name_attributes(self.user, 'http://www.example.com/')
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
self.assertIn('full_name', attributes)
self.assertEqual(attributes['full_name'], 'Ellen Cohen')
+ self.assertIn('short_name', attributes)
+ self.assertEqual(attributes['short_name'], 'Ellen')
def test_user_model_attributes(self):
- """
- The callback should return at least a username attribute.
+ """The callback should return at least a username attribute."""
- """
- attributes = user_model_attributes(self.user, self.url)
+ attributes = user_model_attributes(self.user, 'http://www.example.com/')
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
|
Test short_name for user name attributes callback
|
## Code Before:
from django.test import TestCase
from .factories import UserFactory
from mama_cas.callbacks import user_model_attributes
from mama_cas.callbacks import user_name_attributes
class CallbacksTests(TestCase):
url = 'http://www.example.com/'
def setUp(self):
self.user = UserFactory()
def test_user_name(self):
"""
The callback should return a username and full_name
attribute.
"""
attributes = user_name_attributes(self.user, self.url)
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
self.assertIn('full_name', attributes)
self.assertEqual(attributes['full_name'], 'Ellen Cohen')
def test_user_model_attributes(self):
"""
The callback should return at least a username attribute.
"""
attributes = user_model_attributes(self.user, self.url)
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
## Instruction:
Test short_name for user name attributes callback
## Code After:
from django.test import TestCase
from .factories import UserFactory
from mama_cas.callbacks import user_model_attributes
from mama_cas.callbacks import user_name_attributes
class CallbacksTests(TestCase):
def setUp(self):
self.user = UserFactory()
def test_user_name_attributes(self):
"""
The callback should return a username, full_name and
short_name attribute.
"""
attributes = user_name_attributes(self.user, 'http://www.example.com/')
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
self.assertIn('full_name', attributes)
self.assertEqual(attributes['full_name'], 'Ellen Cohen')
self.assertIn('short_name', attributes)
self.assertEqual(attributes['short_name'], 'Ellen')
def test_user_model_attributes(self):
"""The callback should return at least a username attribute."""
attributes = user_model_attributes(self.user, 'http://www.example.com/')
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
|
from django.test import TestCase
from .factories import UserFactory
from mama_cas.callbacks import user_model_attributes
from mama_cas.callbacks import user_name_attributes
class CallbacksTests(TestCase):
- url = 'http://www.example.com/'
-
def setUp(self):
self.user = UserFactory()
- def test_user_name(self):
+ def test_user_name_attributes(self):
? +++++++++++
"""
- The callback should return a username and full_name
? ^^^^
+ The callback should return a username, full_name and
? ^ ++++
- attribute.
+ short_name attribute.
? +++++++++++
"""
- attributes = user_name_attributes(self.user, self.url)
? ^ ^ ^^^
+ attributes = user_name_attributes(self.user, 'http://www.example.com/')
? ^^^^^^^^^^^^ ++++ ^ ^^^^^
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
self.assertIn('full_name', attributes)
self.assertEqual(attributes['full_name'], 'Ellen Cohen')
+ self.assertIn('short_name', attributes)
+ self.assertEqual(attributes['short_name'], 'Ellen')
def test_user_model_attributes(self):
- """
- The callback should return at least a username attribute.
+ """The callback should return at least a username attribute."""
? +++ +++
- """
- attributes = user_model_attributes(self.user, self.url)
? ^ ^ ^^^
+ attributes = user_model_attributes(self.user, 'http://www.example.com/')
? ^^^^^^^^^^^^ ++++ ^ ^^^^^
self.assertIn('username', attributes)
self.assertEqual(attributes['username'], 'ellen')
|
e33a68f14a13c0340b2dfcbb13931d2185735951
|
scripts/nanopolish_makerange.py
|
scripts/nanopolish_makerange.py
|
from __future__ import print_function
import sys
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
|
from __future__ import print_function
import sys
import argparse
from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
with open(filename) as handle:
recs = [(title.split(None, 1)[0], len(seq))
for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
|
Use Biopython's string based FASTA parser
|
Use Biopython's string based FASTA parser
This was introduced in Biopython 1.61 back in February 2013,
so the dependencies shouldn't matter.
You could go further here and use a generator expression
over a list comprehension?
|
Python
|
mit
|
jts/nanopolish,jts/nanopolish,jts/nanopolish,jts/nanopolish,jts/nanopolish
|
from __future__ import print_function
import sys
import argparse
- from Bio import SeqIO
+ from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
- recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
+ with open(filename) as handle:
+ recs = [(title.split(None, 1)[0], len(seq))
+ for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
|
Use Biopython's string based FASTA parser
|
## Code Before:
from __future__ import print_function
import sys
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
## Instruction:
Use Biopython's string based FASTA parser
## Code After:
from __future__ import print_function
import sys
import argparse
from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
with open(filename) as handle:
recs = [(title.split(None, 1)[0], len(seq))
for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
|
from __future__ import print_function
import sys
import argparse
- from Bio import SeqIO
+ from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
- recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
+ with open(filename) as handle:
+ recs = [(title.split(None, 1)[0], len(seq))
+ for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
|
f0f66aa917d9ec85cfbe2a0460b2d4b4d5ffe0eb
|
middleware/hat_manager.py
|
middleware/hat_manager.py
|
class HatManager(object):
def __init__(self, sense):
self.sense = sense
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
def refresh_state(self):
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
@property
def get_humidity(self):
return self._humidity
@property
def get_temperature(self):
return self._temperature
@property
def get_pressure(self):
return self._pressure
|
class HatManager(object):
def __init__(self, sense):
self.sense = sense
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
def refresh_state(self):
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
@property
def get_humidity(self):
return self._humidity
@property
def get_temperature(self):
return self._temperature
@property
def get_pressure(self):
return self._pressure
def set_message(self, msg):
self.sense.show_message(msg, scroll_speed=0.05)
|
Add a method to print a message on the sense hat
|
Add a method to print a message on the sense hat
|
Python
|
mit
|
ylerjen/pir-hat,ylerjen/pir-hat,ylerjen/pir-hat
|
class HatManager(object):
def __init__(self, sense):
self.sense = sense
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
def refresh_state(self):
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
@property
def get_humidity(self):
return self._humidity
@property
def get_temperature(self):
return self._temperature
@property
def get_pressure(self):
return self._pressure
+ def set_message(self, msg):
+ self.sense.show_message(msg, scroll_speed=0.05)
+
|
Add a method to print a message on the sense hat
|
## Code Before:
class HatManager(object):
def __init__(self, sense):
self.sense = sense
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
def refresh_state(self):
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
@property
def get_humidity(self):
return self._humidity
@property
def get_temperature(self):
return self._temperature
@property
def get_pressure(self):
return self._pressure
## Instruction:
Add a method to print a message on the sense hat
## Code After:
class HatManager(object):
def __init__(self, sense):
self.sense = sense
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
def refresh_state(self):
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
@property
def get_humidity(self):
return self._humidity
@property
def get_temperature(self):
return self._temperature
@property
def get_pressure(self):
return self._pressure
def set_message(self, msg):
self.sense.show_message(msg, scroll_speed=0.05)
|
class HatManager(object):
def __init__(self, sense):
self.sense = sense
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
def refresh_state(self):
self._pressure = self.sense.get_pressure()
self._temperature = self.sense.get_temperature()
self._humidity = self.sense.get_humidity()
@property
def get_humidity(self):
return self._humidity
@property
def get_temperature(self):
return self._temperature
@property
def get_pressure(self):
return self._pressure
+
+ def set_message(self, msg):
+ self.sense.show_message(msg, scroll_speed=0.05)
|
4446a700fcf057f83645b4861b5655773983511c
|
tests.py
|
tests.py
|
import unittest
import os
from main import generate_files
class WordsTest(unittest.TestCase):
def setUp(self):
for fname in ["test_sequences", "test_words"]:
if os.path.exists(fname):
os.remove(fname)
def test_files_created(self):
self.assertFalse(os.path.exists("test_sequences"))
self.assertFalse(os.path.exists("test_words"))
generate_files([], sequences_fname="test_sequences", words_fname="test_words")
self.assertTrue(os.path.exists("test_sequences"))
self.assertTrue(os.path.exists("test_words"))
if __name__ == '__main__':
unittest.main()
|
import unittest
import os
from main import generate_files
class WordsTest(unittest.TestCase):
def setUp(self):
# Make sure the expected files don't exist yet
for fname in ["test_sequences", "test_words"]:
if os.path.exists(fname):
os.remove(fname)
def test_files_created(self):
self.assertFalse(os.path.exists("test_sequences"))
self.assertFalse(os.path.exists("test_words"))
generate_files([], sequences_fname="test_sequences", words_fname="test_words")
self.assertTrue(os.path.exists("test_sequences"))
self.assertTrue(os.path.exists("test_words"))
def tearDown(self):
# So as not to leave a mess
for fname in ["test_sequences", "test_words"]:
if os.path.exists(fname):
os.remove(fname)
if __name__ == '__main__':
unittest.main()
|
Add teardown function as well
|
Add teardown function as well
|
Python
|
mit
|
orblivion/hellolabs_word_test
|
import unittest
import os
from main import generate_files
class WordsTest(unittest.TestCase):
def setUp(self):
+ # Make sure the expected files don't exist yet
for fname in ["test_sequences", "test_words"]:
if os.path.exists(fname):
os.remove(fname)
def test_files_created(self):
self.assertFalse(os.path.exists("test_sequences"))
self.assertFalse(os.path.exists("test_words"))
generate_files([], sequences_fname="test_sequences", words_fname="test_words")
self.assertTrue(os.path.exists("test_sequences"))
self.assertTrue(os.path.exists("test_words"))
+ def tearDown(self):
+ # So as not to leave a mess
+ for fname in ["test_sequences", "test_words"]:
+ if os.path.exists(fname):
+ os.remove(fname)
+
if __name__ == '__main__':
unittest.main()
|
Add teardown function as well
|
## Code Before:
import unittest
import os
from main import generate_files
class WordsTest(unittest.TestCase):
def setUp(self):
for fname in ["test_sequences", "test_words"]:
if os.path.exists(fname):
os.remove(fname)
def test_files_created(self):
self.assertFalse(os.path.exists("test_sequences"))
self.assertFalse(os.path.exists("test_words"))
generate_files([], sequences_fname="test_sequences", words_fname="test_words")
self.assertTrue(os.path.exists("test_sequences"))
self.assertTrue(os.path.exists("test_words"))
if __name__ == '__main__':
unittest.main()
## Instruction:
Add teardown function as well
## Code After:
import unittest
import os
from main import generate_files
class WordsTest(unittest.TestCase):
def setUp(self):
# Make sure the expected files don't exist yet
for fname in ["test_sequences", "test_words"]:
if os.path.exists(fname):
os.remove(fname)
def test_files_created(self):
self.assertFalse(os.path.exists("test_sequences"))
self.assertFalse(os.path.exists("test_words"))
generate_files([], sequences_fname="test_sequences", words_fname="test_words")
self.assertTrue(os.path.exists("test_sequences"))
self.assertTrue(os.path.exists("test_words"))
def tearDown(self):
# So as not to leave a mess
for fname in ["test_sequences", "test_words"]:
if os.path.exists(fname):
os.remove(fname)
if __name__ == '__main__':
unittest.main()
|
import unittest
import os
from main import generate_files
class WordsTest(unittest.TestCase):
def setUp(self):
+ # Make sure the expected files don't exist yet
for fname in ["test_sequences", "test_words"]:
if os.path.exists(fname):
os.remove(fname)
def test_files_created(self):
self.assertFalse(os.path.exists("test_sequences"))
self.assertFalse(os.path.exists("test_words"))
generate_files([], sequences_fname="test_sequences", words_fname="test_words")
self.assertTrue(os.path.exists("test_sequences"))
self.assertTrue(os.path.exists("test_words"))
+ def tearDown(self):
+ # So as not to leave a mess
+ for fname in ["test_sequences", "test_words"]:
+ if os.path.exists(fname):
+ os.remove(fname)
+
if __name__ == '__main__':
unittest.main()
|
372edf44efd7e028890e4623a950052a606bb123
|
shade/tests/functional/util.py
|
shade/tests/functional/util.py
|
import operator
def pick_flavor(flavors):
"""Given a flavor list pick the smallest one."""
for flavor in sorted(
flavors,
key=operator.attrgetter('ram')):
return flavor
def pick_image(images):
for image in images:
if image.name.startswith('cirros') and image.name.endswith('-uec'):
return image
for image in images:
if image.name.lower().startswith('ubuntu'):
return image
|
import operator
def pick_flavor(flavors):
"""Given a flavor list pick the smallest one."""
# Enable running functional tests against rax - which requires
# performance flavors be used for boot from volume
for flavor in sorted(
flavors,
key=operator.attrgetter('ram')):
if 'performance' in flavor.name:
return flavor
for flavor in sorted(
flavors,
key=operator.attrgetter('ram')):
return flavor
def pick_image(images):
for image in images:
if image.name.startswith('cirros') and image.name.endswith('-uec'):
return image
for image in images:
if image.name.lower().startswith('ubuntu'):
return image
for image in images:
if image.name.lower().startswith('centos'):
return image
|
Enable running tests against RAX and IBM
|
Enable running tests against RAX and IBM
Rackspace requires performance flavors be used for boot from volume. IBM
does not have Ubuntu or Cirros images in the cloud.
Change-Id: I95c15d92072311eb4aa0a4b7f551a95c4dc6e082
|
Python
|
apache-2.0
|
dtroyer/python-openstacksdk,openstack/python-openstacksdk,stackforge/python-openstacksdk,openstack-infra/shade,dtroyer/python-openstacksdk,openstack-infra/shade,stackforge/python-openstacksdk,openstack/python-openstacksdk
|
import operator
def pick_flavor(flavors):
"""Given a flavor list pick the smallest one."""
+ # Enable running functional tests against rax - which requires
+ # performance flavors be used for boot from volume
+ for flavor in sorted(
+ flavors,
+ key=operator.attrgetter('ram')):
+ if 'performance' in flavor.name:
+ return flavor
for flavor in sorted(
flavors,
key=operator.attrgetter('ram')):
return flavor
def pick_image(images):
for image in images:
if image.name.startswith('cirros') and image.name.endswith('-uec'):
return image
for image in images:
if image.name.lower().startswith('ubuntu'):
return image
+ for image in images:
+ if image.name.lower().startswith('centos'):
+ return image
|
Enable running tests against RAX and IBM
|
## Code Before:
import operator
def pick_flavor(flavors):
"""Given a flavor list pick the smallest one."""
for flavor in sorted(
flavors,
key=operator.attrgetter('ram')):
return flavor
def pick_image(images):
for image in images:
if image.name.startswith('cirros') and image.name.endswith('-uec'):
return image
for image in images:
if image.name.lower().startswith('ubuntu'):
return image
## Instruction:
Enable running tests against RAX and IBM
## Code After:
import operator
def pick_flavor(flavors):
"""Given a flavor list pick the smallest one."""
# Enable running functional tests against rax - which requires
# performance flavors be used for boot from volume
for flavor in sorted(
flavors,
key=operator.attrgetter('ram')):
if 'performance' in flavor.name:
return flavor
for flavor in sorted(
flavors,
key=operator.attrgetter('ram')):
return flavor
def pick_image(images):
for image in images:
if image.name.startswith('cirros') and image.name.endswith('-uec'):
return image
for image in images:
if image.name.lower().startswith('ubuntu'):
return image
for image in images:
if image.name.lower().startswith('centos'):
return image
|
import operator
def pick_flavor(flavors):
"""Given a flavor list pick the smallest one."""
+ # Enable running functional tests against rax - which requires
+ # performance flavors be used for boot from volume
+ for flavor in sorted(
+ flavors,
+ key=operator.attrgetter('ram')):
+ if 'performance' in flavor.name:
+ return flavor
for flavor in sorted(
flavors,
key=operator.attrgetter('ram')):
return flavor
def pick_image(images):
for image in images:
if image.name.startswith('cirros') and image.name.endswith('-uec'):
return image
for image in images:
if image.name.lower().startswith('ubuntu'):
return image
+ for image in images:
+ if image.name.lower().startswith('centos'):
+ return image
|
51c9413eb1375ff191e03d38933a772923fa55cf
|
app/__init__.py
|
app/__init__.py
|
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from config import config
bootstrap = Bootstrap()
def create_app(config_name):
application = Flask(__name__)
application.config['DEBUG'] = True
application.config.from_object(config[config_name])
config[config_name].init_app(application)
bootstrap.init_app(application)
from .main import main as main_blueprint
application.register_blueprint(main_blueprint)
main_blueprint.config = {
'BASE_TEMPLATE_DATA': application.config['BASE_TEMPLATE_DATA']
}
return application
|
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from config import config
from .main import main as main_blueprint
def create_app(config_name):
application = Flask(__name__,
static_folder='static/',
static_url_path=config[config_name].STATIC_URL_PATH)
application.config.from_object(config[config_name])
config[config_name].init_app(application)
bootstrap = Bootstrap()
bootstrap.init_app(application)
application.register_blueprint(main_blueprint,
url_prefix='/supplier')
main_blueprint.config = {
'BASE_TEMPLATE_DATA': application.config['BASE_TEMPLATE_DATA']
}
return application
|
Add /supplier prefix to main blueprint and static URLs
|
Add /supplier prefix to main blueprint and static URLs
|
Python
|
mit
|
alphagov/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend
|
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from config import config
+ from .main import main as main_blueprint
-
- bootstrap = Bootstrap()
def create_app(config_name):
- application = Flask(__name__)
+ application = Flask(__name__,
- application.config['DEBUG'] = True
+ static_folder='static/',
+ static_url_path=config[config_name].STATIC_URL_PATH)
+
application.config.from_object(config[config_name])
config[config_name].init_app(application)
+ bootstrap = Bootstrap()
bootstrap.init_app(application)
- from .main import main as main_blueprint
- application.register_blueprint(main_blueprint)
+ application.register_blueprint(main_blueprint,
+ url_prefix='/supplier')
main_blueprint.config = {
'BASE_TEMPLATE_DATA': application.config['BASE_TEMPLATE_DATA']
}
return application
|
Add /supplier prefix to main blueprint and static URLs
|
## Code Before:
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from config import config
bootstrap = Bootstrap()
def create_app(config_name):
application = Flask(__name__)
application.config['DEBUG'] = True
application.config.from_object(config[config_name])
config[config_name].init_app(application)
bootstrap.init_app(application)
from .main import main as main_blueprint
application.register_blueprint(main_blueprint)
main_blueprint.config = {
'BASE_TEMPLATE_DATA': application.config['BASE_TEMPLATE_DATA']
}
return application
## Instruction:
Add /supplier prefix to main blueprint and static URLs
## Code After:
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from config import config
from .main import main as main_blueprint
def create_app(config_name):
application = Flask(__name__,
static_folder='static/',
static_url_path=config[config_name].STATIC_URL_PATH)
application.config.from_object(config[config_name])
config[config_name].init_app(application)
bootstrap = Bootstrap()
bootstrap.init_app(application)
application.register_blueprint(main_blueprint,
url_prefix='/supplier')
main_blueprint.config = {
'BASE_TEMPLATE_DATA': application.config['BASE_TEMPLATE_DATA']
}
return application
|
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from config import config
+ from .main import main as main_blueprint
-
- bootstrap = Bootstrap()
def create_app(config_name):
- application = Flask(__name__)
? ^
+ application = Flask(__name__,
? ^
- application.config['DEBUG'] = True
+ static_folder='static/',
+ static_url_path=config[config_name].STATIC_URL_PATH)
+
application.config.from_object(config[config_name])
config[config_name].init_app(application)
+ bootstrap = Bootstrap()
bootstrap.init_app(application)
- from .main import main as main_blueprint
- application.register_blueprint(main_blueprint)
? ^
+ application.register_blueprint(main_blueprint,
? ^
+ url_prefix='/supplier')
main_blueprint.config = {
'BASE_TEMPLATE_DATA': application.config['BASE_TEMPLATE_DATA']
}
return application
|
51701b35d9ef9401abf0d86fd5726e669326390d
|
scripts/nipy_4dto3D.py
|
scripts/nipy_4dto3D.py
|
''' Tiny script to write 4D files in any format that we read (nifti,
analyze, MINC, at the moment, as nifti 3D files '''
import os
import sys
import nipy.io.imageformats as nii
if __name__ == '__main__':
try:
fname = sys.argv[1]
except IndexError:
raise OSError('Expecting 4d image filename')
img = nii.load(fname)
imgs = nii.four_to_three(img)
froot, ext = os.path.splitext(fname)
if ext in ('.gz', '.bz2'):
froot, ext = os.path.splitext(froot)
for i, img3d in enumerate(imgs):
fname3d = '%s_%04d.nii' % (froot, i)
nii.save(img3d, fname3d)
|
''' Tiny script to write 4D files in any format that we read (nifti,
analyze, MINC, at the moment, as nifti 3D files '''
import os
import nipy.externals.argparse as argparse
import nipy.io.imageformats as nii
def main():
# create the parser
parser = argparse.ArgumentParser()
# add the arguments
parser.add_argument('filename', type=str,
help='4D image filename')
# parse the command line
args = parser.parse_args()
img = nii.load(args.filename)
imgs = nii.four_to_three(img)
froot, ext = os.path.splitext(args.filename)
if ext in ('.gz', '.bz2'):
froot, ext = os.path.splitext(froot)
for i, img3d in enumerate(imgs):
fname3d = '%s_%04d.nii' % (froot, i)
nii.save(img3d, fname3d)
if __name__ == '__main__':
main()
|
Use argparse for 4D to 3D
|
Use argparse for 4D to 3D
|
Python
|
bsd-3-clause
|
nipy/nipy-labs,arokem/nipy,bthirion/nipy,alexis-roche/register,arokem/nipy,alexis-roche/niseg,bthirion/nipy,alexis-roche/nipy,bthirion/nipy,nipy/nireg,alexis-roche/nireg,nipy/nipy-labs,alexis-roche/nipy,alexis-roche/nipy,bthirion/nipy,alexis-roche/register,alexis-roche/nireg,nipy/nireg,alexis-roche/register,alexis-roche/niseg,alexis-roche/nipy,arokem/nipy,arokem/nipy
|
''' Tiny script to write 4D files in any format that we read (nifti,
analyze, MINC, at the moment, as nifti 3D files '''
import os
- import sys
+ import nipy.externals.argparse as argparse
import nipy.io.imageformats as nii
- if __name__ == '__main__':
- try:
- fname = sys.argv[1]
- except IndexError:
- raise OSError('Expecting 4d image filename')
+ def main():
+ # create the parser
+ parser = argparse.ArgumentParser()
+ # add the arguments
+ parser.add_argument('filename', type=str,
+ help='4D image filename')
+ # parse the command line
+ args = parser.parse_args()
- img = nii.load(fname)
+ img = nii.load(args.filename)
imgs = nii.four_to_three(img)
- froot, ext = os.path.splitext(fname)
+ froot, ext = os.path.splitext(args.filename)
if ext in ('.gz', '.bz2'):
froot, ext = os.path.splitext(froot)
for i, img3d in enumerate(imgs):
fname3d = '%s_%04d.nii' % (froot, i)
nii.save(img3d, fname3d)
+
+
+ if __name__ == '__main__':
+ main()
+
|
Use argparse for 4D to 3D
|
## Code Before:
''' Tiny script to write 4D files in any format that we read (nifti,
analyze, MINC, at the moment, as nifti 3D files '''
import os
import sys
import nipy.io.imageformats as nii
if __name__ == '__main__':
try:
fname = sys.argv[1]
except IndexError:
raise OSError('Expecting 4d image filename')
img = nii.load(fname)
imgs = nii.four_to_three(img)
froot, ext = os.path.splitext(fname)
if ext in ('.gz', '.bz2'):
froot, ext = os.path.splitext(froot)
for i, img3d in enumerate(imgs):
fname3d = '%s_%04d.nii' % (froot, i)
nii.save(img3d, fname3d)
## Instruction:
Use argparse for 4D to 3D
## Code After:
''' Tiny script to write 4D files in any format that we read (nifti,
analyze, MINC, at the moment, as nifti 3D files '''
import os
import nipy.externals.argparse as argparse
import nipy.io.imageformats as nii
def main():
# create the parser
parser = argparse.ArgumentParser()
# add the arguments
parser.add_argument('filename', type=str,
help='4D image filename')
# parse the command line
args = parser.parse_args()
img = nii.load(args.filename)
imgs = nii.four_to_three(img)
froot, ext = os.path.splitext(args.filename)
if ext in ('.gz', '.bz2'):
froot, ext = os.path.splitext(froot)
for i, img3d in enumerate(imgs):
fname3d = '%s_%04d.nii' % (froot, i)
nii.save(img3d, fname3d)
if __name__ == '__main__':
main()
|
''' Tiny script to write 4D files in any format that we read (nifti,
analyze, MINC, at the moment, as nifti 3D files '''
import os
- import sys
+ import nipy.externals.argparse as argparse
import nipy.io.imageformats as nii
- if __name__ == '__main__':
- try:
- fname = sys.argv[1]
- except IndexError:
- raise OSError('Expecting 4d image filename')
+ def main():
+ # create the parser
+ parser = argparse.ArgumentParser()
+ # add the arguments
+ parser.add_argument('filename', type=str,
+ help='4D image filename')
+ # parse the command line
+ args = parser.parse_args()
- img = nii.load(fname)
+ img = nii.load(args.filename)
? +++++ +++
imgs = nii.four_to_three(img)
- froot, ext = os.path.splitext(fname)
+ froot, ext = os.path.splitext(args.filename)
? +++++ +++
if ext in ('.gz', '.bz2'):
froot, ext = os.path.splitext(froot)
for i, img3d in enumerate(imgs):
fname3d = '%s_%04d.nii' % (froot, i)
nii.save(img3d, fname3d)
+
+
+ if __name__ == '__main__':
+ main()
+
|
08335e060311994a897b95302fc54a0a2b196614
|
mdx_linkify/__init__.py
|
mdx_linkify/__init__.py
|
from mdx_linkify.mdx_linkify import makeExtension
|
import sys
is_python3 = sys.version_info >= (3, 0)
if is_python3:
from mdx_linkify.mdx_linkify import makeExtension
else:
from mdx_linkify import makeExtension
assert makeExtension # Silences pep8.
|
Fix import for python2 and pypy
|
Fix import for python2 and pypy
|
Python
|
mit
|
daGrevis/mdx_linkify
|
- from mdx_linkify.mdx_linkify import makeExtension
+ import sys
+
+ is_python3 = sys.version_info >= (3, 0)
+
+ if is_python3:
+ from mdx_linkify.mdx_linkify import makeExtension
+ else:
+ from mdx_linkify import makeExtension
+
+
+ assert makeExtension # Silences pep8.
+
|
Fix import for python2 and pypy
|
## Code Before:
from mdx_linkify.mdx_linkify import makeExtension
## Instruction:
Fix import for python2 and pypy
## Code After:
import sys
is_python3 = sys.version_info >= (3, 0)
if is_python3:
from mdx_linkify.mdx_linkify import makeExtension
else:
from mdx_linkify import makeExtension
assert makeExtension # Silences pep8.
|
+ import sys
+
+
+ is_python3 = sys.version_info >= (3, 0)
+
+ if is_python3:
- from mdx_linkify.mdx_linkify import makeExtension
+ from mdx_linkify.mdx_linkify import makeExtension
? ++++
+ else:
+ from mdx_linkify import makeExtension
+
+
+ assert makeExtension # Silences pep8.
|
ea3e327bb602689e136479ce41f568aa2ee47cf4
|
databot/utils/html.py
|
databot/utils/html.py
|
import bs4
import cgi
def get_content(data, errors='strict'):
headers = {k.lower(): v for k, v in data.get('headers', {}).items()}
content_type_header = headers.get('content-type', '')
content_type, params = cgi.parse_header(content_type_header)
if content_type.lower() in ('text/html', 'text/xml'):
soup = bs4.BeautifulSoup(data['content'], 'lxml', from_encoding=data['encoding'])
return data['content'].decode(soup.original_encoding, errors)
elif content_type.startswith('text/'):
return data['content'].decode(data['encoding'], errors)
else:
return data['content']
|
import bs4
import cgi
def get_page_encoding(soup, default_encoding=None):
for meta in soup.select('head > meta[http-equiv="Content-Type"]'):
content_type, params = cgi.parse_header(meta['content'])
if 'charset' in params:
return params['charset']
return default_encoding
def get_content(data, errors='strict'):
headers = {k.lower(): v for k, v in data.get('headers', {}).items()}
content_type_header = headers.get('content-type', '')
content_type, params = cgi.parse_header(content_type_header)
if content_type.lower() in ('text/html', 'text/xml'):
soup = bs4.BeautifulSoup(data['content'], 'lxml', from_encoding=data['encoding'])
encoding = get_page_encoding(soup, soup.original_encoding)
return data['content'].decode(encoding, errors)
elif content_type.startswith('text/'):
return data['content'].decode(data['encoding'], errors)
else:
return data['content']
|
Improve detection of page encoding
|
Improve detection of page encoding
|
Python
|
agpl-3.0
|
sirex/databot,sirex/databot
|
import bs4
import cgi
+
+
+ def get_page_encoding(soup, default_encoding=None):
+ for meta in soup.select('head > meta[http-equiv="Content-Type"]'):
+ content_type, params = cgi.parse_header(meta['content'])
+ if 'charset' in params:
+ return params['charset']
+ return default_encoding
def get_content(data, errors='strict'):
headers = {k.lower(): v for k, v in data.get('headers', {}).items()}
content_type_header = headers.get('content-type', '')
content_type, params = cgi.parse_header(content_type_header)
if content_type.lower() in ('text/html', 'text/xml'):
soup = bs4.BeautifulSoup(data['content'], 'lxml', from_encoding=data['encoding'])
+ encoding = get_page_encoding(soup, soup.original_encoding)
- return data['content'].decode(soup.original_encoding, errors)
+ return data['content'].decode(encoding, errors)
elif content_type.startswith('text/'):
return data['content'].decode(data['encoding'], errors)
else:
return data['content']
|
Improve detection of page encoding
|
## Code Before:
import bs4
import cgi
def get_content(data, errors='strict'):
headers = {k.lower(): v for k, v in data.get('headers', {}).items()}
content_type_header = headers.get('content-type', '')
content_type, params = cgi.parse_header(content_type_header)
if content_type.lower() in ('text/html', 'text/xml'):
soup = bs4.BeautifulSoup(data['content'], 'lxml', from_encoding=data['encoding'])
return data['content'].decode(soup.original_encoding, errors)
elif content_type.startswith('text/'):
return data['content'].decode(data['encoding'], errors)
else:
return data['content']
## Instruction:
Improve detection of page encoding
## Code After:
import bs4
import cgi
def get_page_encoding(soup, default_encoding=None):
for meta in soup.select('head > meta[http-equiv="Content-Type"]'):
content_type, params = cgi.parse_header(meta['content'])
if 'charset' in params:
return params['charset']
return default_encoding
def get_content(data, errors='strict'):
headers = {k.lower(): v for k, v in data.get('headers', {}).items()}
content_type_header = headers.get('content-type', '')
content_type, params = cgi.parse_header(content_type_header)
if content_type.lower() in ('text/html', 'text/xml'):
soup = bs4.BeautifulSoup(data['content'], 'lxml', from_encoding=data['encoding'])
encoding = get_page_encoding(soup, soup.original_encoding)
return data['content'].decode(encoding, errors)
elif content_type.startswith('text/'):
return data['content'].decode(data['encoding'], errors)
else:
return data['content']
|
import bs4
import cgi
+
+
+ def get_page_encoding(soup, default_encoding=None):
+ for meta in soup.select('head > meta[http-equiv="Content-Type"]'):
+ content_type, params = cgi.parse_header(meta['content'])
+ if 'charset' in params:
+ return params['charset']
+ return default_encoding
def get_content(data, errors='strict'):
headers = {k.lower(): v for k, v in data.get('headers', {}).items()}
content_type_header = headers.get('content-type', '')
content_type, params = cgi.parse_header(content_type_header)
if content_type.lower() in ('text/html', 'text/xml'):
soup = bs4.BeautifulSoup(data['content'], 'lxml', from_encoding=data['encoding'])
+ encoding = get_page_encoding(soup, soup.original_encoding)
- return data['content'].decode(soup.original_encoding, errors)
? --------------
+ return data['content'].decode(encoding, errors)
elif content_type.startswith('text/'):
return data['content'].decode(data['encoding'], errors)
else:
return data['content']
|
2e63438deb6f733e7e905f4ea299aa0bdce88b3c
|
changes/api/author_build_index.py
|
changes/api/author_build_index.py
|
from __future__ import absolute_import, division, unicode_literals
from sqlalchemy.orm import joinedload
from changes.api.base import APIView
from changes.api.auth import get_current_user
from changes.models import Author, Build
class AuthorBuildIndexAPIView(APIView):
def _get_author(self, author_id):
if author_id == 'me':
user = get_current_user()
if user is None:
return
return Author.query.filter_by(email=user.email).first()
return Author.query.get(author_id)
def get(self, author_id):
if author_id == 'me' and not get_current_user():
return '', 401
author = self._get_author(author_id)
if not author:
return self.respond([])
queryset = Build.query.options(
joinedload('project'),
joinedload('author'),
joinedload('source').joinedload('revision'),
).filter(
Build.author_id == author.id,
).order_by(Build.date_created.desc(), Build.date_started.desc())
return self.paginate(queryset)
def get_stream_channels(self, author_id):
author = self._get_author(author_id)
if not author:
return []
return ['authors:{0}:builds'.format(author.id.hex)]
|
from __future__ import absolute_import, division, unicode_literals
from sqlalchemy.orm import joinedload
from uuid import UUID
from changes.api.base import APIView
from changes.api.auth import get_current_user
from changes.models import Author, Build
class AuthorBuildIndexAPIView(APIView):
def _get_author(self, author_id):
if author_id == 'me':
user = get_current_user()
if user is None:
return None
return Author.query.filter_by(email=user.email).first()
try:
author_id = UUID(author_id)
except ValueError:
return None
return Author.query.get(author_id)
def get(self, author_id):
if author_id == 'me' and not get_current_user():
return '', 401
author = self._get_author(author_id)
if not author:
return '', 404
queryset = Build.query.options(
joinedload('project'),
joinedload('author'),
joinedload('source').joinedload('revision'),
).filter(
Build.author_id == author.id,
).order_by(Build.date_created.desc(), Build.date_started.desc())
return self.paginate(queryset)
def get_stream_channels(self, author_id):
author = self._get_author(author_id)
if not author:
return []
return ['authors:{0}:builds'.format(author.id.hex)]
|
Validate author_id and return 404 for missing data
|
Validate author_id and return 404 for missing data
|
Python
|
apache-2.0
|
wfxiang08/changes,dropbox/changes,dropbox/changes,bowlofstew/changes,bowlofstew/changes,dropbox/changes,wfxiang08/changes,wfxiang08/changes,bowlofstew/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes
|
from __future__ import absolute_import, division, unicode_literals
from sqlalchemy.orm import joinedload
+ from uuid import UUID
from changes.api.base import APIView
from changes.api.auth import get_current_user
from changes.models import Author, Build
class AuthorBuildIndexAPIView(APIView):
def _get_author(self, author_id):
if author_id == 'me':
user = get_current_user()
if user is None:
- return
+ return None
return Author.query.filter_by(email=user.email).first()
+ try:
+ author_id = UUID(author_id)
+ except ValueError:
+ return None
return Author.query.get(author_id)
def get(self, author_id):
if author_id == 'me' and not get_current_user():
return '', 401
author = self._get_author(author_id)
if not author:
- return self.respond([])
+ return '', 404
queryset = Build.query.options(
joinedload('project'),
joinedload('author'),
joinedload('source').joinedload('revision'),
).filter(
Build.author_id == author.id,
).order_by(Build.date_created.desc(), Build.date_started.desc())
return self.paginate(queryset)
def get_stream_channels(self, author_id):
author = self._get_author(author_id)
if not author:
return []
return ['authors:{0}:builds'.format(author.id.hex)]
|
Validate author_id and return 404 for missing data
|
## Code Before:
from __future__ import absolute_import, division, unicode_literals
from sqlalchemy.orm import joinedload
from changes.api.base import APIView
from changes.api.auth import get_current_user
from changes.models import Author, Build
class AuthorBuildIndexAPIView(APIView):
def _get_author(self, author_id):
if author_id == 'me':
user = get_current_user()
if user is None:
return
return Author.query.filter_by(email=user.email).first()
return Author.query.get(author_id)
def get(self, author_id):
if author_id == 'me' and not get_current_user():
return '', 401
author = self._get_author(author_id)
if not author:
return self.respond([])
queryset = Build.query.options(
joinedload('project'),
joinedload('author'),
joinedload('source').joinedload('revision'),
).filter(
Build.author_id == author.id,
).order_by(Build.date_created.desc(), Build.date_started.desc())
return self.paginate(queryset)
def get_stream_channels(self, author_id):
author = self._get_author(author_id)
if not author:
return []
return ['authors:{0}:builds'.format(author.id.hex)]
## Instruction:
Validate author_id and return 404 for missing data
## Code After:
from __future__ import absolute_import, division, unicode_literals
from sqlalchemy.orm import joinedload
from uuid import UUID
from changes.api.base import APIView
from changes.api.auth import get_current_user
from changes.models import Author, Build
class AuthorBuildIndexAPIView(APIView):
def _get_author(self, author_id):
if author_id == 'me':
user = get_current_user()
if user is None:
return None
return Author.query.filter_by(email=user.email).first()
try:
author_id = UUID(author_id)
except ValueError:
return None
return Author.query.get(author_id)
def get(self, author_id):
if author_id == 'me' and not get_current_user():
return '', 401
author = self._get_author(author_id)
if not author:
return '', 404
queryset = Build.query.options(
joinedload('project'),
joinedload('author'),
joinedload('source').joinedload('revision'),
).filter(
Build.author_id == author.id,
).order_by(Build.date_created.desc(), Build.date_started.desc())
return self.paginate(queryset)
def get_stream_channels(self, author_id):
author = self._get_author(author_id)
if not author:
return []
return ['authors:{0}:builds'.format(author.id.hex)]
|
from __future__ import absolute_import, division, unicode_literals
from sqlalchemy.orm import joinedload
+ from uuid import UUID
from changes.api.base import APIView
from changes.api.auth import get_current_user
from changes.models import Author, Build
class AuthorBuildIndexAPIView(APIView):
def _get_author(self, author_id):
if author_id == 'me':
user = get_current_user()
if user is None:
- return
+ return None
? +++++
return Author.query.filter_by(email=user.email).first()
+ try:
+ author_id = UUID(author_id)
+ except ValueError:
+ return None
return Author.query.get(author_id)
def get(self, author_id):
if author_id == 'me' and not get_current_user():
return '', 401
author = self._get_author(author_id)
if not author:
- return self.respond([])
+ return '', 404
queryset = Build.query.options(
joinedload('project'),
joinedload('author'),
joinedload('source').joinedload('revision'),
).filter(
Build.author_id == author.id,
).order_by(Build.date_created.desc(), Build.date_started.desc())
return self.paginate(queryset)
def get_stream_channels(self, author_id):
author = self._get_author(author_id)
if not author:
return []
return ['authors:{0}:builds'.format(author.id.hex)]
|
69de2261c30a8bab1ac4d0749cf32baec49e0cc4
|
webapp/byceps/blueprints/board/views.py
|
webapp/byceps/blueprints/board/views.py
|
from ...util.framework import create_blueprint
from ...util.templating import templated
from ..authorization.registry import permission_registry
from .authorization import BoardPostingPermission, BoardTopicPermission
from .models import Category, Topic
blueprint = create_blueprint('board', __name__)
permission_registry.register_enum('board_topic', BoardTopicPermission)
permission_registry.register_enum('board_posting', BoardPostingPermission)
@blueprint.route('/categories')
@templated
def category_index():
"""List categories."""
categories = Category.query.for_current_brand().all()
return {'categories': categories}
@blueprint.route('/categories/<id>')
@templated
def category_view(id):
"""List latest topics in the category."""
category = Category.query.get(id)
return {'category': category}
@blueprint.route('/topics/<id>')
@templated
def topic_view(id):
"""List postings for the topic."""
topic = Topic.query.get(id)
return {'topic': topic}
|
from ...util.framework import create_blueprint
from ...util.templating import templated
from ..authorization.registry import permission_registry
from .authorization import BoardPostingPermission, BoardTopicPermission
from .models import Category, Topic
blueprint = create_blueprint('board', __name__)
permission_registry.register_enum('board_topic', BoardTopicPermission)
permission_registry.register_enum('board_posting', BoardPostingPermission)
@blueprint.route('/categories')
@templated
def category_index():
"""List categories."""
categories = Category.query.for_current_brand().all()
return {'categories': categories}
@blueprint.route('/categories/<id>')
@templated
def category_view(id):
"""List latest topics in the category."""
category = Category.query.get_or_404(id)
return {'category': category}
@blueprint.route('/topics/<id>')
@templated
def topic_view(id):
"""List postings for the topic."""
topic = Topic.query.get_or_404(id)
return {'topic': topic}
|
Throw 404 if category/topic with given id is not found.
|
Throw 404 if category/topic with given id is not found.
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps,m-ober/byceps
|
from ...util.framework import create_blueprint
from ...util.templating import templated
from ..authorization.registry import permission_registry
from .authorization import BoardPostingPermission, BoardTopicPermission
from .models import Category, Topic
blueprint = create_blueprint('board', __name__)
permission_registry.register_enum('board_topic', BoardTopicPermission)
permission_registry.register_enum('board_posting', BoardPostingPermission)
@blueprint.route('/categories')
@templated
def category_index():
"""List categories."""
categories = Category.query.for_current_brand().all()
return {'categories': categories}
@blueprint.route('/categories/<id>')
@templated
def category_view(id):
"""List latest topics in the category."""
- category = Category.query.get(id)
+ category = Category.query.get_or_404(id)
return {'category': category}
@blueprint.route('/topics/<id>')
@templated
def topic_view(id):
"""List postings for the topic."""
- topic = Topic.query.get(id)
+ topic = Topic.query.get_or_404(id)
return {'topic': topic}
|
Throw 404 if category/topic with given id is not found.
|
## Code Before:
from ...util.framework import create_blueprint
from ...util.templating import templated
from ..authorization.registry import permission_registry
from .authorization import BoardPostingPermission, BoardTopicPermission
from .models import Category, Topic
blueprint = create_blueprint('board', __name__)
permission_registry.register_enum('board_topic', BoardTopicPermission)
permission_registry.register_enum('board_posting', BoardPostingPermission)
@blueprint.route('/categories')
@templated
def category_index():
"""List categories."""
categories = Category.query.for_current_brand().all()
return {'categories': categories}
@blueprint.route('/categories/<id>')
@templated
def category_view(id):
"""List latest topics in the category."""
category = Category.query.get(id)
return {'category': category}
@blueprint.route('/topics/<id>')
@templated
def topic_view(id):
"""List postings for the topic."""
topic = Topic.query.get(id)
return {'topic': topic}
## Instruction:
Throw 404 if category/topic with given id is not found.
## Code After:
from ...util.framework import create_blueprint
from ...util.templating import templated
from ..authorization.registry import permission_registry
from .authorization import BoardPostingPermission, BoardTopicPermission
from .models import Category, Topic
blueprint = create_blueprint('board', __name__)
permission_registry.register_enum('board_topic', BoardTopicPermission)
permission_registry.register_enum('board_posting', BoardPostingPermission)
@blueprint.route('/categories')
@templated
def category_index():
"""List categories."""
categories = Category.query.for_current_brand().all()
return {'categories': categories}
@blueprint.route('/categories/<id>')
@templated
def category_view(id):
"""List latest topics in the category."""
category = Category.query.get_or_404(id)
return {'category': category}
@blueprint.route('/topics/<id>')
@templated
def topic_view(id):
"""List postings for the topic."""
topic = Topic.query.get_or_404(id)
return {'topic': topic}
|
from ...util.framework import create_blueprint
from ...util.templating import templated
from ..authorization.registry import permission_registry
from .authorization import BoardPostingPermission, BoardTopicPermission
from .models import Category, Topic
blueprint = create_blueprint('board', __name__)
permission_registry.register_enum('board_topic', BoardTopicPermission)
permission_registry.register_enum('board_posting', BoardPostingPermission)
@blueprint.route('/categories')
@templated
def category_index():
"""List categories."""
categories = Category.query.for_current_brand().all()
return {'categories': categories}
@blueprint.route('/categories/<id>')
@templated
def category_view(id):
"""List latest topics in the category."""
- category = Category.query.get(id)
+ category = Category.query.get_or_404(id)
? +++++++
return {'category': category}
@blueprint.route('/topics/<id>')
@templated
def topic_view(id):
"""List postings for the topic."""
- topic = Topic.query.get(id)
+ topic = Topic.query.get_or_404(id)
? +++++++
return {'topic': topic}
|
cc76b7658a62528137f14733731b6b3f3a541384
|
booster_bdd/features/steps/stackAnalyses.py
|
booster_bdd/features/steps/stackAnalyses.py
|
from behave import when, then
from features.src.support import helpers
from features.src.stackAnalyses import StackAnalyses
from pyshould import should_not
@when(u'I send Maven package manifest pom-effective.xml to stack analysis')
def when_send_manifest(context):
global sa
sa = StackAnalyses()
spaceName = helpers.getSpaceName()
codebaseUrl = sa.getCodebaseUrl()
stackAnalysesKey = sa.getReportKey(codebaseUrl)
helpers.setStackReportKey(stackAnalysesKey)
stackAnalysesKey | should_not.be_none().desc("Obtained Stack Analyses key")
@then(u'I should receive JSON response with stack analysis data')
def then_receive_stack_json(context):
spaceName = helpers.getSpaceName()
stackAnalysesKey = helpers.getStackReportKey()
reportText = sa.getStackReport(stackAnalysesKey)
reportText | should_not.be_none().desc("Obtained Stack Analyses Report")
|
from behave import when, then
from features.src.support import helpers
from features.src.stackAnalyses import StackAnalyses
from pyshould import should_not
@when(u'I send Maven package manifest pom-effective.xml to stack analysis')
def when_send_manifest(context):
sa = StackAnalyses()
spaceName = helpers.getSpaceName()
codebaseUrl = sa.getCodebaseUrl()
stackAnalysesKey = sa.getReportKey(codebaseUrl)
helpers.setStackReportKey(stackAnalysesKey)
stackAnalysesKey | should_not.be_none().desc("Obtained Stack Analyses key")
context.sa = sa
@then(u'I should receive JSON response with stack analysis data')
def then_receive_stack_json(context):
spaceName = helpers.getSpaceName()
stackAnalysesKey = helpers.getStackReportKey()
reportText = context.sa.getStackReport(stackAnalysesKey)
reportText | should_not.be_none().desc("Obtained Stack Analyses Report")
|
Store stack analysis in the context
|
Store stack analysis in the context
|
Python
|
apache-2.0
|
ldimaggi/fabric8-test,ldimaggi/fabric8-test,ldimaggi/fabric8-test,ldimaggi/fabric8-test,ldimaggi/fabric8-test,ldimaggi/fabric8-test
|
from behave import when, then
from features.src.support import helpers
from features.src.stackAnalyses import StackAnalyses
from pyshould import should_not
@when(u'I send Maven package manifest pom-effective.xml to stack analysis')
def when_send_manifest(context):
- global sa
sa = StackAnalyses()
spaceName = helpers.getSpaceName()
codebaseUrl = sa.getCodebaseUrl()
stackAnalysesKey = sa.getReportKey(codebaseUrl)
helpers.setStackReportKey(stackAnalysesKey)
stackAnalysesKey | should_not.be_none().desc("Obtained Stack Analyses key")
+ context.sa = sa
@then(u'I should receive JSON response with stack analysis data')
def then_receive_stack_json(context):
spaceName = helpers.getSpaceName()
stackAnalysesKey = helpers.getStackReportKey()
- reportText = sa.getStackReport(stackAnalysesKey)
+ reportText = context.sa.getStackReport(stackAnalysesKey)
reportText | should_not.be_none().desc("Obtained Stack Analyses Report")
|
Store stack analysis in the context
|
## Code Before:
from behave import when, then
from features.src.support import helpers
from features.src.stackAnalyses import StackAnalyses
from pyshould import should_not
@when(u'I send Maven package manifest pom-effective.xml to stack analysis')
def when_send_manifest(context):
global sa
sa = StackAnalyses()
spaceName = helpers.getSpaceName()
codebaseUrl = sa.getCodebaseUrl()
stackAnalysesKey = sa.getReportKey(codebaseUrl)
helpers.setStackReportKey(stackAnalysesKey)
stackAnalysesKey | should_not.be_none().desc("Obtained Stack Analyses key")
@then(u'I should receive JSON response with stack analysis data')
def then_receive_stack_json(context):
spaceName = helpers.getSpaceName()
stackAnalysesKey = helpers.getStackReportKey()
reportText = sa.getStackReport(stackAnalysesKey)
reportText | should_not.be_none().desc("Obtained Stack Analyses Report")
## Instruction:
Store stack analysis in the context
## Code After:
from behave import when, then
from features.src.support import helpers
from features.src.stackAnalyses import StackAnalyses
from pyshould import should_not
@when(u'I send Maven package manifest pom-effective.xml to stack analysis')
def when_send_manifest(context):
sa = StackAnalyses()
spaceName = helpers.getSpaceName()
codebaseUrl = sa.getCodebaseUrl()
stackAnalysesKey = sa.getReportKey(codebaseUrl)
helpers.setStackReportKey(stackAnalysesKey)
stackAnalysesKey | should_not.be_none().desc("Obtained Stack Analyses key")
context.sa = sa
@then(u'I should receive JSON response with stack analysis data')
def then_receive_stack_json(context):
spaceName = helpers.getSpaceName()
stackAnalysesKey = helpers.getStackReportKey()
reportText = context.sa.getStackReport(stackAnalysesKey)
reportText | should_not.be_none().desc("Obtained Stack Analyses Report")
|
from behave import when, then
from features.src.support import helpers
from features.src.stackAnalyses import StackAnalyses
from pyshould import should_not
@when(u'I send Maven package manifest pom-effective.xml to stack analysis')
def when_send_manifest(context):
- global sa
sa = StackAnalyses()
spaceName = helpers.getSpaceName()
codebaseUrl = sa.getCodebaseUrl()
stackAnalysesKey = sa.getReportKey(codebaseUrl)
helpers.setStackReportKey(stackAnalysesKey)
stackAnalysesKey | should_not.be_none().desc("Obtained Stack Analyses key")
+ context.sa = sa
@then(u'I should receive JSON response with stack analysis data')
def then_receive_stack_json(context):
spaceName = helpers.getSpaceName()
stackAnalysesKey = helpers.getStackReportKey()
- reportText = sa.getStackReport(stackAnalysesKey)
+ reportText = context.sa.getStackReport(stackAnalysesKey)
? ++++++++
reportText | should_not.be_none().desc("Obtained Stack Analyses Report")
|
9f005120c6d408e8cf3097dd74d5dada24305c88
|
src/jsonlogger.py
|
src/jsonlogger.py
|
import logging
import json
import re
from datetime import datetime
class JsonFormatter(logging.Formatter):
"""A custom formatter to format logging records as json objects"""
def parse(self):
standard_formatters = re.compile(r'\((.*?)\)', re.IGNORECASE)
return standard_formatters.findall(self._fmt)
def format(self, record):
"""Formats a log record and serializes to json"""
mappings = {
'asctime': create_timestamp,
'message': lambda r: r.msg,
}
formatters = self.parse()
log_record = {}
for formatter in formatters:
try:
log_record[formatter] = mappings[formatter](record)
except KeyError:
log_record[formatter] = record.__dict__[formatter]
return json.dumps(log_record)
def create_timestamp(record):
"""Creates a human readable timestamp for a log records created date"""
timestamp = datetime.fromtimestamp(record.created)
return timestamp.strftime("%y-%m-%d %H:%M:%S,%f"),
|
import logging
import json
import re
class JsonFormatter(logging.Formatter):
"""A custom formatter to format logging records as json objects"""
def parse(self):
standard_formatters = re.compile(r'\((.*?)\)', re.IGNORECASE)
return standard_formatters.findall(self._fmt)
def format(self, record):
"""Formats a log record and serializes to json"""
formatters = self.parse()
record.message = record.getMessage()
# only format time if needed
if "asctime" in formatters:
record.asctime = self.formatTime(record, self.datefmt)
log_record = {}
for formatter in formatters:
log_record[formatter] = record.__dict__[formatter]
return json.dumps(log_record)
|
Use the same logic to format message and asctime than the standard library.
|
Use the same logic to format message and asctime than the standard library.
This way we producte better message text on some circumstances when not logging
a string and use the date formater from the base class that uses the date format
configured from a file or a dict.
|
Python
|
bsd-2-clause
|
madzak/python-json-logger,bbc/python-json-logger
|
import logging
import json
import re
- from datetime import datetime
+
class JsonFormatter(logging.Formatter):
"""A custom formatter to format logging records as json objects"""
def parse(self):
standard_formatters = re.compile(r'\((.*?)\)', re.IGNORECASE)
return standard_formatters.findall(self._fmt)
def format(self, record):
"""Formats a log record and serializes to json"""
- mappings = {
- 'asctime': create_timestamp,
- 'message': lambda r: r.msg,
- }
formatters = self.parse()
+ record.message = record.getMessage()
+ # only format time if needed
+ if "asctime" in formatters:
+ record.asctime = self.formatTime(record, self.datefmt)
+
log_record = {}
for formatter in formatters:
- try:
- log_record[formatter] = mappings[formatter](record)
- except KeyError:
- log_record[formatter] = record.__dict__[formatter]
+ log_record[formatter] = record.__dict__[formatter]
return json.dumps(log_record)
- def create_timestamp(record):
- """Creates a human readable timestamp for a log records created date"""
-
- timestamp = datetime.fromtimestamp(record.created)
- return timestamp.strftime("%y-%m-%d %H:%M:%S,%f"),
-
|
Use the same logic to format message and asctime than the standard library.
|
## Code Before:
import logging
import json
import re
from datetime import datetime
class JsonFormatter(logging.Formatter):
"""A custom formatter to format logging records as json objects"""
def parse(self):
standard_formatters = re.compile(r'\((.*?)\)', re.IGNORECASE)
return standard_formatters.findall(self._fmt)
def format(self, record):
"""Formats a log record and serializes to json"""
mappings = {
'asctime': create_timestamp,
'message': lambda r: r.msg,
}
formatters = self.parse()
log_record = {}
for formatter in formatters:
try:
log_record[formatter] = mappings[formatter](record)
except KeyError:
log_record[formatter] = record.__dict__[formatter]
return json.dumps(log_record)
def create_timestamp(record):
"""Creates a human readable timestamp for a log records created date"""
timestamp = datetime.fromtimestamp(record.created)
return timestamp.strftime("%y-%m-%d %H:%M:%S,%f"),
## Instruction:
Use the same logic to format message and asctime than the standard library.
## Code After:
import logging
import json
import re
class JsonFormatter(logging.Formatter):
"""A custom formatter to format logging records as json objects"""
def parse(self):
standard_formatters = re.compile(r'\((.*?)\)', re.IGNORECASE)
return standard_formatters.findall(self._fmt)
def format(self, record):
"""Formats a log record and serializes to json"""
formatters = self.parse()
record.message = record.getMessage()
# only format time if needed
if "asctime" in formatters:
record.asctime = self.formatTime(record, self.datefmt)
log_record = {}
for formatter in formatters:
log_record[formatter] = record.__dict__[formatter]
return json.dumps(log_record)
|
import logging
import json
import re
- from datetime import datetime
+
class JsonFormatter(logging.Formatter):
"""A custom formatter to format logging records as json objects"""
def parse(self):
standard_formatters = re.compile(r'\((.*?)\)', re.IGNORECASE)
return standard_formatters.findall(self._fmt)
def format(self, record):
"""Formats a log record and serializes to json"""
- mappings = {
- 'asctime': create_timestamp,
- 'message': lambda r: r.msg,
- }
formatters = self.parse()
+ record.message = record.getMessage()
+ # only format time if needed
+ if "asctime" in formatters:
+ record.asctime = self.formatTime(record, self.datefmt)
+
log_record = {}
for formatter in formatters:
- try:
- log_record[formatter] = mappings[formatter](record)
- except KeyError:
- log_record[formatter] = record.__dict__[formatter]
? ----
+ log_record[formatter] = record.__dict__[formatter]
return json.dumps(log_record)
-
- def create_timestamp(record):
- """Creates a human readable timestamp for a log records created date"""
-
- timestamp = datetime.fromtimestamp(record.created)
- return timestamp.strftime("%y-%m-%d %H:%M:%S,%f"),
|
3cd25ea433518ec9b25a5e646e63413ebd0ffcd4
|
parse.py
|
parse.py
|
import sys
indentation = 0
repl = [
('%', '_ARSCL', '['),
('$', '_ARSCR', ']'),
('#', '_EQOP', '='),
('<', '_PARL', '('),
('>', '_PARR', ')'),
]
sin = sys.argv[1]
for r in repl:
sin = sin.replace(r[0], r[1])
for r in repl:
sin = sin.replace(r[1], r[2])
sin = sin.replace('\\n', '\n')
for l in sin.splitlines():
exec(l)
|
import sys
import simplejson as json
indentation = 0
lang_def = None
with open('language.json') as lang_def_file:
lang_def = json.loads(lang_def_file.read())
if lang_def is None:
print("error reading json language definition")
exit(1)
repl = lang_def['rules']
sin = sys.argv[1]
for r in repl:
sin = sin.replace(r['lang_rep'], r['il_rep'])
for r in repl:
sin = sin.replace(r['il_rep'], r['python_rep'])
sin = sin.replace('\\n', '\n')
for l in sin.splitlines():
try:
r = eval(l)
if r is not None:
print(r)
except:
try:
exec(l)
except:
print("ERROR OMG ERROR" + str(l))
|
Read json language rep and try to eval/exec stdin
|
Read json language rep and try to eval/exec stdin
|
Python
|
unlicense
|
philipdexter/build-a-lang
|
import sys
+ import simplejson as json
indentation = 0
- repl = [
- ('%', '_ARSCL', '['),
- ('$', '_ARSCR', ']'),
- ('#', '_EQOP', '='),
- ('<', '_PARL', '('),
- ('>', '_PARR', ')'),
- ]
+ lang_def = None
+ with open('language.json') as lang_def_file:
+ lang_def = json.loads(lang_def_file.read())
+
+ if lang_def is None:
+ print("error reading json language definition")
+ exit(1)
+
+ repl = lang_def['rules']
sin = sys.argv[1]
for r in repl:
- sin = sin.replace(r[0], r[1])
+ sin = sin.replace(r['lang_rep'], r['il_rep'])
for r in repl:
- sin = sin.replace(r[1], r[2])
+ sin = sin.replace(r['il_rep'], r['python_rep'])
sin = sin.replace('\\n', '\n')
for l in sin.splitlines():
- exec(l)
+ try:
+ r = eval(l)
+ if r is not None:
+ print(r)
+ except:
+ try:
+ exec(l)
+ except:
+ print("ERROR OMG ERROR" + str(l))
|
Read json language rep and try to eval/exec stdin
|
## Code Before:
import sys
indentation = 0
repl = [
('%', '_ARSCL', '['),
('$', '_ARSCR', ']'),
('#', '_EQOP', '='),
('<', '_PARL', '('),
('>', '_PARR', ')'),
]
sin = sys.argv[1]
for r in repl:
sin = sin.replace(r[0], r[1])
for r in repl:
sin = sin.replace(r[1], r[2])
sin = sin.replace('\\n', '\n')
for l in sin.splitlines():
exec(l)
## Instruction:
Read json language rep and try to eval/exec stdin
## Code After:
import sys
import simplejson as json
indentation = 0
lang_def = None
with open('language.json') as lang_def_file:
lang_def = json.loads(lang_def_file.read())
if lang_def is None:
print("error reading json language definition")
exit(1)
repl = lang_def['rules']
sin = sys.argv[1]
for r in repl:
sin = sin.replace(r['lang_rep'], r['il_rep'])
for r in repl:
sin = sin.replace(r['il_rep'], r['python_rep'])
sin = sin.replace('\\n', '\n')
for l in sin.splitlines():
try:
r = eval(l)
if r is not None:
print(r)
except:
try:
exec(l)
except:
print("ERROR OMG ERROR" + str(l))
|
import sys
+ import simplejson as json
indentation = 0
- repl = [
- ('%', '_ARSCL', '['),
- ('$', '_ARSCR', ']'),
- ('#', '_EQOP', '='),
- ('<', '_PARL', '('),
- ('>', '_PARR', ')'),
- ]
+ lang_def = None
+ with open('language.json') as lang_def_file:
+ lang_def = json.loads(lang_def_file.read())
+
+ if lang_def is None:
+ print("error reading json language definition")
+ exit(1)
+
+ repl = lang_def['rules']
sin = sys.argv[1]
for r in repl:
- sin = sin.replace(r[0], r[1])
? ^ ^
+ sin = sin.replace(r['lang_rep'], r['il_rep'])
? ^^^^^^^^^^ ^^^^^^^^
for r in repl:
- sin = sin.replace(r[1], r[2])
+ sin = sin.replace(r['il_rep'], r['python_rep'])
sin = sin.replace('\\n', '\n')
for l in sin.splitlines():
- exec(l)
+ try:
+ r = eval(l)
+ if r is not None:
+ print(r)
+ except:
+ try:
+ exec(l)
+ except:
+ print("ERROR OMG ERROR" + str(l))
|
b0ae4cb386411ae8ae5fd27b19ddb415d0772cf3
|
democracy_club/apps/everyelection/forms.py
|
democracy_club/apps/everyelection/forms.py
|
from django.forms import (ModelForm, CheckboxSelectMultiple,
MultipleChoiceField)
from .models import AuthorityElection, AuthorityElectionPosition
class AuthorityAreaForm(ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
# import ipdb; ipdb.set_trace().
self.fields['areas'] = MultipleChoiceField(
choices=[
(a.pk, a.name) for a in self.instance.authority.child_areas],
label="Wards",
widget=CheckboxSelectMultiple
)
class Meta:
model = AuthorityElection
fields = []
def clean(self, *args, **kwargs):
for area in self.cleaned_data['areas']:
AuthorityElectionPosition.objects.get_or_create(
authority_election=self.instance,
user=self.user,
area_id=area
)
return super().clean(*args, **kwargs)
|
from django.forms import (ModelForm, CheckboxSelectMultiple,
MultipleChoiceField)
from .models import AuthorityElection, AuthorityElectionPosition
class AuthorityAreaForm(ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
# import ipdb; ipdb.set_trace().
self.fields['areas'] = MultipleChoiceField(
choices=[
(a.pk, a.name) for a in self.instance.authority.child_areas],
label="Wards",
widget=CheckboxSelectMultiple
)
class Meta:
model = AuthorityElection
fields = []
def clean(self, *args, **kwargs):
if 'areas' in self.cleaned_data:
for area in self.cleaned_data['areas']:
AuthorityElectionPosition.objects.get_or_create(
authority_election=self.instance,
user=self.user,
area_id=area
)
return super().clean(*args, **kwargs)
|
Check that at least one area has been checked
|
Check that at least one area has been checked
|
Python
|
bsd-3-clause
|
DemocracyClub/Website,DemocracyClub/Website,DemocracyClub/Website,DemocracyClub/Website
|
from django.forms import (ModelForm, CheckboxSelectMultiple,
MultipleChoiceField)
from .models import AuthorityElection, AuthorityElectionPosition
class AuthorityAreaForm(ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
# import ipdb; ipdb.set_trace().
self.fields['areas'] = MultipleChoiceField(
choices=[
(a.pk, a.name) for a in self.instance.authority.child_areas],
label="Wards",
widget=CheckboxSelectMultiple
)
class Meta:
model = AuthorityElection
fields = []
def clean(self, *args, **kwargs):
+ if 'areas' in self.cleaned_data:
- for area in self.cleaned_data['areas']:
+ for area in self.cleaned_data['areas']:
- AuthorityElectionPosition.objects.get_or_create(
+ AuthorityElectionPosition.objects.get_or_create(
- authority_election=self.instance,
+ authority_election=self.instance,
- user=self.user,
+ user=self.user,
- area_id=area
+ area_id=area
- )
+ )
return super().clean(*args, **kwargs)
|
Check that at least one area has been checked
|
## Code Before:
from django.forms import (ModelForm, CheckboxSelectMultiple,
MultipleChoiceField)
from .models import AuthorityElection, AuthorityElectionPosition
class AuthorityAreaForm(ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
# import ipdb; ipdb.set_trace().
self.fields['areas'] = MultipleChoiceField(
choices=[
(a.pk, a.name) for a in self.instance.authority.child_areas],
label="Wards",
widget=CheckboxSelectMultiple
)
class Meta:
model = AuthorityElection
fields = []
def clean(self, *args, **kwargs):
for area in self.cleaned_data['areas']:
AuthorityElectionPosition.objects.get_or_create(
authority_election=self.instance,
user=self.user,
area_id=area
)
return super().clean(*args, **kwargs)
## Instruction:
Check that at least one area has been checked
## Code After:
from django.forms import (ModelForm, CheckboxSelectMultiple,
MultipleChoiceField)
from .models import AuthorityElection, AuthorityElectionPosition
class AuthorityAreaForm(ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
# import ipdb; ipdb.set_trace().
self.fields['areas'] = MultipleChoiceField(
choices=[
(a.pk, a.name) for a in self.instance.authority.child_areas],
label="Wards",
widget=CheckboxSelectMultiple
)
class Meta:
model = AuthorityElection
fields = []
def clean(self, *args, **kwargs):
if 'areas' in self.cleaned_data:
for area in self.cleaned_data['areas']:
AuthorityElectionPosition.objects.get_or_create(
authority_election=self.instance,
user=self.user,
area_id=area
)
return super().clean(*args, **kwargs)
|
from django.forms import (ModelForm, CheckboxSelectMultiple,
MultipleChoiceField)
from .models import AuthorityElection, AuthorityElectionPosition
class AuthorityAreaForm(ModelForm):
def __init__(self, user, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user = user
# import ipdb; ipdb.set_trace().
self.fields['areas'] = MultipleChoiceField(
choices=[
(a.pk, a.name) for a in self.instance.authority.child_areas],
label="Wards",
widget=CheckboxSelectMultiple
)
class Meta:
model = AuthorityElection
fields = []
def clean(self, *args, **kwargs):
+ if 'areas' in self.cleaned_data:
- for area in self.cleaned_data['areas']:
+ for area in self.cleaned_data['areas']:
? ++++
- AuthorityElectionPosition.objects.get_or_create(
+ AuthorityElectionPosition.objects.get_or_create(
? ++++
- authority_election=self.instance,
+ authority_election=self.instance,
? ++++
- user=self.user,
+ user=self.user,
? ++++
- area_id=area
+ area_id=area
? ++++
- )
+ )
? ++++
return super().clean(*args, **kwargs)
|
d2076f6fd3a0bb687224048de904207c885aba5c
|
utils.py
|
utils.py
|
from functools import wraps
def cached_property(f):
name = f.__name__
@property
@wraps(f)
def inner(self):
if not hasattr(self, "_property_cache"):
self._property_cache = {}
if name not in self._property_cache:
self._property_cache[name] = f(self)
return self._property_cache[name]
return inner
class Constant():
def __init__(self, x):
self.x = x
def __repr__(self):
return self.x
|
from functools import wraps
def cached_property(f):
name = f.__name__
@property
@wraps(f)
def inner(self):
if not hasattr(self, "_property_cache"):
self._property_cache = {}
if name not in self._property_cache:
self._property_cache[name] = f(self)
return self._property_cache[name]
return inner
class Constant():
def __init__(self, x):
self.x = x
def __repr__(self):
return self.x
def constants(namespace, names):
for name in names:
namespace[name] = Constant(name)
|
Make it easier to define constants
|
Make it easier to define constants
|
Python
|
unlicense
|
drkitty/python3-base,drkitty/python3-base
|
from functools import wraps
def cached_property(f):
name = f.__name__
@property
@wraps(f)
def inner(self):
if not hasattr(self, "_property_cache"):
self._property_cache = {}
if name not in self._property_cache:
self._property_cache[name] = f(self)
return self._property_cache[name]
return inner
class Constant():
def __init__(self, x):
self.x = x
def __repr__(self):
return self.x
+
+ def constants(namespace, names):
+ for name in names:
+ namespace[name] = Constant(name)
+
|
Make it easier to define constants
|
## Code Before:
from functools import wraps
def cached_property(f):
name = f.__name__
@property
@wraps(f)
def inner(self):
if not hasattr(self, "_property_cache"):
self._property_cache = {}
if name not in self._property_cache:
self._property_cache[name] = f(self)
return self._property_cache[name]
return inner
class Constant():
def __init__(self, x):
self.x = x
def __repr__(self):
return self.x
## Instruction:
Make it easier to define constants
## Code After:
from functools import wraps
def cached_property(f):
name = f.__name__
@property
@wraps(f)
def inner(self):
if not hasattr(self, "_property_cache"):
self._property_cache = {}
if name not in self._property_cache:
self._property_cache[name] = f(self)
return self._property_cache[name]
return inner
class Constant():
def __init__(self, x):
self.x = x
def __repr__(self):
return self.x
def constants(namespace, names):
for name in names:
namespace[name] = Constant(name)
|
from functools import wraps
def cached_property(f):
name = f.__name__
@property
@wraps(f)
def inner(self):
if not hasattr(self, "_property_cache"):
self._property_cache = {}
if name not in self._property_cache:
self._property_cache[name] = f(self)
return self._property_cache[name]
return inner
class Constant():
def __init__(self, x):
self.x = x
def __repr__(self):
return self.x
+
+
+ def constants(namespace, names):
+ for name in names:
+ namespace[name] = Constant(name)
|
6aee1c51d2607047091280abb56d2956cebe1ebb
|
zvm/zstring.py
|
zvm/zstring.py
|
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the begginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the begginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
# We must read in sequence bits 14-10, 9-5, 4-0.
offset = (2 - self._char_in_block) * 5
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
Make the string translator return the actual right values!
|
Make the string translator return the actual right values!
* zvm/zstring.py:
(ZStringStream._get_block): Remove debug printing.
(ZStringStream.get): Make the offset calculations work on the
correct bits of the data chunk. Remove debug printing.
|
Python
|
bsd-3-clause
|
sussman/zvm,sussman/zvm
|
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the begginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
- print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
+ # We must read in sequence bits 14-10, 9-5, 4-0.
- offset = self._char_in_block * 5
+ offset = (2 - self._char_in_block) * 5
- print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
Make the string translator return the actual right values!
|
## Code Before:
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the begginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
offset = self._char_in_block * 5
print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
## Instruction:
Make the string translator return the actual right values!
## Code After:
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the begginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
# We must read in sequence bits 14-10, 9-5, 4-0.
offset = (2 - self._char_in_block) * 5
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
class ZStringEndOfString(Exception):
"""No more data left in string."""
class ZStringStream(object):
"""This class takes an address and a ZMemory, and treats that as
the begginning of a ZString. Subsequent calls to get() will return
one ZChar code at a time, raising ZStringEndOfString when there is
no more data."""
def __init__(self, zmem, addr):
self._mem = zmem
self._addr = addr
self._has_ended = False
self._get_block()
def _get_block(self):
from bitfield import BitField
chunk = self._mem[self._addr:self._addr+2]
- print chunk
self._data = BitField(''.join([chr(x) for x in chunk]))
self._addr += 2
self._char_in_block = 0
def get(self, num=1):
if self._has_ended:
raise ZStringEndOfString
+ # We must read in sequence bits 14-10, 9-5, 4-0.
- offset = self._char_in_block * 5
+ offset = (2 - self._char_in_block) * 5
? +++++ +
- print offset
zchar = self._data[offset:offset+5]
if self._char_in_block == 2:
# If end-of-string marker is set...
if self._data[15] == 1:
self._has_ended = True
else:
self._get_block()
else:
self._char_in_block += 1
return zchar
|
c9ed6fe84b7f55ba2e9dc75d9ddf8cb0e7f9eb8c
|
pixelmap/pixel.py
|
pixelmap/pixel.py
|
from itertools import count
class Pixel:
new_id = count(1)
def __init__(self):
"""Pixel constructor"""
self.id = next(self.new_id)
def __str__(self):
return str(self.id)
def __repr__(self):
return self.__str__()
|
from itertools import count
class Pixel:
new_id = count(1)
def __init__(self, data=None):
"""Pixel constructor"""
self.id = next(self.new_id)
self.data = data
def __str__(self):
return str(self.data)
|
Add data dict as Pixel member.
|
Add data dict as Pixel member.
|
Python
|
mit
|
yebra06/pixelmap
|
from itertools import count
class Pixel:
new_id = count(1)
- def __init__(self):
+ def __init__(self, data=None):
"""Pixel constructor"""
self.id = next(self.new_id)
+ self.data = data
def __str__(self):
- return str(self.id)
+ return str(self.data)
- def __repr__(self):
- return self.__str__()
-
|
Add data dict as Pixel member.
|
## Code Before:
from itertools import count
class Pixel:
new_id = count(1)
def __init__(self):
"""Pixel constructor"""
self.id = next(self.new_id)
def __str__(self):
return str(self.id)
def __repr__(self):
return self.__str__()
## Instruction:
Add data dict as Pixel member.
## Code After:
from itertools import count
class Pixel:
new_id = count(1)
def __init__(self, data=None):
"""Pixel constructor"""
self.id = next(self.new_id)
self.data = data
def __str__(self):
return str(self.data)
|
from itertools import count
class Pixel:
new_id = count(1)
- def __init__(self):
+ def __init__(self, data=None):
? +++++++++++
"""Pixel constructor"""
self.id = next(self.new_id)
+ self.data = data
def __str__(self):
- return str(self.id)
? -
+ return str(self.data)
? +++
-
- def __repr__(self):
- return self.__str__()
|
cf94fb86cab2fc892b762b66b760a80ed268e8b3
|
social/accounts/__init__.py
|
social/accounts/__init__.py
|
"""Import and register all account types."""
from abc import ABC, abstractmethod
class Account(ABC):
@abstractmethod
def __init__(self, *breadcrumbs):
"""
Return an Account object corresponding to the breadcrumbs.
This should only be called if "match" returned truthy about matching the
breadcrumbs. Otherwise, you're just mean.
"""
pass
@staticmethod
@abstractmethod
def match(*breadcrumbs):
"""
Return truthy if the breadcrumbs match the account.
The breadcrumbs are described below, but match functions should be
written to gracefully accept more or less keys in the breadcrumbs.
:param dict breadcrumbs: Dictionary containing at least one of the
following breadcrumbs:
- url: A URL that probably points to their profile.
- email: An email that could be used to find the profile.
- username: A username for the account.
"""
pass
@abstractmethod
def expand(self, info):
"""
Return an iterable of breadcrumb structs!
:param info: A dictionary that should contain information about the
person. It should be updated with any information you come across,
and you may want to use any info in it to help narrow down your
search.
"""
pass
|
"""Import and register all account types."""
from abc import ABC, abstractmethod
__all__ = ['github']
class Account(ABC):
@abstractmethod
def __init__(self, *breadcrumbs):
"""
Return an Account object corresponding to the breadcrumbs.
This should only be called if "match" returned truthy about matching the
breadcrumbs. Otherwise, you're just mean.
"""
pass
@staticmethod
@abstractmethod
def match(*breadcrumbs):
"""
Return truthy if the breadcrumbs match the account.
The breadcrumbs are described below, but match functions should be
written to gracefully accept more or less keys in the breadcrumbs.
:param dict breadcrumbs: Dictionary containing at least one of the
following breadcrumbs:
- url: A URL that probably points to their profile.
- email: An email that could be used to find the profile.
- username: A username for the account.
"""
pass
@abstractmethod
def expand(self, info):
"""
Return an iterable of breadcrumb structs!
:param info: A dictionary that should contain information about the
person. It should be updated with any information you come across,
and you may want to use any info in it to help narrow down your
search.
"""
pass
|
Add github to the accounts package.
|
Add github to the accounts package.
|
Python
|
bsd-3-clause
|
brenns10/social,brenns10/social
|
"""Import and register all account types."""
from abc import ABC, abstractmethod
+
+ __all__ = ['github']
class Account(ABC):
@abstractmethod
def __init__(self, *breadcrumbs):
"""
Return an Account object corresponding to the breadcrumbs.
This should only be called if "match" returned truthy about matching the
breadcrumbs. Otherwise, you're just mean.
"""
pass
@staticmethod
@abstractmethod
def match(*breadcrumbs):
"""
Return truthy if the breadcrumbs match the account.
The breadcrumbs are described below, but match functions should be
written to gracefully accept more or less keys in the breadcrumbs.
:param dict breadcrumbs: Dictionary containing at least one of the
following breadcrumbs:
- url: A URL that probably points to their profile.
- email: An email that could be used to find the profile.
- username: A username for the account.
"""
pass
@abstractmethod
def expand(self, info):
"""
Return an iterable of breadcrumb structs!
:param info: A dictionary that should contain information about the
person. It should be updated with any information you come across,
and you may want to use any info in it to help narrow down your
search.
"""
pass
|
Add github to the accounts package.
|
## Code Before:
"""Import and register all account types."""
from abc import ABC, abstractmethod
class Account(ABC):
@abstractmethod
def __init__(self, *breadcrumbs):
"""
Return an Account object corresponding to the breadcrumbs.
This should only be called if "match" returned truthy about matching the
breadcrumbs. Otherwise, you're just mean.
"""
pass
@staticmethod
@abstractmethod
def match(*breadcrumbs):
"""
Return truthy if the breadcrumbs match the account.
The breadcrumbs are described below, but match functions should be
written to gracefully accept more or less keys in the breadcrumbs.
:param dict breadcrumbs: Dictionary containing at least one of the
following breadcrumbs:
- url: A URL that probably points to their profile.
- email: An email that could be used to find the profile.
- username: A username for the account.
"""
pass
@abstractmethod
def expand(self, info):
"""
Return an iterable of breadcrumb structs!
:param info: A dictionary that should contain information about the
person. It should be updated with any information you come across,
and you may want to use any info in it to help narrow down your
search.
"""
pass
## Instruction:
Add github to the accounts package.
## Code After:
"""Import and register all account types."""
from abc import ABC, abstractmethod
__all__ = ['github']
class Account(ABC):
@abstractmethod
def __init__(self, *breadcrumbs):
"""
Return an Account object corresponding to the breadcrumbs.
This should only be called if "match" returned truthy about matching the
breadcrumbs. Otherwise, you're just mean.
"""
pass
@staticmethod
@abstractmethod
def match(*breadcrumbs):
"""
Return truthy if the breadcrumbs match the account.
The breadcrumbs are described below, but match functions should be
written to gracefully accept more or less keys in the breadcrumbs.
:param dict breadcrumbs: Dictionary containing at least one of the
following breadcrumbs:
- url: A URL that probably points to their profile.
- email: An email that could be used to find the profile.
- username: A username for the account.
"""
pass
@abstractmethod
def expand(self, info):
"""
Return an iterable of breadcrumb structs!
:param info: A dictionary that should contain information about the
person. It should be updated with any information you come across,
and you may want to use any info in it to help narrow down your
search.
"""
pass
|
"""Import and register all account types."""
from abc import ABC, abstractmethod
+
+ __all__ = ['github']
class Account(ABC):
@abstractmethod
def __init__(self, *breadcrumbs):
"""
Return an Account object corresponding to the breadcrumbs.
This should only be called if "match" returned truthy about matching the
breadcrumbs. Otherwise, you're just mean.
"""
pass
@staticmethod
@abstractmethod
def match(*breadcrumbs):
"""
Return truthy if the breadcrumbs match the account.
The breadcrumbs are described below, but match functions should be
written to gracefully accept more or less keys in the breadcrumbs.
:param dict breadcrumbs: Dictionary containing at least one of the
following breadcrumbs:
- url: A URL that probably points to their profile.
- email: An email that could be used to find the profile.
- username: A username for the account.
"""
pass
@abstractmethod
def expand(self, info):
"""
Return an iterable of breadcrumb structs!
:param info: A dictionary that should contain information about the
person. It should be updated with any information you come across,
and you may want to use any info in it to help narrow down your
search.
"""
pass
|
8e28c627c0a84939bb44c2c77fa3e4b3de4932bf
|
erroneous/models.py
|
erroneous/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Error(models.Model):
"""
Model for storing the individual errors.
"""
kind = models.CharField(_('type'),
null=True, blank=True, max_length=128, db_index=True
)
info = models.TextField(
null=False,
)
data = models.TextField(
blank=True, null=True
)
path = models.URLField(
null=True, blank=True, verify_exists=False,
)
when = models.DateTimeField(
null=False, auto_now_add=True, db_index=True,
)
html = models.TextField(
null=True, blank=True,
)
modified = models.DateTimeField(auto_now=True)
class Meta:
"""
Meta information for the model.
"""
verbose_name = _('Error')
verbose_name_plural = _('Errors')
def __unicode__(self):
"""
String representation of the object.
"""
return "%s: %s" % (self.kind, self.info)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Error(models.Model):
"""
Model for storing the individual errors.
"""
kind = models.CharField(_('type'),
null=True, blank=True, max_length=128, db_index=True
)
info = models.TextField(
null=False,
)
data = models.TextField(
blank=True, null=True
)
path = models.URLField(
null=True, blank=True,
)
when = models.DateTimeField(
null=False, auto_now_add=True, db_index=True,
)
html = models.TextField(
null=True, blank=True,
)
modified = models.DateTimeField(auto_now=True)
class Meta:
"""
Meta information for the model.
"""
verbose_name = _('Error')
verbose_name_plural = _('Errors')
def __unicode__(self):
"""
String representation of the object.
"""
return "%s: %s" % (self.kind, self.info)
|
Remove verify_exists kwarg, which was deprecated in django 1.3 and causes an error in django 1.5
|
Remove verify_exists kwarg, which was deprecated in django 1.3 and causes an error in django 1.5
|
Python
|
mit
|
mbelousov/django-erroneous,mbelousov/django-erroneous,mridang/django-erroneous
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Error(models.Model):
"""
Model for storing the individual errors.
"""
kind = models.CharField(_('type'),
null=True, blank=True, max_length=128, db_index=True
)
info = models.TextField(
null=False,
)
data = models.TextField(
blank=True, null=True
)
path = models.URLField(
- null=True, blank=True, verify_exists=False,
+ null=True, blank=True,
)
when = models.DateTimeField(
null=False, auto_now_add=True, db_index=True,
)
html = models.TextField(
null=True, blank=True,
)
modified = models.DateTimeField(auto_now=True)
class Meta:
"""
Meta information for the model.
"""
verbose_name = _('Error')
verbose_name_plural = _('Errors')
def __unicode__(self):
"""
String representation of the object.
"""
return "%s: %s" % (self.kind, self.info)
|
Remove verify_exists kwarg, which was deprecated in django 1.3 and causes an error in django 1.5
|
## Code Before:
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Error(models.Model):
"""
Model for storing the individual errors.
"""
kind = models.CharField(_('type'),
null=True, blank=True, max_length=128, db_index=True
)
info = models.TextField(
null=False,
)
data = models.TextField(
blank=True, null=True
)
path = models.URLField(
null=True, blank=True, verify_exists=False,
)
when = models.DateTimeField(
null=False, auto_now_add=True, db_index=True,
)
html = models.TextField(
null=True, blank=True,
)
modified = models.DateTimeField(auto_now=True)
class Meta:
"""
Meta information for the model.
"""
verbose_name = _('Error')
verbose_name_plural = _('Errors')
def __unicode__(self):
"""
String representation of the object.
"""
return "%s: %s" % (self.kind, self.info)
## Instruction:
Remove verify_exists kwarg, which was deprecated in django 1.3 and causes an error in django 1.5
## Code After:
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Error(models.Model):
"""
Model for storing the individual errors.
"""
kind = models.CharField(_('type'),
null=True, blank=True, max_length=128, db_index=True
)
info = models.TextField(
null=False,
)
data = models.TextField(
blank=True, null=True
)
path = models.URLField(
null=True, blank=True,
)
when = models.DateTimeField(
null=False, auto_now_add=True, db_index=True,
)
html = models.TextField(
null=True, blank=True,
)
modified = models.DateTimeField(auto_now=True)
class Meta:
"""
Meta information for the model.
"""
verbose_name = _('Error')
verbose_name_plural = _('Errors')
def __unicode__(self):
"""
String representation of the object.
"""
return "%s: %s" % (self.kind, self.info)
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Error(models.Model):
"""
Model for storing the individual errors.
"""
kind = models.CharField(_('type'),
null=True, blank=True, max_length=128, db_index=True
)
info = models.TextField(
null=False,
)
data = models.TextField(
blank=True, null=True
)
path = models.URLField(
- null=True, blank=True, verify_exists=False,
+ null=True, blank=True,
)
when = models.DateTimeField(
null=False, auto_now_add=True, db_index=True,
)
html = models.TextField(
null=True, blank=True,
)
modified = models.DateTimeField(auto_now=True)
class Meta:
"""
Meta information for the model.
"""
verbose_name = _('Error')
verbose_name_plural = _('Errors')
def __unicode__(self):
"""
String representation of the object.
"""
return "%s: %s" % (self.kind, self.info)
|
5923d751d9541758a67915db67ee799ba0d1cd6d
|
polling_stations/api/mixins.py
|
polling_stations/api/mixins.py
|
from rest_framework.decorators import list_route
from rest_framework.response import Response
class PollingEntityMixin():
def output(self, request):
if not self.validate_request():
return Response(
{'detail': 'council_id parameter must be specified'}, 400)
queryset = self.get_queryset()
serializer = self.get_serializer(
queryset, many=True, read_only=True, context={'request': request})
return Response(serializer.data)
def list(self, request, *args, **kwargs):
self.geo = False
return self.output(request)
@list_route(url_path='geo')
def geo(self, request, format=None):
self.geo = True
return self.output(request)
|
from rest_framework.decorators import list_route
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
class LargeResultsSetPagination(LimitOffsetPagination):
default_limit = 100
max_limit = 1000
class PollingEntityMixin():
pagination_class = LargeResultsSetPagination
def output(self, request):
if not self.validate_request():
return Response(
{'detail': 'council_id parameter must be specified'}, 400)
queryset = self.get_queryset()
if 'council_id' not in request.query_params:
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(
page,
many=True,
read_only=True,
context={'request': request}
)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(
queryset,
many=True,
read_only=True,
context={'request': request}
)
return Response(serializer.data)
def list(self, request, *args, **kwargs):
self.geo = False
return self.output(request)
@list_route(url_path='geo')
def geo(self, request, format=None):
self.geo = True
return self.output(request)
|
Use pagination on stations and districts endpoints with no filter
|
Use pagination on stations and districts endpoints with no filter
If no filter is passed to /pollingstations or /pollingdistricts
use pagination (when filtering, there is no pagination)
This means:
- HTML outputs stay responsive/useful
- People can't tie up our server with a query that says
'give me boundaries for all polling districts in the country'
|
Python
|
bsd-3-clause
|
chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations
|
from rest_framework.decorators import list_route
+ from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
+ class LargeResultsSetPagination(LimitOffsetPagination):
+ default_limit = 100
+ max_limit = 1000
+
+
class PollingEntityMixin():
+
+ pagination_class = LargeResultsSetPagination
def output(self, request):
if not self.validate_request():
return Response(
{'detail': 'council_id parameter must be specified'}, 400)
queryset = self.get_queryset()
+
+ if 'council_id' not in request.query_params:
+ page = self.paginate_queryset(queryset)
+ if page is not None:
+ serializer = self.get_serializer(
+ page,
+ many=True,
+ read_only=True,
+ context={'request': request}
+ )
+ return self.get_paginated_response(serializer.data)
+
serializer = self.get_serializer(
- queryset, many=True, read_only=True, context={'request': request})
+ queryset,
+ many=True,
+ read_only=True,
+ context={'request': request}
+ )
return Response(serializer.data)
def list(self, request, *args, **kwargs):
self.geo = False
return self.output(request)
@list_route(url_path='geo')
def geo(self, request, format=None):
self.geo = True
return self.output(request)
|
Use pagination on stations and districts endpoints with no filter
|
## Code Before:
from rest_framework.decorators import list_route
from rest_framework.response import Response
class PollingEntityMixin():
def output(self, request):
if not self.validate_request():
return Response(
{'detail': 'council_id parameter must be specified'}, 400)
queryset = self.get_queryset()
serializer = self.get_serializer(
queryset, many=True, read_only=True, context={'request': request})
return Response(serializer.data)
def list(self, request, *args, **kwargs):
self.geo = False
return self.output(request)
@list_route(url_path='geo')
def geo(self, request, format=None):
self.geo = True
return self.output(request)
## Instruction:
Use pagination on stations and districts endpoints with no filter
## Code After:
from rest_framework.decorators import list_route
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
class LargeResultsSetPagination(LimitOffsetPagination):
default_limit = 100
max_limit = 1000
class PollingEntityMixin():
pagination_class = LargeResultsSetPagination
def output(self, request):
if not self.validate_request():
return Response(
{'detail': 'council_id parameter must be specified'}, 400)
queryset = self.get_queryset()
if 'council_id' not in request.query_params:
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(
page,
many=True,
read_only=True,
context={'request': request}
)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(
queryset,
many=True,
read_only=True,
context={'request': request}
)
return Response(serializer.data)
def list(self, request, *args, **kwargs):
self.geo = False
return self.output(request)
@list_route(url_path='geo')
def geo(self, request, format=None):
self.geo = True
return self.output(request)
|
from rest_framework.decorators import list_route
+ from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
+ class LargeResultsSetPagination(LimitOffsetPagination):
+ default_limit = 100
+ max_limit = 1000
+
+
class PollingEntityMixin():
+
+ pagination_class = LargeResultsSetPagination
def output(self, request):
if not self.validate_request():
return Response(
{'detail': 'council_id parameter must be specified'}, 400)
queryset = self.get_queryset()
+
+ if 'council_id' not in request.query_params:
+ page = self.paginate_queryset(queryset)
+ if page is not None:
+ serializer = self.get_serializer(
+ page,
+ many=True,
+ read_only=True,
+ context={'request': request}
+ )
+ return self.get_paginated_response(serializer.data)
+
serializer = self.get_serializer(
- queryset, many=True, read_only=True, context={'request': request})
+ queryset,
+ many=True,
+ read_only=True,
+ context={'request': request}
+ )
return Response(serializer.data)
def list(self, request, *args, **kwargs):
self.geo = False
return self.output(request)
@list_route(url_path='geo')
def geo(self, request, format=None):
self.geo = True
return self.output(request)
|
6e4e073b4979bbf6099d0054181f13134f8cfe73
|
stash_test_case.py
|
stash_test_case.py
|
import os
import shutil
import unittest
from stash import Stash
class StashTestCase(unittest.TestCase):
"""Base class for test cases that test stash functionality.
This base class makes sure that all unit tests are executed in a sandbox
environment.
"""
PATCHES_PATH = os.path.join('test', '.patches')
REPOSITORY_URI = os.path.join('test', '.repo')
@classmethod
def setUpClass(cls):
"""Makes sure that stash will look for patches in the patches path in
the test directory, and that the repository directory exists.
"""
if not os.path.exists(cls.REPOSITORY_URI):
os.mkdir(cls.REPOSITORY_URI)
if not os.path.exists(cls.PATCHES_PATH):
os.mkdir(cls.PATCHES_PATH)
Stash.PATCHES_PATH = cls.PATCHES_PATH
@classmethod
def tearDownClass(cls):
"""Cleans up the temporary patches path used for the unit tests."""
if os.path.exists(cls.PATCHES_PATH):
shutil.rmtree(cls.PATCHES_PATH)
# Clean up the temporary repository.
if os.path.exists(cls.REPOSITORY_URI):
shutil.rmtree(cls.REPOSITORY_URI)
|
import os
import shutil
import unittest
from stash import Stash
class StashTestCase(unittest.TestCase):
"""Base class for test cases that test stash functionality.
This base class makes sure that all unit tests are executed in a sandbox
environment.
"""
PATCHES_PATH = os.path.join('test', '.patches')
REPOSITORY_URI = os.path.join('test', '.repo')
@classmethod
def setUpClass(cls):
"""Makes sure that stash will look for patches in the patches path in
the test directory, and that the repository directory exists.
"""
if not os.path.exists(cls.REPOSITORY_URI):
os.mkdir(cls.REPOSITORY_URI)
if not os.path.exists(cls.PATCHES_PATH):
os.mkdir(cls.PATCHES_PATH)
Stash.PATCHES_PATH = cls.PATCHES_PATH
@classmethod
def tearDownClass(cls):
"""Cleans up the temporary patches path used for the unit tests."""
if os.path.exists(cls.PATCHES_PATH):
shutil.rmtree(cls.PATCHES_PATH)
# Clean up the temporary repository.
if os.path.exists(cls.REPOSITORY_URI):
shutil.rmtree(cls.REPOSITORY_URI)
def tearDown(self):
"""Removes all stashed patches."""
for patch_name in os.listdir(self.PATCHES_PATH):
os.unlink(os.path.join(self.PATCHES_PATH, patch_name))
|
Remove patches in between unit tests.
|
Remove patches in between unit tests.
|
Python
|
bsd-3-clause
|
ton/stash,ton/stash
|
import os
import shutil
import unittest
from stash import Stash
class StashTestCase(unittest.TestCase):
"""Base class for test cases that test stash functionality.
This base class makes sure that all unit tests are executed in a sandbox
environment.
"""
PATCHES_PATH = os.path.join('test', '.patches')
REPOSITORY_URI = os.path.join('test', '.repo')
@classmethod
def setUpClass(cls):
"""Makes sure that stash will look for patches in the patches path in
the test directory, and that the repository directory exists.
"""
if not os.path.exists(cls.REPOSITORY_URI):
os.mkdir(cls.REPOSITORY_URI)
if not os.path.exists(cls.PATCHES_PATH):
os.mkdir(cls.PATCHES_PATH)
Stash.PATCHES_PATH = cls.PATCHES_PATH
@classmethod
def tearDownClass(cls):
"""Cleans up the temporary patches path used for the unit tests."""
if os.path.exists(cls.PATCHES_PATH):
shutil.rmtree(cls.PATCHES_PATH)
# Clean up the temporary repository.
if os.path.exists(cls.REPOSITORY_URI):
shutil.rmtree(cls.REPOSITORY_URI)
+ def tearDown(self):
+ """Removes all stashed patches."""
+ for patch_name in os.listdir(self.PATCHES_PATH):
+ os.unlink(os.path.join(self.PATCHES_PATH, patch_name))
+
|
Remove patches in between unit tests.
|
## Code Before:
import os
import shutil
import unittest
from stash import Stash
class StashTestCase(unittest.TestCase):
"""Base class for test cases that test stash functionality.
This base class makes sure that all unit tests are executed in a sandbox
environment.
"""
PATCHES_PATH = os.path.join('test', '.patches')
REPOSITORY_URI = os.path.join('test', '.repo')
@classmethod
def setUpClass(cls):
"""Makes sure that stash will look for patches in the patches path in
the test directory, and that the repository directory exists.
"""
if not os.path.exists(cls.REPOSITORY_URI):
os.mkdir(cls.REPOSITORY_URI)
if not os.path.exists(cls.PATCHES_PATH):
os.mkdir(cls.PATCHES_PATH)
Stash.PATCHES_PATH = cls.PATCHES_PATH
@classmethod
def tearDownClass(cls):
"""Cleans up the temporary patches path used for the unit tests."""
if os.path.exists(cls.PATCHES_PATH):
shutil.rmtree(cls.PATCHES_PATH)
# Clean up the temporary repository.
if os.path.exists(cls.REPOSITORY_URI):
shutil.rmtree(cls.REPOSITORY_URI)
## Instruction:
Remove patches in between unit tests.
## Code After:
import os
import shutil
import unittest
from stash import Stash
class StashTestCase(unittest.TestCase):
"""Base class for test cases that test stash functionality.
This base class makes sure that all unit tests are executed in a sandbox
environment.
"""
PATCHES_PATH = os.path.join('test', '.patches')
REPOSITORY_URI = os.path.join('test', '.repo')
@classmethod
def setUpClass(cls):
"""Makes sure that stash will look for patches in the patches path in
the test directory, and that the repository directory exists.
"""
if not os.path.exists(cls.REPOSITORY_URI):
os.mkdir(cls.REPOSITORY_URI)
if not os.path.exists(cls.PATCHES_PATH):
os.mkdir(cls.PATCHES_PATH)
Stash.PATCHES_PATH = cls.PATCHES_PATH
@classmethod
def tearDownClass(cls):
"""Cleans up the temporary patches path used for the unit tests."""
if os.path.exists(cls.PATCHES_PATH):
shutil.rmtree(cls.PATCHES_PATH)
# Clean up the temporary repository.
if os.path.exists(cls.REPOSITORY_URI):
shutil.rmtree(cls.REPOSITORY_URI)
def tearDown(self):
"""Removes all stashed patches."""
for patch_name in os.listdir(self.PATCHES_PATH):
os.unlink(os.path.join(self.PATCHES_PATH, patch_name))
|
import os
import shutil
import unittest
from stash import Stash
class StashTestCase(unittest.TestCase):
"""Base class for test cases that test stash functionality.
This base class makes sure that all unit tests are executed in a sandbox
environment.
"""
PATCHES_PATH = os.path.join('test', '.patches')
REPOSITORY_URI = os.path.join('test', '.repo')
@classmethod
def setUpClass(cls):
"""Makes sure that stash will look for patches in the patches path in
the test directory, and that the repository directory exists.
"""
if not os.path.exists(cls.REPOSITORY_URI):
os.mkdir(cls.REPOSITORY_URI)
if not os.path.exists(cls.PATCHES_PATH):
os.mkdir(cls.PATCHES_PATH)
Stash.PATCHES_PATH = cls.PATCHES_PATH
@classmethod
def tearDownClass(cls):
"""Cleans up the temporary patches path used for the unit tests."""
if os.path.exists(cls.PATCHES_PATH):
shutil.rmtree(cls.PATCHES_PATH)
# Clean up the temporary repository.
if os.path.exists(cls.REPOSITORY_URI):
shutil.rmtree(cls.REPOSITORY_URI)
+
+ def tearDown(self):
+ """Removes all stashed patches."""
+ for patch_name in os.listdir(self.PATCHES_PATH):
+ os.unlink(os.path.join(self.PATCHES_PATH, patch_name))
|
12a61da411134d2fc02e91d41b6687de8763a374
|
modules/pipetruncate.py
|
modules/pipetruncate.py
|
from pipe2py import util
def pipe_truncate(context, _INPUT, conf, **kwargs):
"""This operator truncates the number of items in a feed.
Keyword arguments:
context -- pipeline context
_INPUT -- source generator
kwargs -- terminal, if the truncation value is wired in
conf:
count -- length of the truncated feed, if specified literally
Yields (_OUTPUT):
truncated list of source items
"""
count = conf['count']
limit = int(util.get_value(count, None, **kwargs))
for i in xrange(0, limit):
yield _INPUT.next()
|
from pipe2py import util
def pipe_truncate(context, _INPUT, conf, **kwargs):
"""This operator truncates the number of items in a feed.
Keyword arguments:
context -- pipeline context
_INPUT -- source generator
kwargs -- terminal, if the truncation value is wired in
conf:
count -- length of the truncated feed, if specified literally
Yields (_OUTPUT):
truncated list of source items
"""
count = conf['count']
limit = int(util.get_value(count, None, **kwargs))
i = 0
for item in _INPUT:
if i >= limit:
break
yield item
i += 1
|
Fix for taking feed from a split output
|
Fix for taking feed from a split output
|
Python
|
mit
|
nerevu/riko,nerevu/riko
|
from pipe2py import util
def pipe_truncate(context, _INPUT, conf, **kwargs):
"""This operator truncates the number of items in a feed.
Keyword arguments:
context -- pipeline context
_INPUT -- source generator
kwargs -- terminal, if the truncation value is wired in
conf:
count -- length of the truncated feed, if specified literally
Yields (_OUTPUT):
truncated list of source items
"""
count = conf['count']
limit = int(util.get_value(count, None, **kwargs))
- for i in xrange(0, limit):
- yield _INPUT.next()
-
+ i = 0
+ for item in _INPUT:
+ if i >= limit:
+ break
+ yield item
+ i += 1
+
|
Fix for taking feed from a split output
|
## Code Before:
from pipe2py import util
def pipe_truncate(context, _INPUT, conf, **kwargs):
"""This operator truncates the number of items in a feed.
Keyword arguments:
context -- pipeline context
_INPUT -- source generator
kwargs -- terminal, if the truncation value is wired in
conf:
count -- length of the truncated feed, if specified literally
Yields (_OUTPUT):
truncated list of source items
"""
count = conf['count']
limit = int(util.get_value(count, None, **kwargs))
for i in xrange(0, limit):
yield _INPUT.next()
## Instruction:
Fix for taking feed from a split output
## Code After:
from pipe2py import util
def pipe_truncate(context, _INPUT, conf, **kwargs):
"""This operator truncates the number of items in a feed.
Keyword arguments:
context -- pipeline context
_INPUT -- source generator
kwargs -- terminal, if the truncation value is wired in
conf:
count -- length of the truncated feed, if specified literally
Yields (_OUTPUT):
truncated list of source items
"""
count = conf['count']
limit = int(util.get_value(count, None, **kwargs))
i = 0
for item in _INPUT:
if i >= limit:
break
yield item
i += 1
|
from pipe2py import util
def pipe_truncate(context, _INPUT, conf, **kwargs):
"""This operator truncates the number of items in a feed.
Keyword arguments:
context -- pipeline context
_INPUT -- source generator
kwargs -- terminal, if the truncation value is wired in
conf:
count -- length of the truncated feed, if specified literally
Yields (_OUTPUT):
truncated list of source items
"""
count = conf['count']
limit = int(util.get_value(count, None, **kwargs))
- for i in xrange(0, limit):
- yield _INPUT.next()
-
+ i = 0
+ for item in _INPUT:
+ if i >= limit:
+ break
+ yield item
+ i += 1
|
f8ff675f8c9a4ef2b370e5254d33b97261a9d8ca
|
byceps/util/sentry.py
|
byceps/util/sentry.py
|
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
app_mode = app.config.get('APP_MODE')
sentry_sdk.set_tag('app_mode', app_mode)
if app_mode == 'site':
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
Set Sentry `site_id` tag only in site app mode
|
Set Sentry `site_id` tag only in site app mode
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps
|
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
+ app_mode = app.config.get('APP_MODE')
- sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
+ sentry_sdk.set_tag('app_mode', app_mode)
+
+ if app_mode == 'site':
- sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
+ sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
Set Sentry `site_id` tag only in site app mode
|
## Code Before:
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
## Instruction:
Set Sentry `site_id` tag only in site app mode
## Code After:
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
app_mode = app.config.get('APP_MODE')
sentry_sdk.set_tag('app_mode', app_mode)
if app_mode == 'site':
sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
from flask import Flask
def configure_sentry_for_webapp(dsn: str, environment: str, app: Flask) -> None:
"""Initialize and configure the Sentry SDK for the Flask-based web
application (both in 'admin' and 'site' modes).
"""
import sentry_sdk
from sentry_sdk.integrations.flask import FlaskIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[FlaskIntegration()],
)
+ app_mode = app.config.get('APP_MODE')
- sentry_sdk.set_tag('app_mode', app.config.get('APP_MODE'))
? ^^ ^^^^^^ ------------ -
+ sentry_sdk.set_tag('app_mode', app_mode)
? ^^ ^
+
+ if app_mode == 'site':
- sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
+ sentry_sdk.set_tag('site_id', app.config.get('SITE_ID'))
? ++++
def configure_sentry_for_worker(dsn: str, environment: str) -> None:
"""Initialize and configure the Sentry SDK for the RQ worker."""
import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration
sentry_sdk.init(
dsn=dsn, environment=environment, integrations=[RqIntegration()],
)
sentry_sdk.set_tag('app_mode', 'worker')
|
4ba0b5fe7f31d4353e9c091b03df7324d1c20e88
|
heat/common/pluginutils.py
|
heat/common/pluginutils.py
|
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception):
LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.',
{'module_name': entrypoint.module_name,
'message': exception.message,
'name': entrypoint.name})
|
from oslo_log import log as logging
import six
LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception):
LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.',
{'module_name': entrypoint.module_name,
'message': getattr(exception, 'message',
six.text_type(exception)),
'name': entrypoint.name})
|
Fix no message attribute in exception
|
Fix no message attribute in exception
For py35, message attribute in exception seems removed.
We should directly get the string message from exception object
if message attribute not presented. And since get message attribute
already been deprecated. We should remove sopport on
exception.message after we fully jump to py35.
Partial-Bug: #1704725
Change-Id: I3970aa7c161aa82d179779f1a2f46405d5b0dddb
|
Python
|
apache-2.0
|
noironetworks/heat,noironetworks/heat,openstack/heat,openstack/heat
|
from oslo_log import log as logging
+ import six
LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception):
LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.',
{'module_name': entrypoint.module_name,
- 'message': exception.message,
+ 'message': getattr(exception, 'message',
+ six.text_type(exception)),
'name': entrypoint.name})
|
Fix no message attribute in exception
|
## Code Before:
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception):
LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.',
{'module_name': entrypoint.module_name,
'message': exception.message,
'name': entrypoint.name})
## Instruction:
Fix no message attribute in exception
## Code After:
from oslo_log import log as logging
import six
LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception):
LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.',
{'module_name': entrypoint.module_name,
'message': getattr(exception, 'message',
six.text_type(exception)),
'name': entrypoint.name})
|
from oslo_log import log as logging
+ import six
LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception):
LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.',
{'module_name': entrypoint.module_name,
- 'message': exception.message,
? ^
+ 'message': getattr(exception, 'message',
? ++++++++ ^^^ +
+ six.text_type(exception)),
'name': entrypoint.name})
|
39421ab0e74bbcab610aead0924a177a164404a6
|
Cura/Qt/MainWindow.py
|
Cura/Qt/MainWindow.py
|
from PyQt5.QtCore import pyqtProperty, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtQuick import QQuickWindow, QQuickItem
from OpenGL import GL
class MainWindow(QQuickWindow):
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self._app = None
self._backgroundColor = QColor(204, 204, 204, 255)
self.setClearBeforeRendering(False)
self.beforeRendering.connect(self._render)
def getApplication(self):
return self._app
def setApplication(self, app):
self._app = app
application = pyqtProperty(QObject, fget=getApplication, fset=setApplication)
def getBackgroundColor(self):
return self._backgroundColor
def setBackgroundColor(self, color):
self._backgroundColor = color
backgroundColor = pyqtProperty(QColor, fget=getBackgroundColor, fset=setBackgroundColor)
def _render(self):
GL.glClearColor(self._backgroundColor.redF(), self._backgroundColor.greenF(), self._backgroundColor.blueF(), self._backgroundColor.alphaF())
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
if self._app:
self._app.getController().getActiveView().render()
|
from PyQt5.QtCore import pyqtProperty, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtQuick import QQuickWindow, QQuickItem
from OpenGL import GL
from OpenGL.GL.GREMEDY.string_marker import *
class MainWindow(QQuickWindow):
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self._app = None
self._backgroundColor = QColor(204, 204, 204, 255)
self.setClearBeforeRendering(False)
self.beforeRendering.connect(self._render)
def getApplication(self):
return self._app
def setApplication(self, app):
self._app = app
application = pyqtProperty(QObject, fget=getApplication, fset=setApplication)
def getBackgroundColor(self):
return self._backgroundColor
def setBackgroundColor(self, color):
self._backgroundColor = color
backgroundColor = pyqtProperty(QColor, fget=getBackgroundColor, fset=setBackgroundColor)
def _render(self):
if bool(glStringMarkerGREMEDY):
msg = b"Begin Rendering Background"
glStringMarkerGREMEDY(len(msg), msg)
GL.glClearColor(self._backgroundColor.redF(), self._backgroundColor.greenF(), self._backgroundColor.blueF(), self._backgroundColor.alphaF())
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
if self._app:
self._app.getController().getActiveView().render()
if bool(glStringMarkerGREMEDY):
msg = "End Rendering Background"
glStringMarkerGREMEDY(len(msg), msg)
|
Add some debug markers for more clearly finding our own rendering code
|
Add some debug markers for more clearly finding our own rendering code
|
Python
|
agpl-3.0
|
onitake/Uranium,onitake/Uranium
|
from PyQt5.QtCore import pyqtProperty, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtQuick import QQuickWindow, QQuickItem
from OpenGL import GL
+ from OpenGL.GL.GREMEDY.string_marker import *
class MainWindow(QQuickWindow):
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self._app = None
self._backgroundColor = QColor(204, 204, 204, 255)
self.setClearBeforeRendering(False)
self.beforeRendering.connect(self._render)
def getApplication(self):
return self._app
def setApplication(self, app):
self._app = app
application = pyqtProperty(QObject, fget=getApplication, fset=setApplication)
def getBackgroundColor(self):
return self._backgroundColor
def setBackgroundColor(self, color):
self._backgroundColor = color
backgroundColor = pyqtProperty(QColor, fget=getBackgroundColor, fset=setBackgroundColor)
def _render(self):
+ if bool(glStringMarkerGREMEDY):
+ msg = b"Begin Rendering Background"
+ glStringMarkerGREMEDY(len(msg), msg)
+
GL.glClearColor(self._backgroundColor.redF(), self._backgroundColor.greenF(), self._backgroundColor.blueF(), self._backgroundColor.alphaF())
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
if self._app:
self._app.getController().getActiveView().render()
+ if bool(glStringMarkerGREMEDY):
+ msg = "End Rendering Background"
+ glStringMarkerGREMEDY(len(msg), msg)
+
|
Add some debug markers for more clearly finding our own rendering code
|
## Code Before:
from PyQt5.QtCore import pyqtProperty, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtQuick import QQuickWindow, QQuickItem
from OpenGL import GL
class MainWindow(QQuickWindow):
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self._app = None
self._backgroundColor = QColor(204, 204, 204, 255)
self.setClearBeforeRendering(False)
self.beforeRendering.connect(self._render)
def getApplication(self):
return self._app
def setApplication(self, app):
self._app = app
application = pyqtProperty(QObject, fget=getApplication, fset=setApplication)
def getBackgroundColor(self):
return self._backgroundColor
def setBackgroundColor(self, color):
self._backgroundColor = color
backgroundColor = pyqtProperty(QColor, fget=getBackgroundColor, fset=setBackgroundColor)
def _render(self):
GL.glClearColor(self._backgroundColor.redF(), self._backgroundColor.greenF(), self._backgroundColor.blueF(), self._backgroundColor.alphaF())
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
if self._app:
self._app.getController().getActiveView().render()
## Instruction:
Add some debug markers for more clearly finding our own rendering code
## Code After:
from PyQt5.QtCore import pyqtProperty, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtQuick import QQuickWindow, QQuickItem
from OpenGL import GL
from OpenGL.GL.GREMEDY.string_marker import *
class MainWindow(QQuickWindow):
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self._app = None
self._backgroundColor = QColor(204, 204, 204, 255)
self.setClearBeforeRendering(False)
self.beforeRendering.connect(self._render)
def getApplication(self):
return self._app
def setApplication(self, app):
self._app = app
application = pyqtProperty(QObject, fget=getApplication, fset=setApplication)
def getBackgroundColor(self):
return self._backgroundColor
def setBackgroundColor(self, color):
self._backgroundColor = color
backgroundColor = pyqtProperty(QColor, fget=getBackgroundColor, fset=setBackgroundColor)
def _render(self):
if bool(glStringMarkerGREMEDY):
msg = b"Begin Rendering Background"
glStringMarkerGREMEDY(len(msg), msg)
GL.glClearColor(self._backgroundColor.redF(), self._backgroundColor.greenF(), self._backgroundColor.blueF(), self._backgroundColor.alphaF())
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
if self._app:
self._app.getController().getActiveView().render()
if bool(glStringMarkerGREMEDY):
msg = "End Rendering Background"
glStringMarkerGREMEDY(len(msg), msg)
|
from PyQt5.QtCore import pyqtProperty, QObject
from PyQt5.QtGui import QColor
from PyQt5.QtQuick import QQuickWindow, QQuickItem
from OpenGL import GL
+ from OpenGL.GL.GREMEDY.string_marker import *
class MainWindow(QQuickWindow):
def __init__(self, parent = None):
super(MainWindow, self).__init__(parent)
self._app = None
self._backgroundColor = QColor(204, 204, 204, 255)
self.setClearBeforeRendering(False)
self.beforeRendering.connect(self._render)
def getApplication(self):
return self._app
def setApplication(self, app):
self._app = app
application = pyqtProperty(QObject, fget=getApplication, fset=setApplication)
def getBackgroundColor(self):
return self._backgroundColor
def setBackgroundColor(self, color):
self._backgroundColor = color
backgroundColor = pyqtProperty(QColor, fget=getBackgroundColor, fset=setBackgroundColor)
def _render(self):
+ if bool(glStringMarkerGREMEDY):
+ msg = b"Begin Rendering Background"
+ glStringMarkerGREMEDY(len(msg), msg)
+
GL.glClearColor(self._backgroundColor.redF(), self._backgroundColor.greenF(), self._backgroundColor.blueF(), self._backgroundColor.alphaF())
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
if self._app:
self._app.getController().getActiveView().render()
+
+ if bool(glStringMarkerGREMEDY):
+ msg = "End Rendering Background"
+ glStringMarkerGREMEDY(len(msg), msg)
|
ad278fdc71140dfb4be27895e747356e668e3b6c
|
teuthology/lockstatus.py
|
teuthology/lockstatus.py
|
import requests
import os
from .config import config
def get_status(name):
uri = os.path.join(config.lock_server, 'nodes', name, '')
response = requests.get(uri)
success = response.ok
if success:
return response.json()
return None
|
import requests
import os
from .config import config
from .misc import canonicalize_hostname
def get_status(name):
name = canonicalize_hostname(name, user=None)
uri = os.path.join(config.lock_server, 'nodes', name, '')
response = requests.get(uri)
success = response.ok
if success:
return response.json()
return None
|
Remove the 'user@' prefix before checking status
|
Remove the 'user@' prefix before checking status
Signed-off-by: Zack Cerza <[email protected]>
|
Python
|
mit
|
ceph/teuthology,t-miyamae/teuthology,zhouyuan/teuthology,caibo2014/teuthology,tchaikov/teuthology,dmick/teuthology,robbat2/teuthology,dreamhost/teuthology,ivotron/teuthology,ivotron/teuthology,SUSE/teuthology,robbat2/teuthology,michaelsevilla/teuthology,tchaikov/teuthology,michaelsevilla/teuthology,ceph/teuthology,dreamhost/teuthology,dmick/teuthology,yghannam/teuthology,t-miyamae/teuthology,zhouyuan/teuthology,dmick/teuthology,ktdreyer/teuthology,ktdreyer/teuthology,SUSE/teuthology,caibo2014/teuthology,SUSE/teuthology,yghannam/teuthology
|
import requests
import os
from .config import config
+ from .misc import canonicalize_hostname
def get_status(name):
+ name = canonicalize_hostname(name, user=None)
uri = os.path.join(config.lock_server, 'nodes', name, '')
response = requests.get(uri)
success = response.ok
if success:
return response.json()
return None
|
Remove the 'user@' prefix before checking status
|
## Code Before:
import requests
import os
from .config import config
def get_status(name):
uri = os.path.join(config.lock_server, 'nodes', name, '')
response = requests.get(uri)
success = response.ok
if success:
return response.json()
return None
## Instruction:
Remove the 'user@' prefix before checking status
## Code After:
import requests
import os
from .config import config
from .misc import canonicalize_hostname
def get_status(name):
name = canonicalize_hostname(name, user=None)
uri = os.path.join(config.lock_server, 'nodes', name, '')
response = requests.get(uri)
success = response.ok
if success:
return response.json()
return None
|
import requests
import os
from .config import config
+ from .misc import canonicalize_hostname
def get_status(name):
+ name = canonicalize_hostname(name, user=None)
uri = os.path.join(config.lock_server, 'nodes', name, '')
response = requests.get(uri)
success = response.ok
if success:
return response.json()
return None
|
8df7c8b048bc7c2883819869027764e030c8a2e6
|
fabfile.py
|
fabfile.py
|
from fabric.api import local, env, sudo
env.hosts = ['nkhumphreys.co.uk']
env.user = 'root'
NAME = "gobananas"
def deploy():
base_cmd = "scp -r {local_path} root@{host}:{remote_path}"
remote_path = "/tmp"
template_path = "/var/www/templates/"
static_path = "/var/www/static/"
for h in env.hosts:
cmd = base_cmd.format(local_path=NAME,
host=h,
remote_path=remote_path)
local(cmd)
cmd = base_cmd.format(local_path="./templates/*",
host=h,
remote_path=template_path)
local(cmd)
cmd = base_cmd.format(local_path="./static/*",
host=h,
remote_path=static_path)
local(cmd)
sudo("mv %s/%s /usr/bin" % (remote_path, NAME))
sudo("supervisorctl restart %s" % NAME)
def logs():
cmd = "tail -f /var/log/supervisor/{name}-*.log"
cmd = cmd.format(name=NAME)
sudo(cmd)
|
from fabric.api import local, env, sudo
env.hosts = ['nkhumphreys.co.uk']
env.user = 'root'
NAME = "gobananas"
def deploy():
base_cmd = "scp -r {local_path} root@{host}:{remote_path}"
remote_path = "/tmp"
template_path = "/var/www/templates/"
static_path = "/var/www/nkhumphreys/assets/static/"
for h in env.hosts:
cmd = base_cmd.format(local_path=NAME,
host=h,
remote_path=remote_path)
local(cmd)
cmd = base_cmd.format(local_path="./templates/*",
host=h,
remote_path=template_path)
local(cmd)
cmd = base_cmd.format(local_path="./static/*",
host=h,
remote_path=static_path)
local(cmd)
sudo("mv %s/%s /usr/bin" % (remote_path, NAME))
sudo("supervisorctl restart %s" % NAME)
def logs():
cmd = "tail -f /var/log/supervisor/{name}-*.log"
cmd = cmd.format(name=NAME)
sudo(cmd)
|
Change location of static files on server
|
Change location of static files on server
|
Python
|
mit
|
nkhumphreys/gobananas,nkhumphreys/gobananas,nkhumphreys/gobananas
|
from fabric.api import local, env, sudo
env.hosts = ['nkhumphreys.co.uk']
env.user = 'root'
NAME = "gobananas"
def deploy():
base_cmd = "scp -r {local_path} root@{host}:{remote_path}"
remote_path = "/tmp"
template_path = "/var/www/templates/"
- static_path = "/var/www/static/"
+ static_path = "/var/www/nkhumphreys/assets/static/"
for h in env.hosts:
cmd = base_cmd.format(local_path=NAME,
host=h,
remote_path=remote_path)
local(cmd)
cmd = base_cmd.format(local_path="./templates/*",
host=h,
remote_path=template_path)
local(cmd)
cmd = base_cmd.format(local_path="./static/*",
host=h,
remote_path=static_path)
local(cmd)
sudo("mv %s/%s /usr/bin" % (remote_path, NAME))
sudo("supervisorctl restart %s" % NAME)
def logs():
cmd = "tail -f /var/log/supervisor/{name}-*.log"
cmd = cmd.format(name=NAME)
sudo(cmd)
|
Change location of static files on server
|
## Code Before:
from fabric.api import local, env, sudo
env.hosts = ['nkhumphreys.co.uk']
env.user = 'root'
NAME = "gobananas"
def deploy():
base_cmd = "scp -r {local_path} root@{host}:{remote_path}"
remote_path = "/tmp"
template_path = "/var/www/templates/"
static_path = "/var/www/static/"
for h in env.hosts:
cmd = base_cmd.format(local_path=NAME,
host=h,
remote_path=remote_path)
local(cmd)
cmd = base_cmd.format(local_path="./templates/*",
host=h,
remote_path=template_path)
local(cmd)
cmd = base_cmd.format(local_path="./static/*",
host=h,
remote_path=static_path)
local(cmd)
sudo("mv %s/%s /usr/bin" % (remote_path, NAME))
sudo("supervisorctl restart %s" % NAME)
def logs():
cmd = "tail -f /var/log/supervisor/{name}-*.log"
cmd = cmd.format(name=NAME)
sudo(cmd)
## Instruction:
Change location of static files on server
## Code After:
from fabric.api import local, env, sudo
env.hosts = ['nkhumphreys.co.uk']
env.user = 'root'
NAME = "gobananas"
def deploy():
base_cmd = "scp -r {local_path} root@{host}:{remote_path}"
remote_path = "/tmp"
template_path = "/var/www/templates/"
static_path = "/var/www/nkhumphreys/assets/static/"
for h in env.hosts:
cmd = base_cmd.format(local_path=NAME,
host=h,
remote_path=remote_path)
local(cmd)
cmd = base_cmd.format(local_path="./templates/*",
host=h,
remote_path=template_path)
local(cmd)
cmd = base_cmd.format(local_path="./static/*",
host=h,
remote_path=static_path)
local(cmd)
sudo("mv %s/%s /usr/bin" % (remote_path, NAME))
sudo("supervisorctl restart %s" % NAME)
def logs():
cmd = "tail -f /var/log/supervisor/{name}-*.log"
cmd = cmd.format(name=NAME)
sudo(cmd)
|
from fabric.api import local, env, sudo
env.hosts = ['nkhumphreys.co.uk']
env.user = 'root'
NAME = "gobananas"
def deploy():
base_cmd = "scp -r {local_path} root@{host}:{remote_path}"
remote_path = "/tmp"
template_path = "/var/www/templates/"
- static_path = "/var/www/static/"
+ static_path = "/var/www/nkhumphreys/assets/static/"
? +++++++++++++++++++
for h in env.hosts:
cmd = base_cmd.format(local_path=NAME,
host=h,
remote_path=remote_path)
local(cmd)
cmd = base_cmd.format(local_path="./templates/*",
host=h,
remote_path=template_path)
local(cmd)
cmd = base_cmd.format(local_path="./static/*",
host=h,
remote_path=static_path)
local(cmd)
sudo("mv %s/%s /usr/bin" % (remote_path, NAME))
sudo("supervisorctl restart %s" % NAME)
def logs():
cmd = "tail -f /var/log/supervisor/{name}-*.log"
cmd = cmd.format(name=NAME)
sudo(cmd)
|
173b4f39433aa27970955173e63f99f58cfeecb1
|
custom/enikshay/urls.py
|
custom/enikshay/urls.py
|
from django.conf.urls import patterns, include
urlpatterns = patterns(
'custom.enikshay.integrations.ninetyninedots.views',
(r'^99dots/', include("custom.enikshay.integrations.ninetyninedots.urls")),
)
|
from django.conf.urls import patterns, include
urlpatterns = patterns(
'',
(r'^99dots/', include("custom.enikshay.integrations.ninetyninedots.urls")),
)
|
Remove reference to wrong view
|
Remove reference to wrong view
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq
|
from django.conf.urls import patterns, include
urlpatterns = patterns(
- 'custom.enikshay.integrations.ninetyninedots.views',
+ '',
(r'^99dots/', include("custom.enikshay.integrations.ninetyninedots.urls")),
)
|
Remove reference to wrong view
|
## Code Before:
from django.conf.urls import patterns, include
urlpatterns = patterns(
'custom.enikshay.integrations.ninetyninedots.views',
(r'^99dots/', include("custom.enikshay.integrations.ninetyninedots.urls")),
)
## Instruction:
Remove reference to wrong view
## Code After:
from django.conf.urls import patterns, include
urlpatterns = patterns(
'',
(r'^99dots/', include("custom.enikshay.integrations.ninetyninedots.urls")),
)
|
from django.conf.urls import patterns, include
urlpatterns = patterns(
- 'custom.enikshay.integrations.ninetyninedots.views',
+ '',
(r'^99dots/', include("custom.enikshay.integrations.ninetyninedots.urls")),
)
|
214511a6fbdd0763667e740735d0876f78a3b244
|
derpibooru/query.py
|
derpibooru/query.py
|
from .request import url
class Search(object):
def __init__(self, key=None, q=[], sf="created_at", sd="desc"):
self._parameters = {
"key": key,
"q": q,
"sf": sf,
"sd": sd
}
@property
def parameters(self):
return self._parameters
@property
def url(self):
return url(**self.parameters)
def key(self, key=None):
self._parameters["key"] = key
return Search(**self._parameters)
def query(self, *q):
self._parameters["q"] = [str(tag).strip() for tag in q]
return Search(**self._parameters)
def descending(self):
self._parameters["sd"] = "desc"
return Search(**self._parameters)
def ascending(self):
self._parameters["sd"] = "asc"
return Search(**self._parameters)
def sort_by(self, sf):
self._parameters["sf"] = sf
return Search(**self._parameters)
|
from .request import url
class Search(object):
def __init__(self, key=None, q=[], sf="created_at", sd="desc"):
self._parameters = {
"key": key,
"q": [str(tag).strip() for tag in q if tag],
"sf": sf,
"sd": sd
}
@property
def parameters(self):
return self._parameters
@property
def url(self):
return url(**self.parameters)
def key(self, key=None):
self._parameters["key"] = key
return Search(**self._parameters)
def query(self, *q):
self._parameters["q"] = [str(tag).strip() for tag in q if tag]
return Search(**self._parameters)
def descending(self):
self._parameters["sd"] = "desc"
return Search(**self._parameters)
def ascending(self):
self._parameters["sd"] = "asc"
return Search(**self._parameters)
def sort_by(self, sf):
self._parameters["sf"] = sf
return Search(**self._parameters)
|
Add check for empty tags
|
Add check for empty tags
|
Python
|
bsd-2-clause
|
joshua-stone/DerPyBooru
|
from .request import url
class Search(object):
def __init__(self, key=None, q=[], sf="created_at", sd="desc"):
self._parameters = {
"key": key,
- "q": q,
+ "q": [str(tag).strip() for tag in q if tag],
"sf": sf,
"sd": sd
}
@property
def parameters(self):
return self._parameters
@property
def url(self):
return url(**self.parameters)
def key(self, key=None):
self._parameters["key"] = key
return Search(**self._parameters)
def query(self, *q):
- self._parameters["q"] = [str(tag).strip() for tag in q]
+ self._parameters["q"] = [str(tag).strip() for tag in q if tag]
return Search(**self._parameters)
def descending(self):
self._parameters["sd"] = "desc"
return Search(**self._parameters)
def ascending(self):
self._parameters["sd"] = "asc"
return Search(**self._parameters)
def sort_by(self, sf):
self._parameters["sf"] = sf
return Search(**self._parameters)
|
Add check for empty tags
|
## Code Before:
from .request import url
class Search(object):
def __init__(self, key=None, q=[], sf="created_at", sd="desc"):
self._parameters = {
"key": key,
"q": q,
"sf": sf,
"sd": sd
}
@property
def parameters(self):
return self._parameters
@property
def url(self):
return url(**self.parameters)
def key(self, key=None):
self._parameters["key"] = key
return Search(**self._parameters)
def query(self, *q):
self._parameters["q"] = [str(tag).strip() for tag in q]
return Search(**self._parameters)
def descending(self):
self._parameters["sd"] = "desc"
return Search(**self._parameters)
def ascending(self):
self._parameters["sd"] = "asc"
return Search(**self._parameters)
def sort_by(self, sf):
self._parameters["sf"] = sf
return Search(**self._parameters)
## Instruction:
Add check for empty tags
## Code After:
from .request import url
class Search(object):
def __init__(self, key=None, q=[], sf="created_at", sd="desc"):
self._parameters = {
"key": key,
"q": [str(tag).strip() for tag in q if tag],
"sf": sf,
"sd": sd
}
@property
def parameters(self):
return self._parameters
@property
def url(self):
return url(**self.parameters)
def key(self, key=None):
self._parameters["key"] = key
return Search(**self._parameters)
def query(self, *q):
self._parameters["q"] = [str(tag).strip() for tag in q if tag]
return Search(**self._parameters)
def descending(self):
self._parameters["sd"] = "desc"
return Search(**self._parameters)
def ascending(self):
self._parameters["sd"] = "asc"
return Search(**self._parameters)
def sort_by(self, sf):
self._parameters["sf"] = sf
return Search(**self._parameters)
|
from .request import url
class Search(object):
def __init__(self, key=None, q=[], sf="created_at", sd="desc"):
self._parameters = {
"key": key,
- "q": q,
+ "q": [str(tag).strip() for tag in q if tag],
"sf": sf,
"sd": sd
}
@property
def parameters(self):
return self._parameters
@property
def url(self):
return url(**self.parameters)
def key(self, key=None):
self._parameters["key"] = key
return Search(**self._parameters)
def query(self, *q):
- self._parameters["q"] = [str(tag).strip() for tag in q]
+ self._parameters["q"] = [str(tag).strip() for tag in q if tag]
? +++++++
return Search(**self._parameters)
def descending(self):
self._parameters["sd"] = "desc"
return Search(**self._parameters)
def ascending(self):
self._parameters["sd"] = "asc"
return Search(**self._parameters)
def sort_by(self, sf):
self._parameters["sf"] = sf
return Search(**self._parameters)
|
54d4551ce8efb16d4a8d02e38b9f223f8f1cd816
|
ab_game.py
|
ab_game.py
|
import board
import pente_exceptions
from ab_state import *
CAPTURE_SCORE_BASE = 120 ** 3
class ABGame():
""" This class acts as a bridge between the AlphaBeta code and my code """
def __init__(self, base_game):
s = self.current_state = ABState()
s.set_state(base_game.current_state)
self.base_game = base_game
def to_move(self, state=None):
if state is None:
state = self.current_state
return state.to_move()
def utility(self, state):
return state.utility()
def successors(self, state, depth):
mn = state.get_move_number()
if mn == 1:
# The first black move is always in the centre
brd_size = self.base_game.get_board().get_size()
centre_pos = (brd_size/2, brd_size/2)
p_i = [centre_pos]
else:
min_priority = 0
if depth > 4:
min_priority = 3
pos_iter = state.get_iter(state.to_move())
p_i = pos_iter.get_iter(state.to_move_colour(), min_priority)
tried_count = 0
for pos in p_i:
# create an AB_State for each possible move from state
succ = state.create_state(pos)
yield pos, succ
tried_count += 1
if depth > 3 and tried_count >= 2:
return
def terminal_test(self, state):
return state.terminal()
|
import board
import pente_exceptions
from ab_state import *
class ABGame():
""" This class acts as a bridge between the AlphaBeta code and my code """
def __init__(self, base_game):
s = self.current_state = ABState()
s.set_state(base_game.current_state)
self.base_game = base_game
def to_move(self, state=None):
if state is None:
state = self.current_state
return state.to_move()
def utility(self, state):
return state.utility()
def successors(self, state, depth):
mn = state.get_move_number()
if mn == 1:
# The first black move is always in the centre
brd_size = self.base_game.get_board().get_size()
centre_pos = (brd_size/2, brd_size/2)
p_i = [centre_pos]
else:
min_priority = 0
pos_iter = state.get_iter(state.to_move())
p_i = pos_iter.get_iter(state.to_move_colour(), min_priority)
tried_count = 0
for pos in p_i:
# create an AB_State for each possible move from state
succ = state.create_state(pos)
yield pos, succ
tried_count += 1
if depth > 3 and tried_count >= 2:
return
def terminal_test(self, state):
return state.terminal()
|
Disable min_priority filter for now
|
Disable min_priority filter for now
|
Python
|
mit
|
cropleyb/pentai,cropleyb/pentai,cropleyb/pentai
|
import board
import pente_exceptions
from ab_state import *
-
- CAPTURE_SCORE_BASE = 120 ** 3
class ABGame():
""" This class acts as a bridge between the AlphaBeta code and my code """
def __init__(self, base_game):
s = self.current_state = ABState()
s.set_state(base_game.current_state)
self.base_game = base_game
def to_move(self, state=None):
if state is None:
state = self.current_state
return state.to_move()
def utility(self, state):
return state.utility()
def successors(self, state, depth):
mn = state.get_move_number()
if mn == 1:
# The first black move is always in the centre
brd_size = self.base_game.get_board().get_size()
centre_pos = (brd_size/2, brd_size/2)
p_i = [centre_pos]
else:
min_priority = 0
- if depth > 4:
- min_priority = 3
pos_iter = state.get_iter(state.to_move())
p_i = pos_iter.get_iter(state.to_move_colour(), min_priority)
tried_count = 0
for pos in p_i:
# create an AB_State for each possible move from state
succ = state.create_state(pos)
yield pos, succ
tried_count += 1
if depth > 3 and tried_count >= 2:
return
def terminal_test(self, state):
return state.terminal()
|
Disable min_priority filter for now
|
## Code Before:
import board
import pente_exceptions
from ab_state import *
CAPTURE_SCORE_BASE = 120 ** 3
class ABGame():
""" This class acts as a bridge between the AlphaBeta code and my code """
def __init__(self, base_game):
s = self.current_state = ABState()
s.set_state(base_game.current_state)
self.base_game = base_game
def to_move(self, state=None):
if state is None:
state = self.current_state
return state.to_move()
def utility(self, state):
return state.utility()
def successors(self, state, depth):
mn = state.get_move_number()
if mn == 1:
# The first black move is always in the centre
brd_size = self.base_game.get_board().get_size()
centre_pos = (brd_size/2, brd_size/2)
p_i = [centre_pos]
else:
min_priority = 0
if depth > 4:
min_priority = 3
pos_iter = state.get_iter(state.to_move())
p_i = pos_iter.get_iter(state.to_move_colour(), min_priority)
tried_count = 0
for pos in p_i:
# create an AB_State for each possible move from state
succ = state.create_state(pos)
yield pos, succ
tried_count += 1
if depth > 3 and tried_count >= 2:
return
def terminal_test(self, state):
return state.terminal()
## Instruction:
Disable min_priority filter for now
## Code After:
import board
import pente_exceptions
from ab_state import *
class ABGame():
""" This class acts as a bridge between the AlphaBeta code and my code """
def __init__(self, base_game):
s = self.current_state = ABState()
s.set_state(base_game.current_state)
self.base_game = base_game
def to_move(self, state=None):
if state is None:
state = self.current_state
return state.to_move()
def utility(self, state):
return state.utility()
def successors(self, state, depth):
mn = state.get_move_number()
if mn == 1:
# The first black move is always in the centre
brd_size = self.base_game.get_board().get_size()
centre_pos = (brd_size/2, brd_size/2)
p_i = [centre_pos]
else:
min_priority = 0
pos_iter = state.get_iter(state.to_move())
p_i = pos_iter.get_iter(state.to_move_colour(), min_priority)
tried_count = 0
for pos in p_i:
# create an AB_State for each possible move from state
succ = state.create_state(pos)
yield pos, succ
tried_count += 1
if depth > 3 and tried_count >= 2:
return
def terminal_test(self, state):
return state.terminal()
|
import board
import pente_exceptions
from ab_state import *
-
- CAPTURE_SCORE_BASE = 120 ** 3
class ABGame():
""" This class acts as a bridge between the AlphaBeta code and my code """
def __init__(self, base_game):
s = self.current_state = ABState()
s.set_state(base_game.current_state)
self.base_game = base_game
def to_move(self, state=None):
if state is None:
state = self.current_state
return state.to_move()
def utility(self, state):
return state.utility()
def successors(self, state, depth):
mn = state.get_move_number()
if mn == 1:
# The first black move is always in the centre
brd_size = self.base_game.get_board().get_size()
centre_pos = (brd_size/2, brd_size/2)
p_i = [centre_pos]
else:
min_priority = 0
- if depth > 4:
- min_priority = 3
pos_iter = state.get_iter(state.to_move())
p_i = pos_iter.get_iter(state.to_move_colour(), min_priority)
tried_count = 0
for pos in p_i:
# create an AB_State for each possible move from state
succ = state.create_state(pos)
yield pos, succ
tried_count += 1
if depth > 3 and tried_count >= 2:
return
def terminal_test(self, state):
return state.terminal()
|
cc7de0147d773722db026d2571cc94c6ee01c9e0
|
new/energies/zeeman.py
|
new/energies/zeeman.py
|
class FixedZeeman(object):
def __init__(self, H, multiplier=1, name='fixedzeeman'):
if not isinstance(H, (list, tuple)) or len(H) != 3:
raise ValueError('H must be a 3-element tuple or list.')
else:
self.H = H
if not isinstance(multiplier, (float, int)):
raise ValueError('Multiplier must be a positive float or int.')
else:
self.multiplier = multiplier
if not isinstance(name, str):
raise ValueError('name must be a string.')
else:
self.name = name
def get_mif(self):
# Create mif string.
mif = '# FixedZeeman\n'
mif += 'Specify Oxs_FixedZeeman:{} '.format(self.name)
mif += '{\n'
mif += '\tfield {\n'
mif += '\t\tOxs_UniformVectorField {\n'
mif += '\t\t\tvector {'
mif += ' {} {} {} '.format(self.H[0], self.H[1], self.H[2])
mif += '}\n'
mif += '\t\t}\n'
mif += '\t}\n'
mif += '\tmultiplier {}\n'.format(self.multiplier)
mif += '}\n\n'
return mif
|
import numpy as np
class FixedZeeman(object):
def __init__(self, H, multiplier=1, name='fixedzeeman'):
if not isinstance(H, (list, tuple, np.ndarray)) or len(H) != 3:
raise ValueError('H must be a 3-element tuple or list.')
else:
self.H = H
if not isinstance(multiplier, (float, int)):
raise ValueError('Multiplier must be a positive float or int.')
else:
self.multiplier = multiplier
if not isinstance(name, str):
raise ValueError('name must be a string.')
else:
self.name = name
def get_mif(self):
# Create mif string.
mif = '# FixedZeeman\n'
mif += 'Specify Oxs_FixedZeeman:{} '.format(self.name)
mif += '{\n'
mif += '\tfield {\n'
mif += '\t\tOxs_UniformVectorField {\n'
mif += '\t\t\tvector {'
mif += ' {} {} {} '.format(self.H[0], self.H[1], self.H[2])
mif += '}\n'
mif += '\t\t}\n'
mif += '\t}\n'
mif += '\tmultiplier {}\n'.format(self.multiplier)
mif += '}\n\n'
return mif
|
Add numpy array as a possibility for setting external magnetic field.
|
Add numpy array as a possibility for setting external magnetic field.
|
Python
|
bsd-2-clause
|
fangohr/oommf-python,fangohr/oommf-python,fangohr/oommf-python
|
+ import numpy as np
+
class FixedZeeman(object):
def __init__(self, H, multiplier=1, name='fixedzeeman'):
- if not isinstance(H, (list, tuple)) or len(H) != 3:
+ if not isinstance(H, (list, tuple, np.ndarray)) or len(H) != 3:
raise ValueError('H must be a 3-element tuple or list.')
else:
self.H = H
if not isinstance(multiplier, (float, int)):
raise ValueError('Multiplier must be a positive float or int.')
else:
self.multiplier = multiplier
if not isinstance(name, str):
raise ValueError('name must be a string.')
else:
self.name = name
def get_mif(self):
# Create mif string.
mif = '# FixedZeeman\n'
mif += 'Specify Oxs_FixedZeeman:{} '.format(self.name)
mif += '{\n'
mif += '\tfield {\n'
mif += '\t\tOxs_UniformVectorField {\n'
mif += '\t\t\tvector {'
mif += ' {} {} {} '.format(self.H[0], self.H[1], self.H[2])
mif += '}\n'
mif += '\t\t}\n'
mif += '\t}\n'
mif += '\tmultiplier {}\n'.format(self.multiplier)
mif += '}\n\n'
return mif
|
Add numpy array as a possibility for setting external magnetic field.
|
## Code Before:
class FixedZeeman(object):
def __init__(self, H, multiplier=1, name='fixedzeeman'):
if not isinstance(H, (list, tuple)) or len(H) != 3:
raise ValueError('H must be a 3-element tuple or list.')
else:
self.H = H
if not isinstance(multiplier, (float, int)):
raise ValueError('Multiplier must be a positive float or int.')
else:
self.multiplier = multiplier
if not isinstance(name, str):
raise ValueError('name must be a string.')
else:
self.name = name
def get_mif(self):
# Create mif string.
mif = '# FixedZeeman\n'
mif += 'Specify Oxs_FixedZeeman:{} '.format(self.name)
mif += '{\n'
mif += '\tfield {\n'
mif += '\t\tOxs_UniformVectorField {\n'
mif += '\t\t\tvector {'
mif += ' {} {} {} '.format(self.H[0], self.H[1], self.H[2])
mif += '}\n'
mif += '\t\t}\n'
mif += '\t}\n'
mif += '\tmultiplier {}\n'.format(self.multiplier)
mif += '}\n\n'
return mif
## Instruction:
Add numpy array as a possibility for setting external magnetic field.
## Code After:
import numpy as np
class FixedZeeman(object):
def __init__(self, H, multiplier=1, name='fixedzeeman'):
if not isinstance(H, (list, tuple, np.ndarray)) or len(H) != 3:
raise ValueError('H must be a 3-element tuple or list.')
else:
self.H = H
if not isinstance(multiplier, (float, int)):
raise ValueError('Multiplier must be a positive float or int.')
else:
self.multiplier = multiplier
if not isinstance(name, str):
raise ValueError('name must be a string.')
else:
self.name = name
def get_mif(self):
# Create mif string.
mif = '# FixedZeeman\n'
mif += 'Specify Oxs_FixedZeeman:{} '.format(self.name)
mif += '{\n'
mif += '\tfield {\n'
mif += '\t\tOxs_UniformVectorField {\n'
mif += '\t\t\tvector {'
mif += ' {} {} {} '.format(self.H[0], self.H[1], self.H[2])
mif += '}\n'
mif += '\t\t}\n'
mif += '\t}\n'
mif += '\tmultiplier {}\n'.format(self.multiplier)
mif += '}\n\n'
return mif
|
+ import numpy as np
+
class FixedZeeman(object):
def __init__(self, H, multiplier=1, name='fixedzeeman'):
- if not isinstance(H, (list, tuple)) or len(H) != 3:
+ if not isinstance(H, (list, tuple, np.ndarray)) or len(H) != 3:
? ++++++++++++
raise ValueError('H must be a 3-element tuple or list.')
else:
self.H = H
if not isinstance(multiplier, (float, int)):
raise ValueError('Multiplier must be a positive float or int.')
else:
self.multiplier = multiplier
if not isinstance(name, str):
raise ValueError('name must be a string.')
else:
self.name = name
def get_mif(self):
# Create mif string.
mif = '# FixedZeeman\n'
mif += 'Specify Oxs_FixedZeeman:{} '.format(self.name)
mif += '{\n'
mif += '\tfield {\n'
mif += '\t\tOxs_UniformVectorField {\n'
mif += '\t\t\tvector {'
mif += ' {} {} {} '.format(self.H[0], self.H[1], self.H[2])
mif += '}\n'
mif += '\t\t}\n'
mif += '\t}\n'
mif += '\tmultiplier {}\n'.format(self.multiplier)
mif += '}\n\n'
return mif
|
b7bb3b0782fcece12531b90a17eda98c4ae59be0
|
notes/templatetags/note_tags.py
|
notes/templatetags/note_tags.py
|
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile
register = Library()
#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
if note is None:
return u'<NULL NOTE>'
if note.read_date is None:
return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
else:
return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
|
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile
register = Library()
#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
if note is None:
return u'<NULL NOTE>'
if note.read_date is None:
return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
else:
return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
if count > 0:
return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
else:
return mark_safe(u'<span>' + escape(count) + u'</span>')
#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
|
Update to display notes count bubble only when new notes are available
|
Update to display notes count bubble only when new notes are available
|
Python
|
agpl-3.0
|
ficlatte/main,HSAR/Ficlatte,stitzelj/Ficlatte,ficlatte/main,HSAR/Ficlatte,stitzelj/Ficlatte
|
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile
register = Library()
#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
- if note is None:
+ if note is None:
- return u'<NULL NOTE>'
+ return u'<NULL NOTE>'
- if note.read_date is None:
+ if note.read_date is None:
- return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
+ return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
- else:
+ else:
- return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
+ return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
- count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
+ count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
+ if count > 0:
- return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
+ return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
+ else:
+ return mark_safe(u'<span>' + escape(count) + u'</span>')
#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
- return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
+ return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
|
Update to display notes count bubble only when new notes are available
|
## Code Before:
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile
register = Library()
#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
if note is None:
return u'<NULL NOTE>'
if note.read_date is None:
return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
else:
return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
## Instruction:
Update to display notes count bubble only when new notes are available
## Code After:
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile
register = Library()
#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
    """Render an HTML link to *note*'s view page; unread notes are bolded.

    Returns the literal placeholder text u'<NULL NOTE>' when note is None.
    """
    if note is None:
        return u'<NULL NOTE>'
    if note.read_date is None:
        # Unread (no read_date yet): wrap the link in <b> so it stands out.
        return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
    else:
        return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
    """Render the number of unread, undeleted notes for *profile* as a span.

    The "inbox-count" CSS class (the highlighted bubble) is applied only when
    at least one unread note exists; otherwise a plain <span> is emitted.
    """
    # NOTE(review): escape() receives an int here -- presumably Django coerces
    # it to text; confirm against django.utils.html.escape.
    count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
    if count > 0:
        return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
    else:
        return mark_safe(u'<span>' + escape(count) + u'</span>')
#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
    """Render a "Message <pen_name>" button linking to the compose page."""
    return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
|
from django.template import Library, Node, TemplateSyntaxError
from django.utils.html import escape
from django.utils.http import urlquote
from django.utils.safestring import mark_safe
from notes.models import Note
from castle.models import Profile
register = Library()
#-----------------------------------------------------------------------------
@register.filter
def note_link(note):
- if note is None:
? ^
+ if note is None:
? ^^^^
- return u'<NULL NOTE>'
? ^^
+ return u'<NULL NOTE>'
? ^^^^^^^^
- if note.read_date is None:
? ^
+ if note.read_date is None:
? ^^^^
- return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
? ^^
+ return mark_safe(u'<b><a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a></b>')
? ^^^^^^^^
- else:
+ else:
- return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
? ^^
+ return mark_safe(u'<a href="/notes/view/' + unicode(note.id) + u'" class="note-link">' + escape(note.subject) + u'</a>')
? ^^^^^^^^
#-----------------------------------------------------------------------------
@register.filter
def inbox_count(profile):
- count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
? ^
+ count = Note.objects.filter(recipient=profile, read_date__isnull=True, recipient_deleted_date__isnull=True).count()
? ^^^^
+ if count > 0:
- return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
+ return mark_safe(u'<span class="inbox-count">' + escape(count) + u'</span>')
? ++++
+ else:
+ return mark_safe(u'<span>' + escape(count) + u'</span>')
#-----------------------------------------------------------------------------
@register.filter
def author_msg(profile):
- return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
? ^
+ return mark_safe(u'<a class="btn btn-success author-msg-btn" href="/notes/compose?recipient=' + escape(profile.pen_name) + u'" type="button"><span class="glyphicon glyphicon-pencil"></span> Message ' + escape(profile.pen_name) + u'</a>')
? ^^^^
|
0e02b72c8c37fa5c51a0036ba67a57c99bc1da86
|
housecanary/__init__.py
|
housecanary/__init__.py
|
from housecanary.apiclient import ApiClient
from housecanary.excel import export_analytics_data_to_excel
from housecanary.excel import export_analytics_data_to_csv
from housecanary.excel import concat_excel_reports
from housecanary.excel import utilities
__version__ = '0.6.5'
|
__version__ = '0.6.5'
from housecanary.apiclient import ApiClient
from housecanary.excel import export_analytics_data_to_excel
from housecanary.excel import export_analytics_data_to_csv
from housecanary.excel import concat_excel_reports
from housecanary.excel import utilities
|
Revert moving the __version__ declaration which broke things
|
Revert moving the __version__ declaration which broke things
|
Python
|
mit
|
housecanary/hc-api-python
|
+ __version__ = '0.6.5'
+
from housecanary.apiclient import ApiClient
from housecanary.excel import export_analytics_data_to_excel
from housecanary.excel import export_analytics_data_to_csv
from housecanary.excel import concat_excel_reports
from housecanary.excel import utilities
- __version__ = '0.6.5'
-
|
Revert moving the __version__ declaration which broke things
|
## Code Before:
from housecanary.apiclient import ApiClient
from housecanary.excel import export_analytics_data_to_excel
from housecanary.excel import export_analytics_data_to_csv
from housecanary.excel import concat_excel_reports
from housecanary.excel import utilities
__version__ = '0.6.5'
## Instruction:
Revert moving the __version__ declaration which broke things
## Code After:
__version__ = '0.6.5'
from housecanary.apiclient import ApiClient
from housecanary.excel import export_analytics_data_to_excel
from housecanary.excel import export_analytics_data_to_csv
from housecanary.excel import concat_excel_reports
from housecanary.excel import utilities
|
+ __version__ = '0.6.5'
+
from housecanary.apiclient import ApiClient
from housecanary.excel import export_analytics_data_to_excel
from housecanary.excel import export_analytics_data_to_csv
from housecanary.excel import concat_excel_reports
from housecanary.excel import utilities
-
- __version__ = '0.6.5'
|
f590080fc4d431b333f73ad548a50bc24d4fcf5b
|
fuzzer/main.py
|
fuzzer/main.py
|
import generator
from ctypes import CDLL
import numpy as np
# Initializes the harness and sets it up for work
harness = CDLL("harness/harness.so")
while True:
t = generator.generate()
harness.register_testcase(t)
try:
exec(t, {'np':np})
except:
# If the exec fails, then we should not store
continue
generator.register(t)
|
import generator
from ctypes import CDLL
import numpy as np
# Initializes the harness and sets it up for work
harness = CDLL("harness/harness.so")
while True:
t = generator.generate()
harness.register_testcase(bytes(t, 'ascii'))
try:
exec(t, {'np':np})
except:
# If the exec fails, then we should not store
continue
generator.register(t)
|
Send char string instead of widechar string
|
Send char string instead of widechar string
|
Python
|
apache-2.0
|
jaybosamiya/fuzzing-numpy,jaybosamiya/fuzzing-numpy,jaybosamiya/fuzzing-numpy
|
import generator
from ctypes import CDLL
import numpy as np
# Initializes the harness and sets it up for work
harness = CDLL("harness/harness.so")
while True:
t = generator.generate()
- harness.register_testcase(t)
+ harness.register_testcase(bytes(t, 'ascii'))
try:
exec(t, {'np':np})
except:
# If the exec fails, then we should not store
continue
generator.register(t)
|
Send char string instead of widechar string
|
## Code Before:
import generator
from ctypes import CDLL
import numpy as np
# Initializes the harness and sets it up for work
harness = CDLL("harness/harness.so")
while True:
t = generator.generate()
harness.register_testcase(t)
try:
exec(t, {'np':np})
except:
# If the exec fails, then we should not store
continue
generator.register(t)
## Instruction:
Send char string instead of widechar string
## Code After:
import generator
from ctypes import CDLL
import numpy as np
# Initializes the harness and sets it up for work
harness = CDLL("harness/harness.so")
# Fuzzing loop: generate a candidate snippet, register it with the native
# harness, and only keep it in the corpus when it executes successfully.
while True:
    t = generator.generate()
    # The C harness expects a narrow (char*) string, so encode the Python
    # string as ASCII bytes before crossing the ctypes boundary.
    harness.register_testcase(bytes(t, 'ascii'))
    try:
        # Run the generated code with numpy exposed as `np`.
        exec(t, {'np':np})
    except:
        # If the exec fails, then we should not store
        continue
    generator.register(t)
|
import generator
from ctypes import CDLL
import numpy as np
# Initializes the harness and sets it up for work
harness = CDLL("harness/harness.so")
while True:
t = generator.generate()
- harness.register_testcase(t)
+ harness.register_testcase(bytes(t, 'ascii'))
? ++ +++++++++++++ +
try:
exec(t, {'np':np})
except:
# If the exec fails, then we should not store
continue
generator.register(t)
|
c4bf617dddd15e77974b000e8fa90750e1761386
|
siteconfig/__init__.py
|
siteconfig/__init__.py
|
from .configobj import Config
config = Config.from_environ()
# Add the data and some of the API as attributes of the top-level package.
globals().update(config)
get = config.get
|
from .configobj import Config
config = Config.from_environ()
# Add the data and some of the API as attributes of the top-level package.
globals().update(config)
get = config.get
get_bool = config.get_bool
|
Add get_bool to package exports
|
Add get_bool to package exports
|
Python
|
bsd-3-clause
|
mikeboers/siteconfig,mikeboers/siteconfig
|
from .configobj import Config
config = Config.from_environ()
# Add the data and some of the API as attributes of the top-level package.
globals().update(config)
+
get = config.get
+ get_bool = config.get_bool
|
Add get_bool to package exports
|
## Code Before:
from .configobj import Config
config = Config.from_environ()
# Add the data and some of the API as attributes of the top-level package.
globals().update(config)
get = config.get
## Instruction:
Add get_bool to package exports
## Code After:
from .configobj import Config
# Build the package-level Config from environment variables.
config = Config.from_environ()
# Add the data and some of the API as attributes of the top-level package.
globals().update(config)
# Re-export bound accessors so callers can use siteconfig.get(...) /
# siteconfig.get_bool(...) directly.
get = config.get
get_bool = config.get_bool
|
from .configobj import Config
config = Config.from_environ()
# Add the data and some of the API as attributes of the top-level package.
globals().update(config)
+
get = config.get
+ get_bool = config.get_bool
|
c0787c468e1b71d7e9db93b5f5990ae9bb506d82
|
pystruct/datasets/dataset_loaders.py
|
pystruct/datasets/dataset_loaders.py
|
import cPickle
from os.path import dirname
from os.path import join
import numpy as np
def load_letters():
"""Load the OCR letters dataset.
This is a chain classification task.
Each example consists of a word, segmented into letters.
The first letter of each word is ommited from the data,
as it was a capital letter (in contrast to all other letters).
"""
module_path = dirname(__file__)
data_file = open(join(module_path, 'letters.pickle'),'rb')
data = cPickle.load(data_file)
# we add an easy to use image representation:
data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
for word in data['data']]
return data
def load_scene():
module_path = dirname(__file__)
data_file = open(join(module_path, 'scene.pickle'))
return cPickle.load(data_file)
def load_snakes():
module_path = dirname(__file__)
data_file = open(join(module_path, 'snakes.pickle'))
return cPickle.load(data_file)
|
import cPickle
from os.path import dirname
from os.path import join
import numpy as np
def load_letters():
"""Load the OCR letters dataset.
This is a chain classification task.
Each example consists of a word, segmented into letters.
The first letter of each word is ommited from the data,
as it was a capital letter (in contrast to all other letters).
"""
module_path = dirname(__file__)
data_file = open(join(module_path, 'letters.pickle'),'rb')
data = cPickle.load(data_file)
# we add an easy to use image representation:
data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
for word in data['data']]
return data
def load_scene():
module_path = dirname(__file__)
data_file = open(join(module_path, 'scene.pickle'),'rb')
return cPickle.load(data_file)
def load_snakes():
module_path = dirname(__file__)
data_file = open(join(module_path, 'snakes.pickle'),'rb')
return cPickle.load(data_file)
|
FIX other two sample data load for Windows
|
FIX other two sample data load for Windows
|
Python
|
bsd-2-clause
|
massmutual/pystruct,pystruct/pystruct,amueller/pystruct,d-mittal/pystruct,wattlebird/pystruct,pystruct/pystruct,d-mittal/pystruct,wattlebird/pystruct,massmutual/pystruct,amueller/pystruct
|
import cPickle
from os.path import dirname
from os.path import join
import numpy as np
def load_letters():
"""Load the OCR letters dataset.
This is a chain classification task.
Each example consists of a word, segmented into letters.
The first letter of each word is ommited from the data,
as it was a capital letter (in contrast to all other letters).
"""
module_path = dirname(__file__)
data_file = open(join(module_path, 'letters.pickle'),'rb')
data = cPickle.load(data_file)
# we add an easy to use image representation:
data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
for word in data['data']]
return data
def load_scene():
module_path = dirname(__file__)
- data_file = open(join(module_path, 'scene.pickle'))
+ data_file = open(join(module_path, 'scene.pickle'),'rb')
return cPickle.load(data_file)
def load_snakes():
module_path = dirname(__file__)
- data_file = open(join(module_path, 'snakes.pickle'))
+ data_file = open(join(module_path, 'snakes.pickle'),'rb')
return cPickle.load(data_file)
|
FIX other two sample data load for Windows
|
## Code Before:
import cPickle
from os.path import dirname
from os.path import join
import numpy as np
def load_letters():
"""Load the OCR letters dataset.
This is a chain classification task.
Each example consists of a word, segmented into letters.
The first letter of each word is ommited from the data,
as it was a capital letter (in contrast to all other letters).
"""
module_path = dirname(__file__)
data_file = open(join(module_path, 'letters.pickle'),'rb')
data = cPickle.load(data_file)
# we add an easy to use image representation:
data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
for word in data['data']]
return data
def load_scene():
module_path = dirname(__file__)
data_file = open(join(module_path, 'scene.pickle'))
return cPickle.load(data_file)
def load_snakes():
module_path = dirname(__file__)
data_file = open(join(module_path, 'snakes.pickle'))
return cPickle.load(data_file)
## Instruction:
FIX other two sample data load for Windows
## Code After:
import cPickle
from os.path import dirname
from os.path import join
import numpy as np
def _load_pickle(filename):
    """Unpickle and return *filename* located next to this module.

    Opens the file in binary mode and closes it again even if unpickling
    raises (the original code leaked the file handle).
    """
    module_path = dirname(__file__)
    with open(join(module_path, filename), 'rb') as data_file:
        return cPickle.load(data_file)
def load_letters():
    """Load the OCR letters dataset.
    This is a chain classification task.
    Each example consists of a word, segmented into letters.
    The first letter of each word is omitted from the data,
    as it was a capital letter (in contrast to all other letters).
    """
    data = _load_pickle('letters.pickle')
    # we add an easy to use image representation:
    data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
                      for word in data['data']]
    return data
def load_scene():
    """Load the scene dataset (returned exactly as stored in the pickle)."""
    return _load_pickle('scene.pickle')
def load_snakes():
    """Load the snakes dataset (returned exactly as stored in the pickle)."""
    return _load_pickle('snakes.pickle')
|
import cPickle
from os.path import dirname
from os.path import join
import numpy as np
def load_letters():
"""Load the OCR letters dataset.
This is a chain classification task.
Each example consists of a word, segmented into letters.
The first letter of each word is ommited from the data,
as it was a capital letter (in contrast to all other letters).
"""
module_path = dirname(__file__)
data_file = open(join(module_path, 'letters.pickle'),'rb')
data = cPickle.load(data_file)
# we add an easy to use image representation:
data['images'] = [np.hstack([l.reshape(16, 8) for l in word])
for word in data['data']]
return data
def load_scene():
module_path = dirname(__file__)
- data_file = open(join(module_path, 'scene.pickle'))
+ data_file = open(join(module_path, 'scene.pickle'),'rb')
? +++++
return cPickle.load(data_file)
def load_snakes():
module_path = dirname(__file__)
- data_file = open(join(module_path, 'snakes.pickle'))
+ data_file = open(join(module_path, 'snakes.pickle'),'rb')
? +++++
return cPickle.load(data_file)
|
866f95cfb0db14da0596efe41a128baf2a3a1cfe
|
django_basic_tinymce_flatpages/admin.py
|
django_basic_tinymce_flatpages/admin.py
|
from django.conf import settings
from django.contrib import admin
from django.contrib.flatpages.admin import FlatpageForm, FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.utils.module_loading import import_string
FLATPAGE_WIDGET = getattr(settings, 'FLATPAGE_WIDGET', 'tinymce.widgets.TinyMCE')
FLATPAGE_WIDGET_KWARGS = getattr(settings, 'FLATPAGE_WIDGET_KWARGS',
{'attrs': {'cols': 100, 'rows': 15}})
class PageForm(FlatpageForm):
class Meta:
model = FlatPage
widgets = {
'content': import_string(FLATPAGE_WIDGET)(**FLATPAGE_WIDGET_KWARGS),
}
class PageAdmin(FlatPageAdmin):
"""
Page Admin
"""
form = PageForm
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, PageAdmin)
|
from django.conf import settings
from django.contrib import admin
from django.contrib.flatpages.admin import FlatpageForm, FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.utils.module_loading import import_string
FLATPAGE_WIDGET = getattr(settings, 'FLATPAGE_WIDGET', 'tinymce.widgets.TinyMCE')
FLATPAGE_WIDGET_KWARGS = getattr(settings, 'FLATPAGE_WIDGET_KWARGS',
{'attrs': {'cols': 100, 'rows': 15}})
class PageForm(FlatpageForm):
class Meta:
model = FlatPage
widgets = {
'content': import_string(FLATPAGE_WIDGET)(**FLATPAGE_WIDGET_KWARGS),
}
fields = '__all__'
class PageAdmin(FlatPageAdmin):
"""
Page Admin
"""
form = PageForm
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, PageAdmin)
|
Fix form: PageForm needs updating.
|
Fix form PageForm needs updating.
|
Python
|
bsd-3-clause
|
ad-m/django-basic-tinymce-flatpages
|
from django.conf import settings
from django.contrib import admin
from django.contrib.flatpages.admin import FlatpageForm, FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.utils.module_loading import import_string
FLATPAGE_WIDGET = getattr(settings, 'FLATPAGE_WIDGET', 'tinymce.widgets.TinyMCE')
FLATPAGE_WIDGET_KWARGS = getattr(settings, 'FLATPAGE_WIDGET_KWARGS',
{'attrs': {'cols': 100, 'rows': 15}})
class PageForm(FlatpageForm):
class Meta:
model = FlatPage
widgets = {
'content': import_string(FLATPAGE_WIDGET)(**FLATPAGE_WIDGET_KWARGS),
}
+ fields = '__all__'
class PageAdmin(FlatPageAdmin):
"""
Page Admin
"""
form = PageForm
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, PageAdmin)
|
Fix form PageForm needs updating.
|
## Code Before:
from django.conf import settings
from django.contrib import admin
from django.contrib.flatpages.admin import FlatpageForm, FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.utils.module_loading import import_string
FLATPAGE_WIDGET = getattr(settings, 'FLATPAGE_WIDGET', 'tinymce.widgets.TinyMCE')
FLATPAGE_WIDGET_KWARGS = getattr(settings, 'FLATPAGE_WIDGET_KWARGS',
{'attrs': {'cols': 100, 'rows': 15}})
class PageForm(FlatpageForm):
class Meta:
model = FlatPage
widgets = {
'content': import_string(FLATPAGE_WIDGET)(**FLATPAGE_WIDGET_KWARGS),
}
class PageAdmin(FlatPageAdmin):
"""
Page Admin
"""
form = PageForm
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, PageAdmin)
## Instruction:
Fix form PageForm needs updating.
## Code After:
from django.conf import settings
from django.contrib import admin
from django.contrib.flatpages.admin import FlatpageForm, FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.utils.module_loading import import_string
FLATPAGE_WIDGET = getattr(settings, 'FLATPAGE_WIDGET', 'tinymce.widgets.TinyMCE')
FLATPAGE_WIDGET_KWARGS = getattr(settings, 'FLATPAGE_WIDGET_KWARGS',
{'attrs': {'cols': 100, 'rows': 15}})
class PageForm(FlatpageForm):
    """Flatpage form whose ``content`` field uses a configurable rich-text widget.

    The widget class and its keyword arguments come from the FLATPAGE_WIDGET
    and FLATPAGE_WIDGET_KWARGS settings (defaulting to TinyMCE).
    """
    class Meta:
        model = FlatPage
        widgets = {
            'content': import_string(FLATPAGE_WIDGET)(**FLATPAGE_WIDGET_KWARGS),
        }
        # Newer Django requires ModelForms to declare their fields explicitly.
        fields = '__all__'
class PageAdmin(FlatPageAdmin):
    """
    Page Admin
    """
    form = PageForm
# Swap the stock FlatPage admin for one that uses the rich-text form.
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, PageAdmin)
|
from django.conf import settings
from django.contrib import admin
from django.contrib.flatpages.admin import FlatpageForm, FlatPageAdmin
from django.contrib.flatpages.models import FlatPage
from django.utils.module_loading import import_string
FLATPAGE_WIDGET = getattr(settings, 'FLATPAGE_WIDGET', 'tinymce.widgets.TinyMCE')
FLATPAGE_WIDGET_KWARGS = getattr(settings, 'FLATPAGE_WIDGET_KWARGS',
{'attrs': {'cols': 100, 'rows': 15}})
class PageForm(FlatpageForm):
class Meta:
model = FlatPage
widgets = {
'content': import_string(FLATPAGE_WIDGET)(**FLATPAGE_WIDGET_KWARGS),
}
+ fields = '__all__'
class PageAdmin(FlatPageAdmin):
"""
Page Admin
"""
form = PageForm
admin.site.unregister(FlatPage)
admin.site.register(FlatPage, PageAdmin)
|
6672a0634265e09366a9274d3c2a04afca49cf02
|
dirtree_filter.py
|
dirtree_filter.py
|
class DirTreeFilter(object):
def __init__(self, show_hidden=False, show_files=True, show_dirs=True):
self.show_hidden = show_hidden
self.show_files = show_files
self.show_dirs = show_dirs
self.hidden_exts = [".pyc", ".pyo", ".o", ".a", ".obj", ".lib", ".swp", "~"]
self.hidden_dirs = ["CVS", "__pycache__"]
def __call__(self, info):
if info.hidden and not self.show_hidden:
return False
if info.is_file and not self.show_files:
return False
elif info.is_dir:
if not self.show_dirs:
return False
if info.filename in self.hidden_dirs:
return False
for ext in self.hidden_exts:
if info.filename.endswith(ext):
return False
if info.filename.startswith(".#"):
return False
return True
|
import re

def compile_file_patterns(patterns):
    """Compile glob-style patterns ("*" wildcards) into one anchored regex.

    Bug fix: the anchors were reversed ("$...^"), so the compiled regex could
    never match a non-empty filename and nothing was ever hidden.  The
    alternation must be anchored "^...$" to full-match the filename.
    """
    return re.compile("^(?:%s)$" % "|".join("(%s)" % re.escape(p).replace("\\*", ".*") for p in patterns))

hidden_files = [".*", "*~", "*.swp", "*.pyc", "*.pyo", "*.o", "*.a", "*.obj", "*.lib", "*.class"]
hidden_dirs = ["CVS", "__pycache__"]

class DirTreeFilter(object):
    """Predicate deciding whether a directory-tree entry should be shown."""
    def __init__(self, show_hidden=False, show_files=True, show_dirs=True,
                 hidden_files=hidden_files, hidden_dirs=hidden_dirs):
        self.show_hidden = show_hidden
        self.show_files = show_files
        self.show_dirs = show_dirs
        # Pre-compiled pattern sets for files and directories respectively.
        self.r_hidden_file = compile_file_patterns(hidden_files)
        self.r_hidden_dir = compile_file_patterns(hidden_dirs)

    def __call__(self, info):
        """Return True if the entry described by *info* should be displayed."""
        if info.hidden and not self.show_hidden:
            return False
        if info.is_file and not self.show_files:
            return False
        if info.is_dir:
            if not self.show_dirs:
                return False
            if self.r_hidden_dir.match(info.filename):
                return False
        else:
            if self.r_hidden_file.match(info.filename):
                return False
        return True
|
Use file patterns compiled to regular expressions to match hidden files.
|
Use file patterns compiled to regular expressions to match hidden files.
|
Python
|
mit
|
shaurz/devo
|
+ import re
+
+ def compile_file_patterns(patterns):
+ return re.compile("$%s^" % "|".join("(%s)" % re.escape(p).replace("\\*", ".*") for p in patterns))
+
+ hidden_files = [".*", "*~", "*.swp", "*.pyc", "*.pyo", "*.o", "*.a", "*.obj", "*.lib", "*.class"]
+ hidden_dirs = ["CVS", "__pycache__"]
+
class DirTreeFilter(object):
- def __init__(self, show_hidden=False, show_files=True, show_dirs=True):
+ def __init__(self, show_hidden=False, show_files=True, show_dirs=True,
+ hidden_files=hidden_files, hidden_dirs=hidden_dirs):
self.show_hidden = show_hidden
self.show_files = show_files
self.show_dirs = show_dirs
- self.hidden_exts = [".pyc", ".pyo", ".o", ".a", ".obj", ".lib", ".swp", "~"]
- self.hidden_dirs = ["CVS", "__pycache__"]
+ self.r_hidden_file = compile_file_patterns(hidden_files)
+ self.r_hidden_dir = compile_file_patterns(hidden_dirs)
def __call__(self, info):
if info.hidden and not self.show_hidden:
return False
if info.is_file and not self.show_files:
return False
- elif info.is_dir:
+ if info.is_dir:
if not self.show_dirs:
return False
- if info.filename in self.hidden_dirs:
+ if self.r_hidden_dir.match(info.filename):
return False
- for ext in self.hidden_exts:
- if info.filename.endswith(ext):
+ else:
+ if self.r_hidden_file.match(info.filename):
return False
- if info.filename.startswith(".#"):
- return False
return True
|
Use file patterns compiled to regular expressions to match hidden files.
|
## Code Before:
class DirTreeFilter(object):
def __init__(self, show_hidden=False, show_files=True, show_dirs=True):
self.show_hidden = show_hidden
self.show_files = show_files
self.show_dirs = show_dirs
self.hidden_exts = [".pyc", ".pyo", ".o", ".a", ".obj", ".lib", ".swp", "~"]
self.hidden_dirs = ["CVS", "__pycache__"]
def __call__(self, info):
if info.hidden and not self.show_hidden:
return False
if info.is_file and not self.show_files:
return False
elif info.is_dir:
if not self.show_dirs:
return False
if info.filename in self.hidden_dirs:
return False
for ext in self.hidden_exts:
if info.filename.endswith(ext):
return False
if info.filename.startswith(".#"):
return False
return True
## Instruction:
Use file patterns compiled to regular expressions to match hidden files.
## Code After:
import re

def compile_file_patterns(patterns):
    """Turn shell-style wildcard patterns into a single compiled regex.

    Each "*" becomes ".*"; every other character is escaped literally.  The
    whole alternation is anchored "^...$" so a pattern must match the complete
    filename.  (Previously the anchors were written "$...^", which cannot
    match any non-empty string, so the filter silently hid nothing.)
    """
    return re.compile("^(?:%s)$" % "|".join("(%s)" % re.escape(p).replace("\\*", ".*") for p in patterns))

hidden_files = [".*", "*~", "*.swp", "*.pyc", "*.pyo", "*.o", "*.a", "*.obj", "*.lib", "*.class"]
hidden_dirs = ["CVS", "__pycache__"]

class DirTreeFilter(object):
    """Callable filter for directory-tree entries.

    Returns True when an entry should be shown, honouring the show_* flags
    and the hidden file/directory pattern lists.
    """
    def __init__(self, show_hidden=False, show_files=True, show_dirs=True,
                 hidden_files=hidden_files, hidden_dirs=hidden_dirs):
        self.show_hidden = show_hidden
        self.show_files = show_files
        self.show_dirs = show_dirs
        self.r_hidden_file = compile_file_patterns(hidden_files)
        self.r_hidden_dir = compile_file_patterns(hidden_dirs)

    def __call__(self, info):
        # OS-level hidden entries are suppressed unless explicitly enabled.
        if info.hidden and not self.show_hidden:
            return False
        if info.is_file and not self.show_files:
            return False
        if info.is_dir:
            if not self.show_dirs:
                return False
            if self.r_hidden_dir.match(info.filename):
                return False
        else:
            if self.r_hidden_file.match(info.filename):
                return False
        return True
|
+ import re
+
+ def compile_file_patterns(patterns):
+ return re.compile("$%s^" % "|".join("(%s)" % re.escape(p).replace("\\*", ".*") for p in patterns))
+
+ hidden_files = [".*", "*~", "*.swp", "*.pyc", "*.pyo", "*.o", "*.a", "*.obj", "*.lib", "*.class"]
+ hidden_dirs = ["CVS", "__pycache__"]
+
class DirTreeFilter(object):
- def __init__(self, show_hidden=False, show_files=True, show_dirs=True):
? ^^
+ def __init__(self, show_hidden=False, show_files=True, show_dirs=True,
? ^
+ hidden_files=hidden_files, hidden_dirs=hidden_dirs):
self.show_hidden = show_hidden
self.show_files = show_files
self.show_dirs = show_dirs
- self.hidden_exts = [".pyc", ".pyo", ".o", ".a", ".obj", ".lib", ".swp", "~"]
- self.hidden_dirs = ["CVS", "__pycache__"]
+ self.r_hidden_file = compile_file_patterns(hidden_files)
+ self.r_hidden_dir = compile_file_patterns(hidden_dirs)
def __call__(self, info):
if info.hidden and not self.show_hidden:
return False
if info.is_file and not self.show_files:
return False
- elif info.is_dir:
? --
+ if info.is_dir:
if not self.show_dirs:
return False
- if info.filename in self.hidden_dirs:
+ if self.r_hidden_dir.match(info.filename):
return False
- for ext in self.hidden_exts:
- if info.filename.endswith(ext):
+ else:
+ if self.r_hidden_file.match(info.filename):
return False
- if info.filename.startswith(".#"):
- return False
return True
|
aa5259efac8f7fbe8e2afd263198feaaa45fc4c3
|
tingbot/platform_specific/__init__.py
|
tingbot/platform_specific/__init__.py
|
import platform
def is_tingbot():
"""return True if running as a tingbot. We can update this function to be more smart in future"""
return platform.machine().startswith('armv71')
if platform.system() == 'Darwin':
from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
from pi import fixup_env, create_main_surface, register_button_callback
else:
from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
|
import platform, os
def is_tingbot():
"""
Return True if running as a tingbot.
"""
# TB_RUN_ON_LCD is an environment variable set by tbprocessd when running tingbot apps.
return 'TB_RUN_ON_LCD' in os.environ
if platform.system() == 'Darwin':
from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
from pi import fixup_env, create_main_surface, register_button_callback
else:
from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
|
Change test for running on Tingbot
|
Change test for running on Tingbot
|
Python
|
bsd-2-clause
|
furbrain/tingbot-python
|
- import platform
+ import platform, os
def is_tingbot():
- """return True if running as a tingbot. We can update this function to be more smart in future"""
- return platform.machine().startswith('armv71')
+ """
+ Return True if running as a tingbot.
+ """
+ # TB_RUN_ON_LCD is an environment variable set by tbprocessd when running tingbot apps.
+ return 'TB_RUN_ON_LCD' in os.environ
if platform.system() == 'Darwin':
from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
from pi import fixup_env, create_main_surface, register_button_callback
else:
from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
|
Change test for running on Tingbot
|
## Code Before:
import platform
def is_tingbot():
"""return True if running as a tingbot. We can update this function to be more smart in future"""
return platform.machine().startswith('armv71')
if platform.system() == 'Darwin':
from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
from pi import fixup_env, create_main_surface, register_button_callback
else:
from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
## Instruction:
Change test for running on Tingbot
## Code After:
import platform, os
def is_tingbot():
"""
Return True if running as a tingbot.
"""
# TB_RUN_ON_LCD is an environment variable set by tbprocessd when running tingbot apps.
return 'TB_RUN_ON_LCD' in os.environ
if platform.system() == 'Darwin':
from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
from pi import fixup_env, create_main_surface, register_button_callback
else:
from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
|
- import platform
+ import platform, os
? ++++
def is_tingbot():
- """return True if running as a tingbot. We can update this function to be more smart in future"""
- return platform.machine().startswith('armv71')
+ """
+ Return True if running as a tingbot.
+ """
+ # TB_RUN_ON_LCD is an environment variable set by tbprocessd when running tingbot apps.
+ return 'TB_RUN_ON_LCD' in os.environ
if platform.system() == 'Darwin':
from osx import fixup_env, create_main_surface, register_button_callback
elif is_tingbot():
from pi import fixup_env, create_main_surface, register_button_callback
else:
from sdl_wrapper import fixup_env, create_main_surface, register_button_callback
|
ac5b065e948a923f71b5f3bc9e98bcb8791a46c9
|
git_code_debt/write_logic.py
|
git_code_debt/write_logic.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
def insert_metric_ids(db, metric_ids):
for metric_id in metric_ids:
db.execute(
"INSERT INTO metric_names ('name') VALUES (?)", [metric_id],
)
def insert_metric_values(db, metric_values, metric_mapping, commit):
for metric_name, value in metric_values.items():
metric_id = metric_mapping[metric_name]
db.execute(
'\n'.join((
'INSERT INTO metric_data',
'(sha, metric_id, timestamp, running_value)',
'VALUES (?, ?, ?, ?)',
)),
[commit.sha, metric_id, commit.date, value],
)
def insert_metric_changes(db, metrics, metric_mapping, commit):
"""Insert into the metric_changes tables.
:param metrics: `list` of `Metric` objects
:param dict metric_mapping: Maps metric names to ids
:param Commit commit:
"""
for metric in metrics:
# Sparse table, ignore zero.
if metric.value == 0:
continue
metric_id = metric_mapping[metric.name]
db.execute(
'\n'.join((
'INSERT INTO metric_changes',
'(sha, metric_id, value)',
'VALUES (?, ?, ?)',
)),
[commit.sha, metric_id, metric.value],
)
|
from __future__ import absolute_import
from __future__ import unicode_literals
def insert_metric_ids(db, metric_ids):
values = [[x] for x in metric_ids]
db.executemany("INSERT INTO metric_names ('name') VALUES (?)", values)
def insert_metric_values(db, metric_values, metric_mapping, commit):
values = [
[commit.sha, metric_mapping[metric_name], commit.date, value]
for metric_name, value in metric_values.items()
]
db.executemany(
'INSERT INTO metric_data\n'
'(sha, metric_id, timestamp, running_value)\n'
'VALUES (?, ?, ?, ?)\n',
values,
)
def insert_metric_changes(db, metrics, metric_mapping, commit):
"""Insert into the metric_changes tables.
:param metrics: `list` of `Metric` objects
:param dict metric_mapping: Maps metric names to ids
:param Commit commit:
"""
values = [
[commit.sha, metric_mapping[metric.name], metric.value]
for metric in metrics
# Sparse table, ignore zero.
if metric.value != 0
]
db.executemany(
'INSERT INTO metric_changes (sha, metric_id, value) VALUES (?, ?, ?)',
values,
)
|
Use executemany instead of execute in write logic
|
Use executemany instead of execute in write logic
|
Python
|
mit
|
Yelp/git-code-debt,Yelp/git-code-debt,Yelp/git-code-debt,Yelp/git-code-debt
|
from __future__ import absolute_import
from __future__ import unicode_literals
def insert_metric_ids(db, metric_ids):
+ values = [[x] for x in metric_ids]
+ db.executemany("INSERT INTO metric_names ('name') VALUES (?)", values)
- for metric_id in metric_ids:
- db.execute(
- "INSERT INTO metric_names ('name') VALUES (?)", [metric_id],
- )
def insert_metric_values(db, metric_values, metric_mapping, commit):
+ values = [
+ [commit.sha, metric_mapping[metric_name], commit.date, value]
- for metric_name, value in metric_values.items():
+ for metric_name, value in metric_values.items()
- metric_id = metric_mapping[metric_name]
+ ]
- db.execute(
+ db.executemany(
- '\n'.join((
- 'INSERT INTO metric_data',
+ 'INSERT INTO metric_data\n'
- '(sha, metric_id, timestamp, running_value)',
+ '(sha, metric_id, timestamp, running_value)\n'
- 'VALUES (?, ?, ?, ?)',
+ 'VALUES (?, ?, ?, ?)\n',
+ values,
+ )
- )),
- [commit.sha, metric_id, commit.date, value],
- )
def insert_metric_changes(db, metrics, metric_mapping, commit):
"""Insert into the metric_changes tables.
:param metrics: `list` of `Metric` objects
:param dict metric_mapping: Maps metric names to ids
:param Commit commit:
"""
+ values = [
+ [commit.sha, metric_mapping[metric.name], metric.value]
- for metric in metrics:
+ for metric in metrics
# Sparse table, ignore zero.
- if metric.value == 0:
+ if metric.value != 0
- continue
+ ]
+ db.executemany(
+ 'INSERT INTO metric_changes (sha, metric_id, value) VALUES (?, ?, ?)',
+ values,
+ )
- metric_id = metric_mapping[metric.name]
- db.execute(
- '\n'.join((
- 'INSERT INTO metric_changes',
- '(sha, metric_id, value)',
- 'VALUES (?, ?, ?)',
- )),
- [commit.sha, metric_id, metric.value],
- )
-
|
Use executemany instead of execute in write logic
|
## Code Before:
from __future__ import absolute_import
from __future__ import unicode_literals
def insert_metric_ids(db, metric_ids):
for metric_id in metric_ids:
db.execute(
"INSERT INTO metric_names ('name') VALUES (?)", [metric_id],
)
def insert_metric_values(db, metric_values, metric_mapping, commit):
for metric_name, value in metric_values.items():
metric_id = metric_mapping[metric_name]
db.execute(
'\n'.join((
'INSERT INTO metric_data',
'(sha, metric_id, timestamp, running_value)',
'VALUES (?, ?, ?, ?)',
)),
[commit.sha, metric_id, commit.date, value],
)
def insert_metric_changes(db, metrics, metric_mapping, commit):
"""Insert into the metric_changes tables.
:param metrics: `list` of `Metric` objects
:param dict metric_mapping: Maps metric names to ids
:param Commit commit:
"""
for metric in metrics:
# Sparse table, ignore zero.
if metric.value == 0:
continue
metric_id = metric_mapping[metric.name]
db.execute(
'\n'.join((
'INSERT INTO metric_changes',
'(sha, metric_id, value)',
'VALUES (?, ?, ?)',
)),
[commit.sha, metric_id, metric.value],
)
## Instruction:
Use executemany instead of execute in write logic
## Code After:
from __future__ import absolute_import
from __future__ import unicode_literals
def insert_metric_ids(db, metric_ids):
values = [[x] for x in metric_ids]
db.executemany("INSERT INTO metric_names ('name') VALUES (?)", values)
def insert_metric_values(db, metric_values, metric_mapping, commit):
values = [
[commit.sha, metric_mapping[metric_name], commit.date, value]
for metric_name, value in metric_values.items()
]
db.executemany(
'INSERT INTO metric_data\n'
'(sha, metric_id, timestamp, running_value)\n'
'VALUES (?, ?, ?, ?)\n',
values,
)
def insert_metric_changes(db, metrics, metric_mapping, commit):
"""Insert into the metric_changes tables.
:param metrics: `list` of `Metric` objects
:param dict metric_mapping: Maps metric names to ids
:param Commit commit:
"""
values = [
[commit.sha, metric_mapping[metric.name], metric.value]
for metric in metrics
# Sparse table, ignore zero.
if metric.value != 0
]
db.executemany(
'INSERT INTO metric_changes (sha, metric_id, value) VALUES (?, ?, ?)',
values,
)
|
from __future__ import absolute_import
from __future__ import unicode_literals
def insert_metric_ids(db, metric_ids):
+ values = [[x] for x in metric_ids]
+ db.executemany("INSERT INTO metric_names ('name') VALUES (?)", values)
- for metric_id in metric_ids:
- db.execute(
- "INSERT INTO metric_names ('name') VALUES (?)", [metric_id],
- )
def insert_metric_values(db, metric_values, metric_mapping, commit):
+ values = [
+ [commit.sha, metric_mapping[metric_name], commit.date, value]
- for metric_name, value in metric_values.items():
? -
+ for metric_name, value in metric_values.items()
? ++++
- metric_id = metric_mapping[metric_name]
+ ]
- db.execute(
? ----
+ db.executemany(
? ++++
- '\n'.join((
- 'INSERT INTO metric_data',
? -------- -
+ 'INSERT INTO metric_data\n'
? ++
- '(sha, metric_id, timestamp, running_value)',
? -------- -
+ '(sha, metric_id, timestamp, running_value)\n'
? ++
- 'VALUES (?, ?, ?, ?)',
? --------
+ 'VALUES (?, ?, ?, ?)\n',
? ++
+ values,
+ )
- )),
- [commit.sha, metric_id, commit.date, value],
- )
def insert_metric_changes(db, metrics, metric_mapping, commit):
"""Insert into the metric_changes tables.
:param metrics: `list` of `Metric` objects
:param dict metric_mapping: Maps metric names to ids
:param Commit commit:
"""
+ values = [
+ [commit.sha, metric_mapping[metric.name], metric.value]
- for metric in metrics:
? -
+ for metric in metrics
? ++++
# Sparse table, ignore zero.
- if metric.value == 0:
? ^ -
+ if metric.value != 0
? ^
+ ]
- continue
-
- metric_id = metric_mapping[metric.name]
- db.execute(
? ----
+ db.executemany(
? ++++
+ 'INSERT INTO metric_changes (sha, metric_id, value) VALUES (?, ?, ?)',
+ values,
+ )
- '\n'.join((
- 'INSERT INTO metric_changes',
- '(sha, metric_id, value)',
- 'VALUES (?, ?, ?)',
- )),
- [commit.sha, metric_id, metric.value],
- )
|
f511af4fc89a170914a86de1704e8e842ffd6b6d
|
test/test_configuration.py
|
test/test_configuration.py
|
"""Test coordinate classes."""
import sys
try:
import unittest2 as unittest # Python 2.6
except ImportError:
import unittest
import heatmap as hm
class Tests(unittest.TestCase):
# To remove Python 3's
# "DeprecationWarning: Please use assertRaisesRegex instead"
if sys.version_info[0] == 2:
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
def test_basic(self):
'''Test Configuration class.'''
# Act
config = hm.Configuration(use_defaults=True)
# Assert
self.assertEqual(config.margin, 0)
self.assertEqual(config.frequency, 1)
def test_fill_missing_no_input(self):
'''Test Configuration class.'''
# Arrange
config = hm.Configuration(use_defaults=True)
# Act / Assert
with self.assertRaisesRegex(ValueError, "no input specified"):
config.fill_missing()
if __name__ == '__main__':
unittest.main()
|
"""Test coordinate classes."""
import sys
try:
import unittest2 as unittest # Python 2.6
except ImportError:
import unittest
ROOT_DIR = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
sys.path.append(ROOT_DIR)
import heatmap as hm
class Tests(unittest.TestCase):
# To remove Python 3's
# "DeprecationWarning: Please use assertRaisesRegex instead"
if sys.version_info[0] == 2:
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
def test_basic(self):
'''Test Configuration class.'''
# Act
config = hm.Configuration(use_defaults=True)
# Assert
self.assertEqual(config.margin, 0)
self.assertEqual(config.frequency, 1)
def test_fill_missing_no_input(self):
'''Test Configuration class.'''
# Arrange
config = hm.Configuration(use_defaults=True)
# Act / Assert
with self.assertRaisesRegex(ValueError, "no input specified"):
config.fill_missing()
if __name__ == '__main__':
unittest.main()
|
Update sys.path to import heatmap
|
Update sys.path to import heatmap
|
Python
|
agpl-3.0
|
hugovk/heatmap,hugovk/heatmap,sethoscope/heatmap,sethoscope/heatmap
|
"""Test coordinate classes."""
import sys
try:
import unittest2 as unittest # Python 2.6
except ImportError:
import unittest
+ ROOT_DIR = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
+ sys.path.append(ROOT_DIR)
import heatmap as hm
class Tests(unittest.TestCase):
# To remove Python 3's
# "DeprecationWarning: Please use assertRaisesRegex instead"
if sys.version_info[0] == 2:
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
def test_basic(self):
'''Test Configuration class.'''
-
# Act
config = hm.Configuration(use_defaults=True)
# Assert
self.assertEqual(config.margin, 0)
self.assertEqual(config.frequency, 1)
def test_fill_missing_no_input(self):
'''Test Configuration class.'''
# Arrange
config = hm.Configuration(use_defaults=True)
# Act / Assert
with self.assertRaisesRegex(ValueError, "no input specified"):
config.fill_missing()
if __name__ == '__main__':
unittest.main()
|
Update sys.path to import heatmap
|
## Code Before:
"""Test coordinate classes."""
import sys
try:
import unittest2 as unittest # Python 2.6
except ImportError:
import unittest
import heatmap as hm
class Tests(unittest.TestCase):
# To remove Python 3's
# "DeprecationWarning: Please use assertRaisesRegex instead"
if sys.version_info[0] == 2:
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
def test_basic(self):
'''Test Configuration class.'''
# Act
config = hm.Configuration(use_defaults=True)
# Assert
self.assertEqual(config.margin, 0)
self.assertEqual(config.frequency, 1)
def test_fill_missing_no_input(self):
'''Test Configuration class.'''
# Arrange
config = hm.Configuration(use_defaults=True)
# Act / Assert
with self.assertRaisesRegex(ValueError, "no input specified"):
config.fill_missing()
if __name__ == '__main__':
unittest.main()
## Instruction:
Update sys.path to import heatmap
## Code After:
"""Test coordinate classes."""
import sys
try:
import unittest2 as unittest # Python 2.6
except ImportError:
import unittest
ROOT_DIR = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
sys.path.append(ROOT_DIR)
import heatmap as hm
class Tests(unittest.TestCase):
# To remove Python 3's
# "DeprecationWarning: Please use assertRaisesRegex instead"
if sys.version_info[0] == 2:
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
def test_basic(self):
'''Test Configuration class.'''
# Act
config = hm.Configuration(use_defaults=True)
# Assert
self.assertEqual(config.margin, 0)
self.assertEqual(config.frequency, 1)
def test_fill_missing_no_input(self):
'''Test Configuration class.'''
# Arrange
config = hm.Configuration(use_defaults=True)
# Act / Assert
with self.assertRaisesRegex(ValueError, "no input specified"):
config.fill_missing()
if __name__ == '__main__':
unittest.main()
|
"""Test coordinate classes."""
import sys
try:
import unittest2 as unittest # Python 2.6
except ImportError:
import unittest
+ ROOT_DIR = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
+ sys.path.append(ROOT_DIR)
import heatmap as hm
class Tests(unittest.TestCase):
# To remove Python 3's
# "DeprecationWarning: Please use assertRaisesRegex instead"
if sys.version_info[0] == 2:
assertRaisesRegex = unittest.TestCase.assertRaisesRegexp
def test_basic(self):
'''Test Configuration class.'''
-
# Act
config = hm.Configuration(use_defaults=True)
# Assert
self.assertEqual(config.margin, 0)
self.assertEqual(config.frequency, 1)
def test_fill_missing_no_input(self):
'''Test Configuration class.'''
# Arrange
config = hm.Configuration(use_defaults=True)
# Act / Assert
with self.assertRaisesRegex(ValueError, "no input specified"):
config.fill_missing()
if __name__ == '__main__':
unittest.main()
|
309439f65bb668aba85a31a46b2633a46ee55777
|
apps/careeropportunity/migrations/0001_initial.py
|
apps/careeropportunity/migrations/0001_initial.py
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_squashed_0003_company_image'),
]
operations = [
migrations.CreateModel(
name='CareerOpportunity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name='tittel')),
('ingress', models.CharField(max_length=250, verbose_name='ingress')),
('description', models.TextField(verbose_name='beskrivelse')),
('start', models.DateTimeField(verbose_name='aktiv fra')),
('end', models.DateTimeField(verbose_name='aktiv til')),
('featured', models.BooleanField(default=False, verbose_name='fremhevet')),
('company', models.ForeignKey(related_name='company', to='companyprofile.Company')),
],
options={
'verbose_name': 'karrieremulighet',
'verbose_name_plural': 'karrieremuligheter',
'permissions': (('view_careeropportunity', 'View CareerOpportunity'),),
},
bases=(models.Model,),
),
]
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CareerOpportunity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name='tittel')),
('ingress', models.CharField(max_length=250, verbose_name='ingress')),
('description', models.TextField(verbose_name='beskrivelse')),
('start', models.DateTimeField(verbose_name='aktiv fra')),
('end', models.DateTimeField(verbose_name='aktiv til')),
('featured', models.BooleanField(default=False, verbose_name='fremhevet')),
('company', models.ForeignKey(related_name='company', to='companyprofile.Company')),
],
options={
'verbose_name': 'karrieremulighet',
'verbose_name_plural': 'karrieremuligheter',
'permissions': (('view_careeropportunity', 'View CareerOpportunity'),),
},
bases=(models.Model,),
),
]
|
Revert "Change careeropportunity migration dep"
|
Revert "Change careeropportunity migration dep"
This reverts commit 60fdfab7e3b557e46276c225ff159f5773930525.
|
Python
|
mit
|
dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
- ('companyprofile', '0001_squashed_0003_company_image'),
+ ('companyprofile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CareerOpportunity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name='tittel')),
('ingress', models.CharField(max_length=250, verbose_name='ingress')),
('description', models.TextField(verbose_name='beskrivelse')),
('start', models.DateTimeField(verbose_name='aktiv fra')),
('end', models.DateTimeField(verbose_name='aktiv til')),
('featured', models.BooleanField(default=False, verbose_name='fremhevet')),
('company', models.ForeignKey(related_name='company', to='companyprofile.Company')),
],
options={
'verbose_name': 'karrieremulighet',
'verbose_name_plural': 'karrieremuligheter',
'permissions': (('view_careeropportunity', 'View CareerOpportunity'),),
},
bases=(models.Model,),
),
]
|
Revert "Change careeropportunity migration dep"
|
## Code Before:
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_squashed_0003_company_image'),
]
operations = [
migrations.CreateModel(
name='CareerOpportunity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name='tittel')),
('ingress', models.CharField(max_length=250, verbose_name='ingress')),
('description', models.TextField(verbose_name='beskrivelse')),
('start', models.DateTimeField(verbose_name='aktiv fra')),
('end', models.DateTimeField(verbose_name='aktiv til')),
('featured', models.BooleanField(default=False, verbose_name='fremhevet')),
('company', models.ForeignKey(related_name='company', to='companyprofile.Company')),
],
options={
'verbose_name': 'karrieremulighet',
'verbose_name_plural': 'karrieremuligheter',
'permissions': (('view_careeropportunity', 'View CareerOpportunity'),),
},
bases=(models.Model,),
),
]
## Instruction:
Revert "Change careeropportunity migration dep"
## Code After:
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('companyprofile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CareerOpportunity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name='tittel')),
('ingress', models.CharField(max_length=250, verbose_name='ingress')),
('description', models.TextField(verbose_name='beskrivelse')),
('start', models.DateTimeField(verbose_name='aktiv fra')),
('end', models.DateTimeField(verbose_name='aktiv til')),
('featured', models.BooleanField(default=False, verbose_name='fremhevet')),
('company', models.ForeignKey(related_name='company', to='companyprofile.Company')),
],
options={
'verbose_name': 'karrieremulighet',
'verbose_name_plural': 'karrieremuligheter',
'permissions': (('view_careeropportunity', 'View CareerOpportunity'),),
},
bases=(models.Model,),
),
]
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
- ('companyprofile', '0001_squashed_0003_company_image'),
+ ('companyprofile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CareerOpportunity',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100, verbose_name='tittel')),
('ingress', models.CharField(max_length=250, verbose_name='ingress')),
('description', models.TextField(verbose_name='beskrivelse')),
('start', models.DateTimeField(verbose_name='aktiv fra')),
('end', models.DateTimeField(verbose_name='aktiv til')),
('featured', models.BooleanField(default=False, verbose_name='fremhevet')),
('company', models.ForeignKey(related_name='company', to='companyprofile.Company')),
],
options={
'verbose_name': 'karrieremulighet',
'verbose_name_plural': 'karrieremuligheter',
'permissions': (('view_careeropportunity', 'View CareerOpportunity'),),
},
bases=(models.Model,),
),
]
|
e1240aa33b286ba52507128458fc6d6b3b68dfb3
|
statsmodels/stats/multicomp.py
|
statsmodels/stats/multicomp.py
|
from statsmodels.sandbox.stats.multicomp import MultiComparison
def pairwise_tukeyhsd(endog, groups, alpha=0.05):
'''calculate all pairwise comparisons with TukeyHSD confidence intervals
this is just a wrapper around tukeyhsd method of MultiComparison
Parameters
----------
endog : ndarray, float, 1d
response variable
groups : ndarray, 1d
array with groups, can be string or integers
alpha : float
significance level for the test
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
See Also
--------
MultiComparison
tukeyhsd
statsmodels.sandbox.stats.multicomp.TukeyHSDResults
'''
return MultiComparison(endog, groups).tukeyhsd(alpha=alpha)
|
from statsmodels.sandbox.stats.multicomp import tukeyhsd, MultiComparison
def pairwise_tukeyhsd(endog, groups, alpha=0.05):
'''calculate all pairwise comparisons with TukeyHSD confidence intervals
this is just a wrapper around tukeyhsd method of MultiComparison
Parameters
----------
endog : ndarray, float, 1d
response variable
groups : ndarray, 1d
array with groups, can be string or integers
alpha : float
significance level for the test
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
See Also
--------
MultiComparison
tukeyhsd
statsmodels.sandbox.stats.multicomp.TukeyHSDResults
'''
return MultiComparison(endog, groups).tukeyhsd(alpha=alpha)
|
Put back an import that my IDE incorrectly flagged as unused
|
Put back an import that my IDE incorrectly flagged as unused
|
Python
|
bsd-3-clause
|
gef756/statsmodels,detrout/debian-statsmodels,detrout/debian-statsmodels,bzero/statsmodels,YihaoLu/statsmodels,wzbozon/statsmodels,edhuckle/statsmodels,cbmoore/statsmodels,musically-ut/statsmodels,josef-pkt/statsmodels,cbmoore/statsmodels,rgommers/statsmodels,hlin117/statsmodels,ChadFulton/statsmodels,edhuckle/statsmodels,hainm/statsmodels,musically-ut/statsmodels,gef756/statsmodels,edhuckle/statsmodels,saketkc/statsmodels,jseabold/statsmodels,jstoxrocky/statsmodels,adammenges/statsmodels,waynenilsen/statsmodels,bzero/statsmodels,nvoron23/statsmodels,wdurhamh/statsmodels,huongttlan/statsmodels,alekz112/statsmodels,adammenges/statsmodels,nguyentu1602/statsmodels,waynenilsen/statsmodels,yl565/statsmodels,phobson/statsmodels,alekz112/statsmodels,wzbozon/statsmodels,huongttlan/statsmodels,saketkc/statsmodels,hainm/statsmodels,hlin117/statsmodels,kiyoto/statsmodels,YihaoLu/statsmodels,waynenilsen/statsmodels,bashtage/statsmodels,wzbozon/statsmodels,jseabold/statsmodels,gef756/statsmodels,phobson/statsmodels,bashtage/statsmodels,YihaoLu/statsmodels,rgommers/statsmodels,astocko/statsmodels,bsipocz/statsmodels,edhuckle/statsmodels,bzero/statsmodels,jseabold/statsmodels,bert9bert/statsmodels,hlin117/statsmodels,saketkc/statsmodels,statsmodels/statsmodels,wdurhamh/statsmodels,edhuckle/statsmodels,wkfwkf/statsmodels,nvoron23/statsmodels,DonBeo/statsmodels,ChadFulton/statsmodels,wdurhamh/statsmodels,ChadFulton/statsmodels,jseabold/statsmodels,wwf5067/statsmodels,bsipocz/statsmodels,nguyentu1602/statsmodels,kiyoto/statsmodels,josef-pkt/statsmodels,adammenges/statsmodels,wkfwkf/statsmodels,ChadFulton/statsmodels,detrout/debian-statsmodels,hainm/statsmodels,wwf5067/statsmodels,phobson/statsmodels,wkfwkf/statsmodels,bert9bert/statsmodels,alekz112/statsmodels,nguyentu1602/statsmodels,hainm/statsmodels,bert9bert/statsmodels,yl565/statsmodels,nguyentu1602/statsmodels,Averroes/statsmodels,bzero/statsmodels,kiyoto/statsmodels,wwf5067/statsmodels,nvoron23/statsmodels,statsmodels/statsmod
els,YihaoLu/statsmodels,bashtage/statsmodels,rgommers/statsmodels,bert9bert/statsmodels,bashtage/statsmodels,bert9bert/statsmodels,saketkc/statsmodels,DonBeo/statsmodels,musically-ut/statsmodels,Averroes/statsmodels,cbmoore/statsmodels,kiyoto/statsmodels,nvoron23/statsmodels,jseabold/statsmodels,statsmodels/statsmodels,ChadFulton/statsmodels,Averroes/statsmodels,astocko/statsmodels,huongttlan/statsmodels,wkfwkf/statsmodels,wdurhamh/statsmodels,rgommers/statsmodels,josef-pkt/statsmodels,wdurhamh/statsmodels,josef-pkt/statsmodels,nvoron23/statsmodels,bzero/statsmodels,wzbozon/statsmodels,jstoxrocky/statsmodels,YihaoLu/statsmodels,phobson/statsmodels,hlin117/statsmodels,alekz112/statsmodels,astocko/statsmodels,musically-ut/statsmodels,gef756/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,DonBeo/statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,adammenges/statsmodels,yl565/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,rgommers/statsmodels,astocko/statsmodels,bashtage/statsmodels,phobson/statsmodels,Averroes/statsmodels,huongttlan/statsmodels,yl565/statsmodels,jstoxrocky/statsmodels,wkfwkf/statsmodels,bsipocz/statsmodels,cbmoore/statsmodels,gef756/statsmodels,wwf5067/statsmodels,jstoxrocky/statsmodels,DonBeo/statsmodels,DonBeo/statsmodels,bsipocz/statsmodels,kiyoto/statsmodels,wzbozon/statsmodels,detrout/debian-statsmodels,yl565/statsmodels,cbmoore/statsmodels,saketkc/statsmodels,waynenilsen/statsmodels,statsmodels/statsmodels
|
- from statsmodels.sandbox.stats.multicomp import MultiComparison
+ from statsmodels.sandbox.stats.multicomp import tukeyhsd, MultiComparison
def pairwise_tukeyhsd(endog, groups, alpha=0.05):
'''calculate all pairwise comparisons with TukeyHSD confidence intervals
this is just a wrapper around tukeyhsd method of MultiComparison
Parameters
----------
endog : ndarray, float, 1d
response variable
groups : ndarray, 1d
array with groups, can be string or integers
alpha : float
significance level for the test
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
See Also
--------
MultiComparison
tukeyhsd
statsmodels.sandbox.stats.multicomp.TukeyHSDResults
'''
return MultiComparison(endog, groups).tukeyhsd(alpha=alpha)
|
Put back an import that my IDE incorrectly flagged as unused
|
## Code Before:
from statsmodels.sandbox.stats.multicomp import MultiComparison
def pairwise_tukeyhsd(endog, groups, alpha=0.05):
'''calculate all pairwise comparisons with TukeyHSD confidence intervals
this is just a wrapper around tukeyhsd method of MultiComparison
Parameters
----------
endog : ndarray, float, 1d
response variable
groups : ndarray, 1d
array with groups, can be string or integers
alpha : float
significance level for the test
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
See Also
--------
MultiComparison
tukeyhsd
statsmodels.sandbox.stats.multicomp.TukeyHSDResults
'''
return MultiComparison(endog, groups).tukeyhsd(alpha=alpha)
## Instruction:
Put back an import that my IDE incorrectly flagged as unused
## Code After:
from statsmodels.sandbox.stats.multicomp import tukeyhsd, MultiComparison
def pairwise_tukeyhsd(endog, groups, alpha=0.05):
'''calculate all pairwise comparisons with TukeyHSD confidence intervals
this is just a wrapper around tukeyhsd method of MultiComparison
Parameters
----------
endog : ndarray, float, 1d
response variable
groups : ndarray, 1d
array with groups, can be string or integers
alpha : float
significance level for the test
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
See Also
--------
MultiComparison
tukeyhsd
statsmodels.sandbox.stats.multicomp.TukeyHSDResults
'''
return MultiComparison(endog, groups).tukeyhsd(alpha=alpha)
|
- from statsmodels.sandbox.stats.multicomp import MultiComparison
+ from statsmodels.sandbox.stats.multicomp import tukeyhsd, MultiComparison
? ++++++++++
def pairwise_tukeyhsd(endog, groups, alpha=0.05):
'''calculate all pairwise comparisons with TukeyHSD confidence intervals
this is just a wrapper around tukeyhsd method of MultiComparison
Parameters
----------
endog : ndarray, float, 1d
response variable
groups : ndarray, 1d
array with groups, can be string or integers
alpha : float
significance level for the test
Returns
-------
results : TukeyHSDResults instance
A results class containing relevant data and some post-hoc
calculations
See Also
--------
MultiComparison
tukeyhsd
statsmodels.sandbox.stats.multicomp.TukeyHSDResults
'''
return MultiComparison(endog, groups).tukeyhsd(alpha=alpha)
|
5c7b33574550d37454b4362fa0896a4dad6e98d1
|
aesthetic/output/gif.py
|
aesthetic/output/gif.py
|
from PIL import Image
from PIL import ImageDraw
def render(animation, out, scale=8):
images = [render_frame(colors, scale=scale) for colors in animation]
save_gif(out, *images)
def render_frame(colors, scale=8):
led_count = 53
size = (led_count * scale, scale)
im = Image.new("RGB", size, "black")
d = ImageDraw.Draw(im)
for idx, color in enumerate(colors):
color = tuple(map(int, color))
x0 = scale * idx
y0 = 0
x1 = scale * (idx + 1)
y1 = scale
d.rectangle((x0, y0, x1, y1), fill=color)
return im
def save_gif(out, image, *more_images):
image.save(out, save_all=True,
append_images=list(more_images),
loop=1000,
duration=50)
|
from PIL import Image
from PIL import ImageDraw
def render(animation, out, scale=8):
images = [render_frame(colors, scale=scale) for colors in animation]
save_gif(out, *images)
def render_frame(colors, scale=8):
led_count = 53
size = (led_count * scale, scale)
im = Image.new("RGB", size, "black")
d = ImageDraw.Draw(im)
for idx, color in enumerate(colors):
color = tuple(map(int, color))
x0 = scale * idx
y0 = 0
x1 = scale * (idx + 1)
y1 = scale
d.rectangle((x0, y0, x1, y1), fill=color)
return im
def save_gif(out, image, *more_images):
delay_ms = 1000 * 0.035
image.save(out, save_all=True,
append_images=list(more_images),
duration=delay_ms, optimize=True)
|
Optimize GIF palette (too many colors right now), better GIF timing options.
|
Optimize GIF palette (too many colors right now), better GIF timing options.
|
Python
|
apache-2.0
|
gnoack/aesthetic
|
from PIL import Image
from PIL import ImageDraw
def render(animation, out, scale=8):
images = [render_frame(colors, scale=scale) for colors in animation]
save_gif(out, *images)
def render_frame(colors, scale=8):
led_count = 53
size = (led_count * scale, scale)
im = Image.new("RGB", size, "black")
d = ImageDraw.Draw(im)
for idx, color in enumerate(colors):
color = tuple(map(int, color))
x0 = scale * idx
y0 = 0
x1 = scale * (idx + 1)
y1 = scale
d.rectangle((x0, y0, x1, y1), fill=color)
return im
def save_gif(out, image, *more_images):
+ delay_ms = 1000 * 0.035
image.save(out, save_all=True,
append_images=list(more_images),
+ duration=delay_ms, optimize=True)
- loop=1000,
- duration=50)
|
Optimize GIF palette (too many colors right now), better GIF timing options.
|
## Code Before:
from PIL import Image
from PIL import ImageDraw
def render(animation, out, scale=8):
images = [render_frame(colors, scale=scale) for colors in animation]
save_gif(out, *images)
def render_frame(colors, scale=8):
led_count = 53
size = (led_count * scale, scale)
im = Image.new("RGB", size, "black")
d = ImageDraw.Draw(im)
for idx, color in enumerate(colors):
color = tuple(map(int, color))
x0 = scale * idx
y0 = 0
x1 = scale * (idx + 1)
y1 = scale
d.rectangle((x0, y0, x1, y1), fill=color)
return im
def save_gif(out, image, *more_images):
image.save(out, save_all=True,
append_images=list(more_images),
loop=1000,
duration=50)
## Instruction:
Optimize GIF palette (too many colors right now), better GIF timing options.
## Code After:
from PIL import Image
from PIL import ImageDraw
def render(animation, out, scale=8):
images = [render_frame(colors, scale=scale) for colors in animation]
save_gif(out, *images)
def render_frame(colors, scale=8):
led_count = 53
size = (led_count * scale, scale)
im = Image.new("RGB", size, "black")
d = ImageDraw.Draw(im)
for idx, color in enumerate(colors):
color = tuple(map(int, color))
x0 = scale * idx
y0 = 0
x1 = scale * (idx + 1)
y1 = scale
d.rectangle((x0, y0, x1, y1), fill=color)
return im
def save_gif(out, image, *more_images):
delay_ms = 1000 * 0.035
image.save(out, save_all=True,
append_images=list(more_images),
duration=delay_ms, optimize=True)
|
from PIL import Image
from PIL import ImageDraw
def render(animation, out, scale=8):
images = [render_frame(colors, scale=scale) for colors in animation]
save_gif(out, *images)
def render_frame(colors, scale=8):
led_count = 53
size = (led_count * scale, scale)
im = Image.new("RGB", size, "black")
d = ImageDraw.Draw(im)
for idx, color in enumerate(colors):
color = tuple(map(int, color))
x0 = scale * idx
y0 = 0
x1 = scale * (idx + 1)
y1 = scale
d.rectangle((x0, y0, x1, y1), fill=color)
return im
def save_gif(out, image, *more_images):
+ delay_ms = 1000 * 0.035
image.save(out, save_all=True,
append_images=list(more_images),
+ duration=delay_ms, optimize=True)
- loop=1000,
- duration=50)
|
0b5a657339870c7669082c39f8290c88732aa92e
|
extractor.py
|
extractor.py
|
from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import grobid
import pdfbox
import filters
if __name__ == '__main__':
runner = ExtractionRunner()
runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
runner.add_runnable(filters.AcademicPaperFilter)
argc = len(sys.argv)
if argc == 2:
runner.run_from_file(sys.argv[1])
elif argc == 3:
runner.run_from_file(sys.argv[1], output_dir = sys.argv[2])
else:
print("USAGE: python {0} path_to_pdf [output_directory]")
|
from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import grobid
import pdfbox
import filters
def get_extraction_runner():
runner = ExtractionRunner()
runner.add_runnable(grobid.GrobidPlainTextExtractor)
# OR
# runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
runner.add_runnable(filters.AcademicPaperFilter)
return runner
if __name__ == '__main__':
runner = get_extraction_runner()
argc = len(sys.argv)
if argc == 2:
runner.run_from_file(sys.argv[1])
elif argc == 3:
runner.run_from_file(sys.argv[1], output_dir = sys.argv[2])
else:
print("USAGE: python {0} path_to_pdf [output_directory]")
|
Make code a little cleaner
|
Make code a little cleaner
|
Python
|
apache-2.0
|
Tiger66639/new-csx-extractor,SeerLabs/new-csx-extractor,Tiger66639/new-csx-extractor,SeerLabs/new-csx-extractor,Tiger66639/new-csx-extractor,Tiger66639/new-csx-extractor,SeerLabs/new-csx-extractor,SeerLabs/new-csx-extractor
|
from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import grobid
import pdfbox
import filters
- if __name__ == '__main__':
+ def get_extraction_runner():
runner = ExtractionRunner()
+
+ runner.add_runnable(grobid.GrobidPlainTextExtractor)
+ # OR
- runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
+ # runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
+
runner.add_runnable(filters.AcademicPaperFilter)
+
+ return runner
+
+
+ if __name__ == '__main__':
+ runner = get_extraction_runner()
argc = len(sys.argv)
if argc == 2:
runner.run_from_file(sys.argv[1])
elif argc == 3:
runner.run_from_file(sys.argv[1], output_dir = sys.argv[2])
else:
print("USAGE: python {0} path_to_pdf [output_directory]")
|
Make code a little cleaner
|
## Code Before:
from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import grobid
import pdfbox
import filters
if __name__ == '__main__':
runner = ExtractionRunner()
runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
runner.add_runnable(filters.AcademicPaperFilter)
argc = len(sys.argv)
if argc == 2:
runner.run_from_file(sys.argv[1])
elif argc == 3:
runner.run_from_file(sys.argv[1], output_dir = sys.argv[2])
else:
print("USAGE: python {0} path_to_pdf [output_directory]")
## Instruction:
Make code a little cleaner
## Code After:
from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import grobid
import pdfbox
import filters
def get_extraction_runner():
runner = ExtractionRunner()
runner.add_runnable(grobid.GrobidPlainTextExtractor)
# OR
# runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
runner.add_runnable(filters.AcademicPaperFilter)
return runner
if __name__ == '__main__':
runner = get_extraction_runner()
argc = len(sys.argv)
if argc == 2:
runner.run_from_file(sys.argv[1])
elif argc == 3:
runner.run_from_file(sys.argv[1], output_dir = sys.argv[2])
else:
print("USAGE: python {0} path_to_pdf [output_directory]")
|
from extraction.core import ExtractionRunner
from extraction.runnables import Extractor, RunnableError, Filter, ExtractorResult
import os
import sys
import grobid
import pdfbox
import filters
- if __name__ == '__main__':
+ def get_extraction_runner():
runner = ExtractionRunner()
+
+ runner.add_runnable(grobid.GrobidPlainTextExtractor)
+ # OR
- runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
+ # runner.add_runnable(pdfbox.PDFBoxPlainTextExtractor)
? ++
+
runner.add_runnable(filters.AcademicPaperFilter)
+
+ return runner
+
+
+ if __name__ == '__main__':
+ runner = get_extraction_runner()
argc = len(sys.argv)
if argc == 2:
runner.run_from_file(sys.argv[1])
elif argc == 3:
runner.run_from_file(sys.argv[1], output_dir = sys.argv[2])
else:
print("USAGE: python {0} path_to_pdf [output_directory]")
|
d2d822a9fb60bbc8ded7f9e3c70d91cf25f794b2
|
src/volunteers/models.py
|
src/volunteers/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
class Volunteer(models.Model):
first_name = models.CharField(_('First name'), max_length=100)
last_name = models.CharField(_('Last name'), max_length=100)
age = models.PositiveIntegerField(_('Age'))
phone = models.CharField(_('Phone'), max_length=100)
email = models.EmailField(_('E-mail'), unique=True)
is_group = models.BooleanField(_('Is group representative'), default=False)
group_name = models.CharField(_('Group/organization name'), max_length=100,
blank=True)
participant_count = models.PositiveIntegerField(_('Participant count'),
default=1, validators=[MinValueValidator(1)])
class Meta:
verbose_name = _('Volunteer')
verbose_name_plural = _('Volunteers')
@property
def name(self):
template = u'{first_name} {last_name}'
if self.is_group:
template += u' (grupp, {participant_count} osalejat)'
return template.format(**self.__dict__)
def __unicode__(self):
return self.name
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
class Volunteer(models.Model):
first_name = models.CharField(_('First name'), max_length=100)
last_name = models.CharField(_('Last name'), max_length=100)
age = models.PositiveIntegerField(_('Age'))
phone = models.CharField(_('Phone'), max_length=100)
email = models.EmailField(_('E-mail'), unique=True)
is_group = models.BooleanField(_('Is group representative'), default=False)
group_name = models.CharField(_('Group/organization name'), max_length=100,
blank=True)
participant_count = models.PositiveIntegerField(_('Participant count'),
default=1, validators=[MinValueValidator(1)])
class Meta:
verbose_name = _('Volunteer')
verbose_name_plural = _('Volunteers')
@property
def name(self):
template = u'{first_name} {last_name}'
if self.is_group:
template += u' ({group_name} grupp, {participant_count} osalejat)'
return template.format(**self.__dict__)
def __unicode__(self):
return self.name
|
Add group name to volunteer string representation
|
Add group name to volunteer string representation
|
Python
|
mit
|
mrts/foodbank-campaign,mrts/foodbank-campaign,mrts/foodbank-campaign,mrts/foodbank-campaign
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
class Volunteer(models.Model):
first_name = models.CharField(_('First name'), max_length=100)
last_name = models.CharField(_('Last name'), max_length=100)
age = models.PositiveIntegerField(_('Age'))
phone = models.CharField(_('Phone'), max_length=100)
email = models.EmailField(_('E-mail'), unique=True)
is_group = models.BooleanField(_('Is group representative'), default=False)
group_name = models.CharField(_('Group/organization name'), max_length=100,
blank=True)
participant_count = models.PositiveIntegerField(_('Participant count'),
default=1, validators=[MinValueValidator(1)])
class Meta:
verbose_name = _('Volunteer')
verbose_name_plural = _('Volunteers')
@property
def name(self):
template = u'{first_name} {last_name}'
if self.is_group:
- template += u' (grupp, {participant_count} osalejat)'
+ template += u' ({group_name} grupp, {participant_count} osalejat)'
return template.format(**self.__dict__)
def __unicode__(self):
return self.name
|
Add group name to volunteer string representation
|
## Code Before:
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
class Volunteer(models.Model):
first_name = models.CharField(_('First name'), max_length=100)
last_name = models.CharField(_('Last name'), max_length=100)
age = models.PositiveIntegerField(_('Age'))
phone = models.CharField(_('Phone'), max_length=100)
email = models.EmailField(_('E-mail'), unique=True)
is_group = models.BooleanField(_('Is group representative'), default=False)
group_name = models.CharField(_('Group/organization name'), max_length=100,
blank=True)
participant_count = models.PositiveIntegerField(_('Participant count'),
default=1, validators=[MinValueValidator(1)])
class Meta:
verbose_name = _('Volunteer')
verbose_name_plural = _('Volunteers')
@property
def name(self):
template = u'{first_name} {last_name}'
if self.is_group:
template += u' (grupp, {participant_count} osalejat)'
return template.format(**self.__dict__)
def __unicode__(self):
return self.name
## Instruction:
Add group name to volunteer string representation
## Code After:
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
class Volunteer(models.Model):
first_name = models.CharField(_('First name'), max_length=100)
last_name = models.CharField(_('Last name'), max_length=100)
age = models.PositiveIntegerField(_('Age'))
phone = models.CharField(_('Phone'), max_length=100)
email = models.EmailField(_('E-mail'), unique=True)
is_group = models.BooleanField(_('Is group representative'), default=False)
group_name = models.CharField(_('Group/organization name'), max_length=100,
blank=True)
participant_count = models.PositiveIntegerField(_('Participant count'),
default=1, validators=[MinValueValidator(1)])
class Meta:
verbose_name = _('Volunteer')
verbose_name_plural = _('Volunteers')
@property
def name(self):
template = u'{first_name} {last_name}'
if self.is_group:
template += u' ({group_name} grupp, {participant_count} osalejat)'
return template.format(**self.__dict__)
def __unicode__(self):
return self.name
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.core.validators import MinValueValidator
class Volunteer(models.Model):
first_name = models.CharField(_('First name'), max_length=100)
last_name = models.CharField(_('Last name'), max_length=100)
age = models.PositiveIntegerField(_('Age'))
phone = models.CharField(_('Phone'), max_length=100)
email = models.EmailField(_('E-mail'), unique=True)
is_group = models.BooleanField(_('Is group representative'), default=False)
group_name = models.CharField(_('Group/organization name'), max_length=100,
blank=True)
participant_count = models.PositiveIntegerField(_('Participant count'),
default=1, validators=[MinValueValidator(1)])
class Meta:
verbose_name = _('Volunteer')
verbose_name_plural = _('Volunteers')
@property
def name(self):
template = u'{first_name} {last_name}'
if self.is_group:
- template += u' (grupp, {participant_count} osalejat)'
+ template += u' ({group_name} grupp, {participant_count} osalejat)'
? +++++++++++++
return template.format(**self.__dict__)
def __unicode__(self):
return self.name
|
98335de4b87638eff9613279bdd106651d4aefe1
|
catt/__init__.py
|
catt/__init__.py
|
__author__ = "Stavros Korokithakis"
__email__ = "[email protected]"
__version__ = "0.9.3"
|
import sys
if sys.version_info.major < 3:
print("This program requires Python 3 and above to run.")
sys.exit(1)
__author__ = "Stavros Korokithakis"
__email__ = "[email protected]"
__version__ = "0.9.3"
|
Make catt refuse to install under 2 more
|
fix: Make catt refuse to install under 2 more
|
Python
|
bsd-2-clause
|
skorokithakis/catt,skorokithakis/catt
|
+
+ import sys
+
+ if sys.version_info.major < 3:
+ print("This program requires Python 3 and above to run.")
+ sys.exit(1)
__author__ = "Stavros Korokithakis"
__email__ = "[email protected]"
__version__ = "0.9.3"
|
Make catt refuse to install under 2 more
|
## Code Before:
__author__ = "Stavros Korokithakis"
__email__ = "[email protected]"
__version__ = "0.9.3"
## Instruction:
Make catt refuse to install under 2 more
## Code After:
import sys
if sys.version_info.major < 3:
print("This program requires Python 3 and above to run.")
sys.exit(1)
__author__ = "Stavros Korokithakis"
__email__ = "[email protected]"
__version__ = "0.9.3"
|
+
+ import sys
+
+ if sys.version_info.major < 3:
+ print("This program requires Python 3 and above to run.")
+ sys.exit(1)
__author__ = "Stavros Korokithakis"
__email__ = "[email protected]"
__version__ = "0.9.3"
|
e9605bd92e67c7f5daf7011f871c3a9d915abe76
|
core/urls/base.py
|
core/urls/base.py
|
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from core.views import Home, register
# Use this file to import all other url
from game_website import settings
urlpatterns = patterns(
# Examples:
# url(r'^blog/', include('blog.urls')),
'',
url(r'^$', Home.as_view(), name='home'),
url(r'^games/', include('core.urls.games')),
url(r'^register/$', register, name='register'),
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout_then_login', name='logout'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Used to serve static media in a dev environment. Should be disabled in production
|
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from core.views import Home, register
# Use this file to import all other url
from game_website import settings
urlpatterns = patterns(
# Examples:
# url(r'^blog/', include('blog.urls')),
'',
url(r'^$', Home.as_view(), name='home'),
url(r'^games/', include('core.urls.games')),
url(r'^register/$', register, name='register'),
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout',
{'next_page': 'core:home'}, name='logout'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Used to serve static media in a dev environment. Should be disabled in production
|
Make logout redirect to home
|
Make logout redirect to home
|
Python
|
mit
|
joshsamara/game-website,joshsamara/game-website,joshsamara/game-website
|
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from core.views import Home, register
# Use this file to import all other url
from game_website import settings
urlpatterns = patterns(
# Examples:
# url(r'^blog/', include('blog.urls')),
'',
url(r'^$', Home.as_view(), name='home'),
url(r'^games/', include('core.urls.games')),
url(r'^register/$', register, name='register'),
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
- url(r'^logout/$', 'django.contrib.auth.views.logout_then_login', name='logout'),
+ url(r'^logout/$', 'django.contrib.auth.views.logout',
-
+ {'next_page': 'core:home'}, name='logout'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Used to serve static media in a dev environment. Should be disabled in production
|
Make logout redirect to home
|
## Code Before:
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from core.views import Home, register
# Use this file to import all other url
from game_website import settings
urlpatterns = patterns(
# Examples:
# url(r'^blog/', include('blog.urls')),
'',
url(r'^$', Home.as_view(), name='home'),
url(r'^games/', include('core.urls.games')),
url(r'^register/$', register, name='register'),
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout_then_login', name='logout'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Used to serve static media in a dev environment. Should be disabled in production
## Instruction:
Make logout redirect to home
## Code After:
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from core.views import Home, register
# Use this file to import all other url
from game_website import settings
urlpatterns = patterns(
# Examples:
# url(r'^blog/', include('blog.urls')),
'',
url(r'^$', Home.as_view(), name='home'),
url(r'^games/', include('core.urls.games')),
url(r'^register/$', register, name='register'),
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout',
{'next_page': 'core:home'}, name='logout'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Used to serve static media in a dev environment. Should be disabled in production
|
from django.conf.urls import patterns, url, include
from django.conf.urls.static import static
from core.views import Home, register
# Use this file to import all other url
from game_website import settings
urlpatterns = patterns(
# Examples:
# url(r'^blog/', include('blog.urls')),
'',
url(r'^$', Home.as_view(), name='home'),
url(r'^games/', include('core.urls.games')),
url(r'^register/$', register, name='register'),
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
- url(r'^logout/$', 'django.contrib.auth.views.logout_then_login', name='logout'),
? ----------- ----------------
+ url(r'^logout/$', 'django.contrib.auth.views.logout',
-
+ {'next_page': 'core:home'}, name='logout'),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + \
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Used to serve static media in a dev environment. Should be disabled in production
|
bca6ca83ce43f6d9b96ac590bda9c6253384ab69
|
winthrop/people/viaf.py
|
winthrop/people/viaf.py
|
import requests
from django.conf import settings
class ViafAPI(object):
"""Wrapper for ViafAPI"""
def __init__(self):
default_url = 'https://www.viaf.org/viaf/AutoSuggest?query='
self.base_url = getattr(settings, "VIAF_AUTOSUGGEST_URL", default_url)
def search(self, query):
"""Do a GET request to pull in JSON"""
r = requests.get('%s%s' % (self.base_url, query))
# Check to make sure we have a sucesss (i.e. a 200 code)
if 200 <= r.status_code < 300:
return r.json()
else:
return None
@classmethod
def uri_from_id(cls, viaf_id):
return 'https://viaf.org/viaf/%s/' % viaf_id
|
import json
import requests
class ViafAPI(object):
"""Wrapper for Viaf API"""
def __init__(self):
self.base_url = "https://www.viaf.org/"
def suggest(self, query):
"""Do a GET request to pull in JSON"""
url = self.base_url + "viaf/AutoSuggest?query="
r = requests.get("%s%s" % (url, query))
# If result is empty, return an empty list instead of None
if not (r.json())['result']:
return json.dumps({'result': []})
return r.json()
@classmethod
def uri_from_id(cls, viaf_id):
return "https://viaf.org/viaf/%s/" % viaf_id
|
Refactor for other search options later (search -> suggest)
|
Refactor for other search options later (search -> suggest)
|
Python
|
apache-2.0
|
Princeton-CDH/winthrop-django,Princeton-CDH/winthrop-django,Princeton-CDH/winthrop-django
|
+ import json
import requests
- from django.conf import settings
class ViafAPI(object):
- """Wrapper for ViafAPI"""
+ """Wrapper for Viaf API"""
def __init__(self):
+ self.base_url = "https://www.viaf.org/"
- default_url = 'https://www.viaf.org/viaf/AutoSuggest?query='
- self.base_url = getattr(settings, "VIAF_AUTOSUGGEST_URL", default_url)
- def search(self, query):
+ def suggest(self, query):
"""Do a GET request to pull in JSON"""
+ url = self.base_url + "viaf/AutoSuggest?query="
- r = requests.get('%s%s' % (self.base_url, query))
+ r = requests.get("%s%s" % (url, query))
- # Check to make sure we have a sucesss (i.e. a 200 code)
- if 200 <= r.status_code < 300:
+ # If result is empty, return an empty list instead of None
+ if not (r.json())['result']:
+ return json.dumps({'result': []})
+
- return r.json()
+ return r.json()
- else:
- return None
@classmethod
def uri_from_id(cls, viaf_id):
- return 'https://viaf.org/viaf/%s/' % viaf_id
+ return "https://viaf.org/viaf/%s/" % viaf_id
|
Refactor for other search options later (search -> suggest)
|
## Code Before:
import requests
from django.conf import settings
class ViafAPI(object):
"""Wrapper for ViafAPI"""
def __init__(self):
default_url = 'https://www.viaf.org/viaf/AutoSuggest?query='
self.base_url = getattr(settings, "VIAF_AUTOSUGGEST_URL", default_url)
def search(self, query):
"""Do a GET request to pull in JSON"""
r = requests.get('%s%s' % (self.base_url, query))
# Check to make sure we have a sucesss (i.e. a 200 code)
if 200 <= r.status_code < 300:
return r.json()
else:
return None
@classmethod
def uri_from_id(cls, viaf_id):
return 'https://viaf.org/viaf/%s/' % viaf_id
## Instruction:
Refactor for other search options later (search -> suggest)
## Code After:
import json
import requests
class ViafAPI(object):
"""Wrapper for Viaf API"""
def __init__(self):
self.base_url = "https://www.viaf.org/"
def suggest(self, query):
"""Do a GET request to pull in JSON"""
url = self.base_url + "viaf/AutoSuggest?query="
r = requests.get("%s%s" % (url, query))
# If result is empty, return an empty list instead of None
if not (r.json())['result']:
return json.dumps({'result': []})
return r.json()
@classmethod
def uri_from_id(cls, viaf_id):
return "https://viaf.org/viaf/%s/" % viaf_id
|
+ import json
import requests
- from django.conf import settings
class ViafAPI(object):
- """Wrapper for ViafAPI"""
+ """Wrapper for Viaf API"""
? +
def __init__(self):
+ self.base_url = "https://www.viaf.org/"
- default_url = 'https://www.viaf.org/viaf/AutoSuggest?query='
- self.base_url = getattr(settings, "VIAF_AUTOSUGGEST_URL", default_url)
- def search(self, query):
? ^^^^
+ def suggest(self, query):
? +++ ^^
"""Do a GET request to pull in JSON"""
+ url = self.base_url + "viaf/AutoSuggest?query="
- r = requests.get('%s%s' % (self.base_url, query))
? ^ ^ ----------
+ r = requests.get("%s%s" % (url, query))
? ^ ^
- # Check to make sure we have a sucesss (i.e. a 200 code)
- if 200 <= r.status_code < 300:
+ # If result is empty, return an empty list instead of None
+ if not (r.json())['result']:
+ return json.dumps({'result': []})
+
- return r.json()
? ----
+ return r.json()
- else:
- return None
@classmethod
def uri_from_id(cls, viaf_id):
- return 'https://viaf.org/viaf/%s/' % viaf_id
? ^ ^
+ return "https://viaf.org/viaf/%s/" % viaf_id
? ^ ^
|
4cdf5be2a3c01e1b16a5e49bdf770f9d8573e16e
|
icekit/utils/testing.py
|
icekit/utils/testing.py
|
import glob
import os
import uuid
from django.core.files.base import ContentFile
from PIL import Image
from StringIO import StringIO
def new_test_image():
"""
Creates an automatically generated test image.
In your testing `tearDown` method make sure to delete the test
image with the helper function `delete_test_image`.
The recommended way of using this helper function is as follows:
object_1.image_property.save(*new_test_image())
:return: Image name and image content file.
"""
image_name = 'test-{}.png'.format(uuid.uuid4())
image_buf = StringIO()
image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))
image.save(image_buf, 'png')
image_buf.seek(0)
return image_name, ContentFile(image_buf.read(), image_name)
def delete_test_image(image_field):
"""
Deletes test image generated as well as thumbnails if created.
The recommended way of using this helper function is as follows:
delete_test_image(object_1.image_property)
:param image_field: The image field on an object.
:return: None.
"""
# ensure all thumbs are deleted
for filename in glob.glob(
os.path.join('public', 'media', 'thumbs', image_field.name) + '*'):
os.unlink(filename)
# delete the saved file
image_field.delete()
|
import glob
import os
import uuid
from PIL import Image
from django.core.files.base import ContentFile
from django.utils import six
def new_test_image():
"""
Creates an automatically generated test image.
In your testing `tearDown` method make sure to delete the test
image with the helper function `delete_test_image`.
The recommended way of using this helper function is as follows:
object_1.image_property.save(*new_test_image())
:return: Image name and image content file.
"""
image_name = 'test-{}.png'.format(uuid.uuid4())
image_buf = six.StringIO()
image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))
image.save(image_buf, 'png')
image_buf.seek(0)
return image_name, ContentFile(image_buf.read(), image_name)
def delete_test_image(image_field):
"""
Deletes test image generated as well as thumbnails if created.
The recommended way of using this helper function is as follows:
delete_test_image(object_1.image_property)
:param image_field: The image field on an object.
:return: None.
"""
# ensure all thumbs are deleted
for filename in glob.glob(
os.path.join('public', 'media', 'thumbs', image_field.name) + '*'):
os.unlink(filename)
# delete the saved file
image_field.delete()
|
Update StringIO import for Python3 compat
|
Update StringIO import for Python3 compat
|
Python
|
mit
|
ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit
|
import glob
import os
import uuid
+
+ from PIL import Image
+
from django.core.files.base import ContentFile
+ from django.utils import six
- from PIL import Image
- from StringIO import StringIO
def new_test_image():
"""
Creates an automatically generated test image.
In your testing `tearDown` method make sure to delete the test
image with the helper function `delete_test_image`.
The recommended way of using this helper function is as follows:
object_1.image_property.save(*new_test_image())
:return: Image name and image content file.
"""
image_name = 'test-{}.png'.format(uuid.uuid4())
- image_buf = StringIO()
+ image_buf = six.StringIO()
image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))
image.save(image_buf, 'png')
image_buf.seek(0)
return image_name, ContentFile(image_buf.read(), image_name)
def delete_test_image(image_field):
"""
Deletes test image generated as well as thumbnails if created.
The recommended way of using this helper function is as follows:
delete_test_image(object_1.image_property)
:param image_field: The image field on an object.
:return: None.
"""
# ensure all thumbs are deleted
for filename in glob.glob(
os.path.join('public', 'media', 'thumbs', image_field.name) + '*'):
os.unlink(filename)
# delete the saved file
image_field.delete()
|
Update StringIO import for Python3 compat
|
## Code Before:
import glob
import os
import uuid
from django.core.files.base import ContentFile
from PIL import Image
from StringIO import StringIO
def new_test_image():
"""
Creates an automatically generated test image.
In your testing `tearDown` method make sure to delete the test
image with the helper function `delete_test_image`.
The recommended way of using this helper function is as follows:
object_1.image_property.save(*new_test_image())
:return: Image name and image content file.
"""
image_name = 'test-{}.png'.format(uuid.uuid4())
image_buf = StringIO()
image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))
image.save(image_buf, 'png')
image_buf.seek(0)
return image_name, ContentFile(image_buf.read(), image_name)
def delete_test_image(image_field):
"""
Deletes test image generated as well as thumbnails if created.
The recommended way of using this helper function is as follows:
delete_test_image(object_1.image_property)
:param image_field: The image field on an object.
:return: None.
"""
# ensure all thumbs are deleted
for filename in glob.glob(
os.path.join('public', 'media', 'thumbs', image_field.name) + '*'):
os.unlink(filename)
# delete the saved file
image_field.delete()
## Instruction:
Update StringIO import for Python3 compat
## Code After:
import glob
import os
import uuid
from PIL import Image
from django.core.files.base import ContentFile
from django.utils import six
def new_test_image():
"""
Creates an automatically generated test image.
In your testing `tearDown` method make sure to delete the test
image with the helper function `delete_test_image`.
The recommended way of using this helper function is as follows:
object_1.image_property.save(*new_test_image())
:return: Image name and image content file.
"""
image_name = 'test-{}.png'.format(uuid.uuid4())
image_buf = six.StringIO()
image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))
image.save(image_buf, 'png')
image_buf.seek(0)
return image_name, ContentFile(image_buf.read(), image_name)
def delete_test_image(image_field):
"""
Deletes test image generated as well as thumbnails if created.
The recommended way of using this helper function is as follows:
delete_test_image(object_1.image_property)
:param image_field: The image field on an object.
:return: None.
"""
# ensure all thumbs are deleted
for filename in glob.glob(
os.path.join('public', 'media', 'thumbs', image_field.name) + '*'):
os.unlink(filename)
# delete the saved file
image_field.delete()
|
import glob
import os
import uuid
+
+ from PIL import Image
+
from django.core.files.base import ContentFile
+ from django.utils import six
- from PIL import Image
- from StringIO import StringIO
def new_test_image():
"""
Creates an automatically generated test image.
In your testing `tearDown` method make sure to delete the test
image with the helper function `delete_test_image`.
The recommended way of using this helper function is as follows:
object_1.image_property.save(*new_test_image())
:return: Image name and image content file.
"""
image_name = 'test-{}.png'.format(uuid.uuid4())
- image_buf = StringIO()
+ image_buf = six.StringIO()
? ++++
image = Image.new('RGBA', size=(50, 50), color=(256, 0, 0))
image.save(image_buf, 'png')
image_buf.seek(0)
return image_name, ContentFile(image_buf.read(), image_name)
def delete_test_image(image_field):
"""
Deletes test image generated as well as thumbnails if created.
The recommended way of using this helper function is as follows:
delete_test_image(object_1.image_property)
:param image_field: The image field on an object.
:return: None.
"""
# ensure all thumbs are deleted
for filename in glob.glob(
os.path.join('public', 'media', 'thumbs', image_field.name) + '*'):
os.unlink(filename)
# delete the saved file
image_field.delete()
|
e2452a46766abdef354d9e04f6fb61eae51bf6ee
|
yepes/models.py
|
yepes/models.py
|
from django import template
template.add_to_builtins('yepes.defaultfilters')
template.add_to_builtins('yepes.defaulttags')
|
from __future__ import absolute_import
import types
from django import template
from django.db import connections
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.utils import six
template.add_to_builtins('yepes.defaultfilters')
template.add_to_builtins('yepes.defaulttags')
def in_batches(self, batch_size):
"""
Makes an iterator that returns batches of the indicated size with the
results from applying this QuerySet to the database.
WARNING: Each batch is an evaluated QuerySet, so its results are already
cached.
"""
start = 0
stop = batch_size
batch = self[start:stop]
while batch:
yield batch
start += batch_size
stop += batch_size
batch = self[start:stop]
if six.PY2:
in_batches = types.MethodType(in_batches, None, QuerySet)
setattr(QuerySet, 'in_batches', in_batches)
def in_batches(self, *args, **kwargs):
return self.get_queryset().in_batches(*args, **kwargs)
def truncate(self):
"""
Quickly removes all records of the Manager's model and tries to restart
sequences owned by fields of the truncated model.
NOTE: Sequence restarting currently is only supported by postgresql backend.
"""
qs = self.get_queryset()
qs._for_write = True
conn = connections[qs.db]
statements = self.statements.get(conn.vendor)
if statements is None:
statements = self.statements['default']
opts = self.model._meta
cursor = conn.cursor()
cursor.execute(statements['truncate'].format(table=opts.db_table))
if six.PY2:
in_batches = types.MethodType(in_batches, None, Manager)
truncate = types.MethodType(truncate, None, Manager)
setattr(Manager, 'in_batches', in_batches)
setattr(Manager, 'statements', {
'postgresql': {
'truncate': 'TRUNCATE {table} RESTART IDENTITY;',
},
'mysql': {
'truncate': 'TRUNCATE {table};',
},
'default': {
'truncate': 'DELETE FROM {table};',
},
})
setattr(Manager, 'truncate', truncate)
|
Implement truncate() method for Manager and in_batches() method for QuerySet
|
Implement truncate() method for Manager and in_batches() method for QuerySet
|
Python
|
bsd-3-clause
|
samuelmaudo/yepes,samuelmaudo/yepes,samuelmaudo/yepes,samuelmaudo/yepes
|
+
+ from __future__ import absolute_import
+
+ import types
from django import template
+ from django.db import connections
+ from django.db.models.manager import Manager
+ from django.db.models.query import QuerySet
+ from django.utils import six
+
template.add_to_builtins('yepes.defaultfilters')
template.add_to_builtins('yepes.defaulttags')
+
+ def in_batches(self, batch_size):
+ """
+ Makes an iterator that returns batches of the indicated size with the
+ results from applying this QuerySet to the database.
+
+ WARNING: Each batch is an evaluated QuerySet, so its results are already
+ cached.
+
+ """
+ start = 0
+ stop = batch_size
+ batch = self[start:stop]
+ while batch:
+ yield batch
+ start += batch_size
+ stop += batch_size
+ batch = self[start:stop]
+
+ if six.PY2:
+ in_batches = types.MethodType(in_batches, None, QuerySet)
+
+ setattr(QuerySet, 'in_batches', in_batches)
+
+
+ def in_batches(self, *args, **kwargs):
+ return self.get_queryset().in_batches(*args, **kwargs)
+
+ def truncate(self):
+ """
+ Quickly removes all records of the Manager's model and tries to restart
+ sequences owned by fields of the truncated model.
+
+ NOTE: Sequence restarting currently is only supported by postgresql backend.
+
+ """
+ qs = self.get_queryset()
+ qs._for_write = True
+
+ conn = connections[qs.db]
+ statements = self.statements.get(conn.vendor)
+ if statements is None:
+ statements = self.statements['default']
+
+ opts = self.model._meta
+ cursor = conn.cursor()
+ cursor.execute(statements['truncate'].format(table=opts.db_table))
+
+ if six.PY2:
+ in_batches = types.MethodType(in_batches, None, Manager)
+ truncate = types.MethodType(truncate, None, Manager)
+
+ setattr(Manager, 'in_batches', in_batches)
+ setattr(Manager, 'statements', {
+ 'postgresql': {
+ 'truncate': 'TRUNCATE {table} RESTART IDENTITY;',
+ },
+ 'mysql': {
+ 'truncate': 'TRUNCATE {table};',
+ },
+ 'default': {
+ 'truncate': 'DELETE FROM {table};',
+ },
+ })
+ setattr(Manager, 'truncate', truncate)
+
+
|
Implement truncate() method for Manager and in_batches() method for QuerySet
|
## Code Before:
from django import template
template.add_to_builtins('yepes.defaultfilters')
template.add_to_builtins('yepes.defaulttags')
## Instruction:
Implement truncate() method for Manager and in_batches() method for QuerySet
## Code After:
from __future__ import absolute_import
import types
from django import template
from django.db import connections
from django.db.models.manager import Manager
from django.db.models.query import QuerySet
from django.utils import six
template.add_to_builtins('yepes.defaultfilters')
template.add_to_builtins('yepes.defaulttags')
def in_batches(self, batch_size):
"""
Makes an iterator that returns batches of the indicated size with the
results from applying this QuerySet to the database.
WARNING: Each batch is an evaluated QuerySet, so its results are already
cached.
"""
start = 0
stop = batch_size
batch = self[start:stop]
while batch:
yield batch
start += batch_size
stop += batch_size
batch = self[start:stop]
if six.PY2:
in_batches = types.MethodType(in_batches, None, QuerySet)
setattr(QuerySet, 'in_batches', in_batches)
def in_batches(self, *args, **kwargs):
return self.get_queryset().in_batches(*args, **kwargs)
def truncate(self):
"""
Quickly removes all records of the Manager's model and tries to restart
sequences owned by fields of the truncated model.
NOTE: Sequence restarting currently is only supported by postgresql backend.
"""
qs = self.get_queryset()
qs._for_write = True
conn = connections[qs.db]
statements = self.statements.get(conn.vendor)
if statements is None:
statements = self.statements['default']
opts = self.model._meta
cursor = conn.cursor()
cursor.execute(statements['truncate'].format(table=opts.db_table))
if six.PY2:
in_batches = types.MethodType(in_batches, None, Manager)
truncate = types.MethodType(truncate, None, Manager)
setattr(Manager, 'in_batches', in_batches)
setattr(Manager, 'statements', {
'postgresql': {
'truncate': 'TRUNCATE {table} RESTART IDENTITY;',
},
'mysql': {
'truncate': 'TRUNCATE {table};',
},
'default': {
'truncate': 'DELETE FROM {table};',
},
})
setattr(Manager, 'truncate', truncate)
|
+
+ from __future__ import absolute_import
+
+ import types
from django import template
+ from django.db import connections
+ from django.db.models.manager import Manager
+ from django.db.models.query import QuerySet
+ from django.utils import six
+
template.add_to_builtins('yepes.defaultfilters')
template.add_to_builtins('yepes.defaulttags')
+
+
+ def in_batches(self, batch_size):
+ """
+ Makes an iterator that returns batches of the indicated size with the
+ results from applying this QuerySet to the database.
+
+ WARNING: Each batch is an evaluated QuerySet, so its results are already
+ cached.
+
+ """
+ start = 0
+ stop = batch_size
+ batch = self[start:stop]
+ while batch:
+ yield batch
+ start += batch_size
+ stop += batch_size
+ batch = self[start:stop]
+
+ if six.PY2:
+ in_batches = types.MethodType(in_batches, None, QuerySet)
+
+ setattr(QuerySet, 'in_batches', in_batches)
+
+
+ def in_batches(self, *args, **kwargs):
+ return self.get_queryset().in_batches(*args, **kwargs)
+
+ def truncate(self):
+ """
+ Quickly removes all records of the Manager's model and tries to restart
+ sequences owned by fields of the truncated model.
+
+ NOTE: Sequence restarting currently is only supported by postgresql backend.
+
+ """
+ qs = self.get_queryset()
+ qs._for_write = True
+
+ conn = connections[qs.db]
+ statements = self.statements.get(conn.vendor)
+ if statements is None:
+ statements = self.statements['default']
+
+ opts = self.model._meta
+ cursor = conn.cursor()
+ cursor.execute(statements['truncate'].format(table=opts.db_table))
+
+ if six.PY2:
+ in_batches = types.MethodType(in_batches, None, Manager)
+ truncate = types.MethodType(truncate, None, Manager)
+
+ setattr(Manager, 'in_batches', in_batches)
+ setattr(Manager, 'statements', {
+ 'postgresql': {
+ 'truncate': 'TRUNCATE {table} RESTART IDENTITY;',
+ },
+ 'mysql': {
+ 'truncate': 'TRUNCATE {table};',
+ },
+ 'default': {
+ 'truncate': 'DELETE FROM {table};',
+ },
+ })
+ setattr(Manager, 'truncate', truncate)
+
|
57bc8b3c40bbafda6f69b23c230ad73750e881ab
|
hashable/helpers.py
|
hashable/helpers.py
|
from .equals_builder import EqualsBuilder
from .hash_code_builder import HashCodeBuilder
__all__ = [
'hashable',
'equality_comparable',
]
def hashable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls = equality_comparable(cls, attributes, methods)
cls.__hash__ = HashCodeBuilder.auto_generate(cls, attributes, methods)
return cls
return decorator if cls is None else decorator(cls)
def equality_comparable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls.__eq__ = EqualsBuilder.auto_generate(cls, attributes, methods)
cls.__ne__ = EqualsBuilder.auto_ne_from_eq()
return cls
return decorator if cls is None else decorator(cls)
def _validate_attributes_and_methods(attributes, methods):
assert not isinstance(attributes, basestring), 'attributes must be list'
assert not isinstance(methods, basestring), 'methods must be list'
assert attributes or methods, 'attributes or methods must be NOT empty'
|
from .equals_builder import EqualsBuilder
from .hash_code_builder import HashCodeBuilder
__all__ = [
'hashable',
'equalable',
]
def hashable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls = equalable(cls, attributes, methods)
cls.__hash__ = HashCodeBuilder.auto_generate(cls, attributes, methods)
return cls
return decorator if cls is None else decorator(cls)
def equalable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls.__eq__ = EqualsBuilder.auto_generate(cls, attributes, methods)
cls.__ne__ = EqualsBuilder.auto_ne_from_eq()
return cls
return decorator if cls is None else decorator(cls)
def _validate_attributes_and_methods(attributes, methods):
assert not isinstance(attributes, basestring), 'attributes must be list'
assert not isinstance(methods, basestring), 'methods must be list'
assert attributes or methods, 'attributes or methods must be NOT empty'
|
Rename decorator equality_comparable to equalable
|
Rename decorator equality_comparable to equalable
|
Python
|
mit
|
minmax/hashable
|
from .equals_builder import EqualsBuilder
from .hash_code_builder import HashCodeBuilder
__all__ = [
'hashable',
- 'equality_comparable',
+ 'equalable',
]
def hashable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
- cls = equality_comparable(cls, attributes, methods)
+ cls = equalable(cls, attributes, methods)
cls.__hash__ = HashCodeBuilder.auto_generate(cls, attributes, methods)
return cls
return decorator if cls is None else decorator(cls)
- def equality_comparable(cls=None, attributes=None, methods=None):
+ def equalable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls.__eq__ = EqualsBuilder.auto_generate(cls, attributes, methods)
cls.__ne__ = EqualsBuilder.auto_ne_from_eq()
return cls
return decorator if cls is None else decorator(cls)
def _validate_attributes_and_methods(attributes, methods):
assert not isinstance(attributes, basestring), 'attributes must be list'
assert not isinstance(methods, basestring), 'methods must be list'
assert attributes or methods, 'attributes or methods must be NOT empty'
|
Rename decorator equality_comparable to equalable
|
## Code Before:
from .equals_builder import EqualsBuilder
from .hash_code_builder import HashCodeBuilder
__all__ = [
'hashable',
'equality_comparable',
]
def hashable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls = equality_comparable(cls, attributes, methods)
cls.__hash__ = HashCodeBuilder.auto_generate(cls, attributes, methods)
return cls
return decorator if cls is None else decorator(cls)
def equality_comparable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls.__eq__ = EqualsBuilder.auto_generate(cls, attributes, methods)
cls.__ne__ = EqualsBuilder.auto_ne_from_eq()
return cls
return decorator if cls is None else decorator(cls)
def _validate_attributes_and_methods(attributes, methods):
assert not isinstance(attributes, basestring), 'attributes must be list'
assert not isinstance(methods, basestring), 'methods must be list'
assert attributes or methods, 'attributes or methods must be NOT empty'
## Instruction:
Rename decorator equality_comparable to equalable
## Code After:
from .equals_builder import EqualsBuilder
from .hash_code_builder import HashCodeBuilder
__all__ = [
'hashable',
'equalable',
]
def hashable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls = equalable(cls, attributes, methods)
cls.__hash__ = HashCodeBuilder.auto_generate(cls, attributes, methods)
return cls
return decorator if cls is None else decorator(cls)
def equalable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls.__eq__ = EqualsBuilder.auto_generate(cls, attributes, methods)
cls.__ne__ = EqualsBuilder.auto_ne_from_eq()
return cls
return decorator if cls is None else decorator(cls)
def _validate_attributes_and_methods(attributes, methods):
assert not isinstance(attributes, basestring), 'attributes must be list'
assert not isinstance(methods, basestring), 'methods must be list'
assert attributes or methods, 'attributes or methods must be NOT empty'
|
from .equals_builder import EqualsBuilder
from .hash_code_builder import HashCodeBuilder
__all__ = [
'hashable',
- 'equality_comparable',
? ----------
+ 'equalable',
]
def hashable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
- cls = equality_comparable(cls, attributes, methods)
? ----------
+ cls = equalable(cls, attributes, methods)
cls.__hash__ = HashCodeBuilder.auto_generate(cls, attributes, methods)
return cls
return decorator if cls is None else decorator(cls)
- def equality_comparable(cls=None, attributes=None, methods=None):
? ----------
+ def equalable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls.__eq__ = EqualsBuilder.auto_generate(cls, attributes, methods)
cls.__ne__ = EqualsBuilder.auto_ne_from_eq()
return cls
return decorator if cls is None else decorator(cls)
def _validate_attributes_and_methods(attributes, methods):
assert not isinstance(attributes, basestring), 'attributes must be list'
assert not isinstance(methods, basestring), 'methods must be list'
assert attributes or methods, 'attributes or methods must be NOT empty'
|
5723dbbf2dbebf349c61a00ee4ea665b4009bd18
|
spur/io.py
|
spur/io.py
|
import threading
class IoHandler(object):
def __init__(self, in_out_pairs, read_all):
self._handlers = [
OutputHandler(file_in, file_out)
for file_in, file_out
in in_out_pairs
]
self._read_all = read_all
def wait(self):
handler_result = [handler.wait() for handler in self._handlers]
read_all_result = self._read_all()
return [
handler_output or read_all_output
for handler_output, read_all_output
in zip(handler_result, read_all_result)
]
class OutputHandler(object):
def __init__(self, stdout_in, stdout_out):
self._stdout_in = stdout_in
self._stdout_out = stdout_out
self._output = []
if stdout_out:
self._stdout_thread = threading.Thread(target=self._capture_stdout)
self._stdout_thread.daemon = True
self._stdout_thread.start()
else:
self._stdout_thread = None
def wait(self):
if self._stdout_thread:
self._stdout_thread.join()
return "".join(self._output)
def _capture_stdout(self):
while True:
output = self._stdout_in.read(1)
if output:
self._stdout_out.write(output)
self._output.append(output)
else:
return
|
import threading
class IoHandler(object):
def __init__(self, in_out_pairs, read_all):
self._handlers = [
OutputHandler(file_in, file_out)
for file_in, file_out
in in_out_pairs
]
self._read_all = read_all
def wait(self):
handler_result = [handler.wait() for handler in self._handlers]
read_all_result = self._read_all()
return [
handler_output or read_all_output
for handler_output, read_all_output
in zip(handler_result, read_all_result)
]
class OutputHandler(object):
def __init__(self, file_in, file_out):
self._file_in = file_in
self._file_out = file_out
self._output = []
if file_out:
self._thread = threading.Thread(target=self._capture_output)
self._thread.daemon = True
self._thread.start()
else:
self._thread = None
def wait(self):
if self._thread:
self._thread.join()
return "".join(self._output)
def _capture_output (self):
while True:
output = self._file_in.read(1)
if output:
self._file_out.write(output)
self._output.append(output)
else:
return
|
Remove references to stdout in OutputHandler
|
Remove references to stdout in OutputHandler
|
Python
|
bsd-2-clause
|
mwilliamson/spur.py
|
import threading
class IoHandler(object):
def __init__(self, in_out_pairs, read_all):
self._handlers = [
OutputHandler(file_in, file_out)
for file_in, file_out
in in_out_pairs
]
self._read_all = read_all
def wait(self):
handler_result = [handler.wait() for handler in self._handlers]
read_all_result = self._read_all()
return [
handler_output or read_all_output
for handler_output, read_all_output
in zip(handler_result, read_all_result)
]
class OutputHandler(object):
- def __init__(self, stdout_in, stdout_out):
+ def __init__(self, file_in, file_out):
- self._stdout_in = stdout_in
- self._stdout_out = stdout_out
+ self._file_in = file_in
+ self._file_out = file_out
self._output = []
- if stdout_out:
+ if file_out:
- self._stdout_thread = threading.Thread(target=self._capture_stdout)
+ self._thread = threading.Thread(target=self._capture_output)
- self._stdout_thread.daemon = True
+ self._thread.daemon = True
- self._stdout_thread.start()
+ self._thread.start()
else:
- self._stdout_thread = None
+ self._thread = None
def wait(self):
- if self._stdout_thread:
+ if self._thread:
- self._stdout_thread.join()
+ self._thread.join()
return "".join(self._output)
- def _capture_stdout(self):
+ def _capture_output (self):
while True:
- output = self._stdout_in.read(1)
+ output = self._file_in.read(1)
if output:
- self._stdout_out.write(output)
+ self._file_out.write(output)
self._output.append(output)
else:
return
|
Remove references to stdout in OutputHandler
|
## Code Before:
import threading
class IoHandler(object):
def __init__(self, in_out_pairs, read_all):
self._handlers = [
OutputHandler(file_in, file_out)
for file_in, file_out
in in_out_pairs
]
self._read_all = read_all
def wait(self):
handler_result = [handler.wait() for handler in self._handlers]
read_all_result = self._read_all()
return [
handler_output or read_all_output
for handler_output, read_all_output
in zip(handler_result, read_all_result)
]
class OutputHandler(object):
def __init__(self, stdout_in, stdout_out):
self._stdout_in = stdout_in
self._stdout_out = stdout_out
self._output = []
if stdout_out:
self._stdout_thread = threading.Thread(target=self._capture_stdout)
self._stdout_thread.daemon = True
self._stdout_thread.start()
else:
self._stdout_thread = None
def wait(self):
if self._stdout_thread:
self._stdout_thread.join()
return "".join(self._output)
def _capture_stdout(self):
while True:
output = self._stdout_in.read(1)
if output:
self._stdout_out.write(output)
self._output.append(output)
else:
return
## Instruction:
Remove references to stdout in OutputHandler
## Code After:
import threading
class IoHandler(object):
def __init__(self, in_out_pairs, read_all):
self._handlers = [
OutputHandler(file_in, file_out)
for file_in, file_out
in in_out_pairs
]
self._read_all = read_all
def wait(self):
handler_result = [handler.wait() for handler in self._handlers]
read_all_result = self._read_all()
return [
handler_output or read_all_output
for handler_output, read_all_output
in zip(handler_result, read_all_result)
]
class OutputHandler(object):
def __init__(self, file_in, file_out):
self._file_in = file_in
self._file_out = file_out
self._output = []
if file_out:
self._thread = threading.Thread(target=self._capture_output)
self._thread.daemon = True
self._thread.start()
else:
self._thread = None
def wait(self):
if self._thread:
self._thread.join()
return "".join(self._output)
def _capture_output (self):
while True:
output = self._file_in.read(1)
if output:
self._file_out.write(output)
self._output.append(output)
else:
return
|
import threading
class IoHandler(object):
def __init__(self, in_out_pairs, read_all):
self._handlers = [
OutputHandler(file_in, file_out)
for file_in, file_out
in in_out_pairs
]
self._read_all = read_all
def wait(self):
handler_result = [handler.wait() for handler in self._handlers]
read_all_result = self._read_all()
return [
handler_output or read_all_output
for handler_output, read_all_output
in zip(handler_result, read_all_result)
]
class OutputHandler(object):
- def __init__(self, stdout_in, stdout_out):
? ^^^^^^ ^^^^^^
+ def __init__(self, file_in, file_out):
? ^^^^ ^^^^
- self._stdout_in = stdout_in
- self._stdout_out = stdout_out
+ self._file_in = file_in
+ self._file_out = file_out
self._output = []
- if stdout_out:
? ^^^^^^
+ if file_out:
? ^^^^
- self._stdout_thread = threading.Thread(target=self._capture_stdout)
? ------- ---
+ self._thread = threading.Thread(target=self._capture_output)
? +++
- self._stdout_thread.daemon = True
? -------
+ self._thread.daemon = True
- self._stdout_thread.start()
? -------
+ self._thread.start()
else:
- self._stdout_thread = None
? -------
+ self._thread = None
def wait(self):
- if self._stdout_thread:
? -------
+ if self._thread:
- self._stdout_thread.join()
? -------
+ self._thread.join()
return "".join(self._output)
- def _capture_stdout(self):
? ---
+ def _capture_output (self):
? ++++
while True:
- output = self._stdout_in.read(1)
? ^^^^^^
+ output = self._file_in.read(1)
? ^^^^
if output:
- self._stdout_out.write(output)
? ^^^^^^
+ self._file_out.write(output)
? ^^^^
self._output.append(output)
else:
return
|
0f7732d3ceb67ecd445bb4fe2fee1edf4ce8a2f4
|
rock/utils.py
|
rock/utils.py
|
from __future__ import unicode_literals
import os
try:
from io import StringIO
except ImportError: # pragma: no cover
from StringIO import StringIO
from rock.exceptions import ConfigError
ROCK_SHELL = (os.environ.get('ROCK_SHELL') or '/bin/bash -c').split()
ROCK_SHELL.insert(1, os.path.basename(ROCK_SHELL[0]))
def isexecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
try:
basestring
def isstr(s):
return isinstance(s, basestring)
except NameError: # pragma: no cover
def isstr(s):
return isinstance(s, str)
def raw(value):
return value.replace('\\', '\\\\')
class Shell(object):
def __init__(self):
self.stdin = StringIO()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.run()
def run(self):
if not isexecutable(ROCK_SHELL[0]):
raise ConfigError('invalid ROCK_SHELL: %s' % ROCK_SHELL)
os.execl(*(ROCK_SHELL + [self.stdin.getvalue()]))
def write(self, text):
self.stdin.write(text + '\n')
|
from __future__ import unicode_literals
import os
try:
from io import StringIO
except ImportError: # pragma: no cover
from StringIO import StringIO
from rock.exceptions import ConfigError
ROCK_SHELL = (os.environ.get('ROCK_SHELL') or '/bin/bash -c').split()
ROCK_SHELL.insert(1, os.path.basename(ROCK_SHELL[0]))
def isexecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
try:
basestring
def isstr(s):
return isinstance(s, basestring)
except NameError: # pragma: no cover
def isstr(s):
return isinstance(s, str)
def raw(text):
return text.replace('\\', '\\\\')
class Shell(object):
def __init__(self):
self.stdin = StringIO()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.run()
def run(self):
if not isexecutable(ROCK_SHELL[0]):
raise ConfigError('invalid ROCK_SHELL: %s' % ROCK_SHELL)
os.execl(*(ROCK_SHELL + [self.stdin.getvalue()]))
def write(self, text):
self.stdin.write(text + '\n')
|
Tweak raw text parameter name
|
Tweak raw text parameter name
|
Python
|
mit
|
silas/rock,silas/rock,silas/rock,silas/rock,silas/rock,silas/rock,silas/rock,silas/rock
|
from __future__ import unicode_literals
import os
try:
from io import StringIO
except ImportError: # pragma: no cover
from StringIO import StringIO
from rock.exceptions import ConfigError
ROCK_SHELL = (os.environ.get('ROCK_SHELL') or '/bin/bash -c').split()
ROCK_SHELL.insert(1, os.path.basename(ROCK_SHELL[0]))
def isexecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
try:
basestring
def isstr(s):
return isinstance(s, basestring)
except NameError: # pragma: no cover
def isstr(s):
return isinstance(s, str)
- def raw(value):
+ def raw(text):
- return value.replace('\\', '\\\\')
+ return text.replace('\\', '\\\\')
class Shell(object):
def __init__(self):
self.stdin = StringIO()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.run()
def run(self):
if not isexecutable(ROCK_SHELL[0]):
raise ConfigError('invalid ROCK_SHELL: %s' % ROCK_SHELL)
os.execl(*(ROCK_SHELL + [self.stdin.getvalue()]))
def write(self, text):
self.stdin.write(text + '\n')
|
Tweak raw text parameter name
|
## Code Before:
from __future__ import unicode_literals
import os
try:
from io import StringIO
except ImportError: # pragma: no cover
from StringIO import StringIO
from rock.exceptions import ConfigError
ROCK_SHELL = (os.environ.get('ROCK_SHELL') or '/bin/bash -c').split()
ROCK_SHELL.insert(1, os.path.basename(ROCK_SHELL[0]))
def isexecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
try:
basestring
def isstr(s):
return isinstance(s, basestring)
except NameError: # pragma: no cover
def isstr(s):
return isinstance(s, str)
def raw(value):
return value.replace('\\', '\\\\')
class Shell(object):
def __init__(self):
self.stdin = StringIO()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.run()
def run(self):
if not isexecutable(ROCK_SHELL[0]):
raise ConfigError('invalid ROCK_SHELL: %s' % ROCK_SHELL)
os.execl(*(ROCK_SHELL + [self.stdin.getvalue()]))
def write(self, text):
self.stdin.write(text + '\n')
## Instruction:
Tweak raw text parameter name
## Code After:
from __future__ import unicode_literals
import os
try:
from io import StringIO
except ImportError: # pragma: no cover
from StringIO import StringIO
from rock.exceptions import ConfigError
ROCK_SHELL = (os.environ.get('ROCK_SHELL') or '/bin/bash -c').split()
ROCK_SHELL.insert(1, os.path.basename(ROCK_SHELL[0]))
def isexecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
try:
basestring
def isstr(s):
return isinstance(s, basestring)
except NameError: # pragma: no cover
def isstr(s):
return isinstance(s, str)
def raw(text):
return text.replace('\\', '\\\\')
class Shell(object):
def __init__(self):
self.stdin = StringIO()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.run()
def run(self):
if not isexecutable(ROCK_SHELL[0]):
raise ConfigError('invalid ROCK_SHELL: %s' % ROCK_SHELL)
os.execl(*(ROCK_SHELL + [self.stdin.getvalue()]))
def write(self, text):
self.stdin.write(text + '\n')
|
from __future__ import unicode_literals
import os
try:
from io import StringIO
except ImportError: # pragma: no cover
from StringIO import StringIO
from rock.exceptions import ConfigError
ROCK_SHELL = (os.environ.get('ROCK_SHELL') or '/bin/bash -c').split()
ROCK_SHELL.insert(1, os.path.basename(ROCK_SHELL[0]))
def isexecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
try:
basestring
def isstr(s):
return isinstance(s, basestring)
except NameError: # pragma: no cover
def isstr(s):
return isinstance(s, str)
- def raw(value):
? ^^^^
+ def raw(text):
? ^ ++
- return value.replace('\\', '\\\\')
? ^^^^
+ return text.replace('\\', '\\\\')
? ^ ++
class Shell(object):
def __init__(self):
self.stdin = StringIO()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.run()
def run(self):
if not isexecutable(ROCK_SHELL[0]):
raise ConfigError('invalid ROCK_SHELL: %s' % ROCK_SHELL)
os.execl(*(ROCK_SHELL + [self.stdin.getvalue()]))
def write(self, text):
self.stdin.write(text + '\n')
|
8e2a42369228f3d19b046a610c93de4bec06d5bf
|
avocado/core/structures.py
|
avocado/core/structures.py
|
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class ChoicesDict(OrderedDict):
"OrdereDict that yields the key and value on iteration."
def __iter__(self):
iterator = super(ChoicesDict, self).__iter__()
for key in iterator:
yield key, self[key]
|
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
REPR_OUTPUT_SIZE = 20
class ChoicesDict(OrderedDict):
"OrdereDict that yields the key and value on iteration."
def __iter__(self):
iterator = super(ChoicesDict, self).__iter__()
for key in iterator:
yield key, self[key]
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = '...(remaining elements truncated)...'
return repr(tuple(data))
|
Add __repr__ to ChoicesDict structure
|
Add __repr__ to ChoicesDict structure
|
Python
|
bsd-2-clause
|
murphyke/avocado,murphyke/avocado,murphyke/avocado,murphyke/avocado
|
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
+
+
+ REPR_OUTPUT_SIZE = 20
class ChoicesDict(OrderedDict):
"OrdereDict that yields the key and value on iteration."
def __iter__(self):
iterator = super(ChoicesDict, self).__iter__()
for key in iterator:
yield key, self[key]
+ def __repr__(self):
+ data = list(self[:REPR_OUTPUT_SIZE + 1])
+
+ if len(data) > REPR_OUTPUT_SIZE:
+ data[-1] = '...(remaining elements truncated)...'
+
+ return repr(tuple(data))
+
|
Add __repr__ to ChoicesDict structure
|
## Code Before:
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
class ChoicesDict(OrderedDict):
"OrdereDict that yields the key and value on iteration."
def __iter__(self):
iterator = super(ChoicesDict, self).__iter__()
for key in iterator:
yield key, self[key]
## Instruction:
Add __repr__ to ChoicesDict structure
## Code After:
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
REPR_OUTPUT_SIZE = 20
class ChoicesDict(OrderedDict):
"OrdereDict that yields the key and value on iteration."
def __iter__(self):
iterator = super(ChoicesDict, self).__iter__()
for key in iterator:
yield key, self[key]
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = '...(remaining elements truncated)...'
return repr(tuple(data))
|
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
+
+
+ REPR_OUTPUT_SIZE = 20
class ChoicesDict(OrderedDict):
"OrdereDict that yields the key and value on iteration."
def __iter__(self):
iterator = super(ChoicesDict, self).__iter__()
for key in iterator:
yield key, self[key]
+
+ def __repr__(self):
+ data = list(self[:REPR_OUTPUT_SIZE + 1])
+
+ if len(data) > REPR_OUTPUT_SIZE:
+ data[-1] = '...(remaining elements truncated)...'
+
+ return repr(tuple(data))
|
fe9e11af28e2ffe2b3da5ebb0971cd712136284c
|
nodeconductor/iaas/migrations/0011_cloudprojectmembership_availability_zone.py
|
nodeconductor/iaas/migrations/0011_cloudprojectmembership_availability_zone.py
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iaas', '0010_auto_20150118_1834'),
]
operations = [
migrations.AddField(
model_name='cloudprojectmembership',
name='availability_zone',
field=models.CharField(max_length=100, blank=True),
preserve_default=True,
),
]
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iaas', '0010_auto_20150118_1834'),
]
operations = [
migrations.AddField(
model_name='cloudprojectmembership',
name='availability_zone',
field=models.CharField(help_text='Optional availability group. Will be used for all instances provisioned in this tenant', max_length=100, blank=True),
preserve_default=True,
),
]
|
Add help_text to availability_zone field (nc-327)
|
Add help_text to availability_zone field (nc-327)
|
Python
|
mit
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iaas', '0010_auto_20150118_1834'),
]
operations = [
migrations.AddField(
model_name='cloudprojectmembership',
name='availability_zone',
- field=models.CharField(max_length=100, blank=True),
+ field=models.CharField(help_text='Optional availability group. Will be used for all instances provisioned in this tenant', max_length=100, blank=True),
preserve_default=True,
),
]
|
Add help_text to availability_zone field (nc-327)
|
## Code Before:
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iaas', '0010_auto_20150118_1834'),
]
operations = [
migrations.AddField(
model_name='cloudprojectmembership',
name='availability_zone',
field=models.CharField(max_length=100, blank=True),
preserve_default=True,
),
]
## Instruction:
Add help_text to availability_zone field (nc-327)
## Code After:
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the ``availability_zone`` field to ``CloudProjectMembership``."""

    # Must run after the previous iaas migration in the chain.
    dependencies = [
        ('iaas', '0010_auto_20150118_1834'),
    ]

    operations = [
        # Optional, blank-able char field; the help_text documents that the
        # zone applies to every instance provisioned in the tenant.
        migrations.AddField(
            model_name='cloudprojectmembership',
            name='availability_zone',
            field=models.CharField(help_text='Optional availability group. Will be used for all instances provisioned in this tenant', max_length=100, blank=True),
            preserve_default=True,
        ),
    ]
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('iaas', '0010_auto_20150118_1834'),
]
operations = [
migrations.AddField(
model_name='cloudprojectmembership',
name='availability_zone',
- field=models.CharField(max_length=100, blank=True),
+ field=models.CharField(help_text='Optional availability group. Will be used for all instances provisioned in this tenant', max_length=100, blank=True),
preserve_default=True,
),
]
|
0a336447546442ab5d48716223713135a4812adf
|
get_problem.py
|
get_problem.py
|
import sys
from bs4 import BeautifulSoup
from requests import get, codes
def match_soup_class(target, mode='class'):
def do_match(tag):
classes = tag.get(mode, [])
return all(c in classes for c in target)
return do_match
def main():
if len(sys.argv) == 1:
p = 1
else:
p = int(sys.argv[1])
url = 'https://projecteuler.net/problem=%d' % p
r = get(url)
if r.status_code != codes.ok:
print('[url request failed] ', url)
return
soup = BeautifulSoup(r.text, 'html.parser')
for content in soup.find_all(match_soup_class(['problem_content'])):
print(content.text)
if __name__ == '__main__':
main()
|
import sys
from bs4 import BeautifulSoup
from requests import get, codes
def match_soup_class(target, mode='class'):
    """Build a tag predicate for BeautifulSoup's find_all().

    The returned matcher is true for tags whose *mode* attribute
    (default ``'class'``) contains every entry of *target*.
    """
    def do_match(tag):
        found = tag.get(mode, [])
        for wanted in target:
            if wanted not in found:
                return False
        return True
    return do_match
def main():
    """Fetch a Project Euler problem and print it between triple-quote markers.

    The problem number is read from ``argv[1]`` and defaults to problem 1.
    """
    # Default to problem 1 when no argument is supplied.
    if len(sys.argv) == 1:
        p = 1
    else:
        p = int(sys.argv[1])
    url = 'https://projecteuler.net/problem=%d' % p
    r = get(url)
    if r.status_code != codes.ok:
        print('[url request failed] ', url)
        return
    soup = BeautifulSoup(r.text, 'html.parser')
    # Wrap the output in ''' markers so it can be pasted directly into a
    # Python source file as a module docstring.
    print("'''")
    print('Problem %d' % p)
    for content in soup.find_all(match_soup_class(['problem_content'])):
        print(content.text)
    print("'''")
if __name__ == '__main__':
main()
|
ADD comment for python file
|
ADD comment for python file
|
Python
|
mit
|
byung-u/ProjectEuler
|
import sys
from bs4 import BeautifulSoup
from requests import get, codes
def match_soup_class(target, mode='class'):
def do_match(tag):
classes = tag.get(mode, [])
return all(c in classes for c in target)
return do_match
def main():
if len(sys.argv) == 1:
p = 1
else:
p = int(sys.argv[1])
url = 'https://projecteuler.net/problem=%d' % p
r = get(url)
if r.status_code != codes.ok:
print('[url request failed] ', url)
return
soup = BeautifulSoup(r.text, 'html.parser')
+ print("'''")
+ print('Problem %d' % p)
for content in soup.find_all(match_soup_class(['problem_content'])):
print(content.text)
-
+ print("'''")
if __name__ == '__main__':
main()
|
ADD comment for python file
|
## Code Before:
import sys
from bs4 import BeautifulSoup
from requests import get, codes
def match_soup_class(target, mode='class'):
def do_match(tag):
classes = tag.get(mode, [])
return all(c in classes for c in target)
return do_match
def main():
if len(sys.argv) == 1:
p = 1
else:
p = int(sys.argv[1])
url = 'https://projecteuler.net/problem=%d' % p
r = get(url)
if r.status_code != codes.ok:
print('[url request failed] ', url)
return
soup = BeautifulSoup(r.text, 'html.parser')
for content in soup.find_all(match_soup_class(['problem_content'])):
print(content.text)
if __name__ == '__main__':
main()
## Instruction:
ADD comment for python file
## Code After:
import sys
from bs4 import BeautifulSoup
from requests import get, codes
def match_soup_class(target, mode='class'):
def do_match(tag):
classes = tag.get(mode, [])
return all(c in classes for c in target)
return do_match
def main():
if len(sys.argv) == 1:
p = 1
else:
p = int(sys.argv[1])
url = 'https://projecteuler.net/problem=%d' % p
r = get(url)
if r.status_code != codes.ok:
print('[url request failed] ', url)
return
soup = BeautifulSoup(r.text, 'html.parser')
print("'''")
print('Problem %d' % p)
for content in soup.find_all(match_soup_class(['problem_content'])):
print(content.text)
print("'''")
if __name__ == '__main__':
main()
|
import sys
from bs4 import BeautifulSoup
from requests import get, codes
def match_soup_class(target, mode='class'):
def do_match(tag):
classes = tag.get(mode, [])
return all(c in classes for c in target)
return do_match
def main():
if len(sys.argv) == 1:
p = 1
else:
p = int(sys.argv[1])
url = 'https://projecteuler.net/problem=%d' % p
r = get(url)
if r.status_code != codes.ok:
print('[url request failed] ', url)
return
soup = BeautifulSoup(r.text, 'html.parser')
+ print("'''")
+ print('Problem %d' % p)
for content in soup.find_all(match_soup_class(['problem_content'])):
print(content.text)
-
+ print("'''")
if __name__ == '__main__':
main()
|
7e00b8a4436ee4bdad4d248a29985b1cef741a53
|
nimbus/apps/media/utils.py
|
nimbus/apps/media/utils.py
|
def bsd_rand(seed):
return (1103515245 * seed + 12345) & 0x7fffffff
def baseconv(v1, a1, a2):
n1 = {c: i for i, c in dict(enumerate(a1)).items()}
b1 = len(a1)
b2 = len(a2)
d1 = 0
for i, c in enumerate(v1):
d1 += n1[c] * pow(b1, b1 - i - 1)
v2 = ""
while d1:
v2 = a2[d1 % b2] + v2
d1 //= b2
return v2
def url_hash_from_pk(pk):
b10 = "0123456789"
b62 = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
return baseconv(str(bsd_rand(pk)), b10, b62)
|
from nimbus.settings import SECRET_KEY
import hashlib
def baseconv(v1, a1, a2):
    """Re-encode the string *v1*, written in alphabet *a1*, into alphabet *a2*.

    Returns the empty string when the encoded value is zero.
    Raises KeyError if *v1* contains a character outside *a1*.
    """
    digit_of = {ch: idx for idx, ch in enumerate(a1)}
    src_base = len(a1)
    dst_base = len(a2)

    # Accumulate the integer value, most significant digit first.
    value = 0
    for ch in v1:
        value = value * src_base + digit_of[ch]

    # Emit destination digits least significant first, then reverse.
    out = []
    while value:
        value, rem = divmod(value, dst_base)
        out.append(a2[rem])
    return "".join(reversed(out))
# Derive a per-deployment LCG increment from the project's SECRET_KEY so that
# generated URL hashes differ between installations.
# NOTE(review): md5.update() requires bytes on Python 3 — this passes
# SECRET_KEY directly, which presumably is a byte string here; confirm.
m = hashlib.md5()
m.update(SECRET_KEY)
c = int(baseconv(m.hexdigest(), "0123456789abcdef", "0123456789"))
# Force the increment to be odd.
c = c - (c % 2) + 1
def lcg(seed):
    # One linear congruential generator step, masked to 31 bits.
    return (1103515245 * seed + c) & 0x7fffffff
def url_hash_from_pk(pk):
    # Scramble the primary key with the LCG, then re-encode the decimal
    # digits in base 62 to get a short, non-sequential URL hash.
    b10 = "0123456789"
    b62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    return baseconv(str(lcg(pk)), b10, b62)
|
Patch bug and security vulnerability
|
Patch bug and security vulnerability
|
Python
|
mit
|
ethanal/Nimbus,ethanal/Nimbus,ethanal/Nimbus,ethanal/Nimbus
|
- def bsd_rand(seed):
- return (1103515245 * seed + 12345) & 0x7fffffff
+ from nimbus.settings import SECRET_KEY
+ import hashlib
def baseconv(v1, a1, a2):
- n1 = {c: i for i, c in dict(enumerate(a1)).items()}
+ n1 = {c: i for i, c in enumerate(a1)}
b1 = len(a1)
b2 = len(a2)
d1 = 0
for i, c in enumerate(v1):
- d1 += n1[c] * pow(b1, b1 - i - 1)
+ d1 += n1[c] * pow(b1, len(v1) - i - 1)
v2 = ""
while d1:
v2 = a2[d1 % b2] + v2
d1 //= b2
return v2
+ m = hashlib.md5()
+ m.update(SECRET_KEY)
+ c = int(baseconv(m.hexdigest(), "0123456789abcdef", "0123456789"))
+ c = c - (c % 2) + 1
+
+
+ def lcg(seed):
+ return (1103515245 * seed + c) & 0x7fffffff
+
+
def url_hash_from_pk(pk):
b10 = "0123456789"
- b62 = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ b62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
- return baseconv(str(bsd_rand(pk)), b10, b62)
+ return baseconv(str(lcg(pk)), b10, b62)
|
Patch bug and security vulnerability
|
## Code Before:
def bsd_rand(seed):
return (1103515245 * seed + 12345) & 0x7fffffff
def baseconv(v1, a1, a2):
n1 = {c: i for i, c in dict(enumerate(a1)).items()}
b1 = len(a1)
b2 = len(a2)
d1 = 0
for i, c in enumerate(v1):
d1 += n1[c] * pow(b1, b1 - i - 1)
v2 = ""
while d1:
v2 = a2[d1 % b2] + v2
d1 //= b2
return v2
def url_hash_from_pk(pk):
b10 = "0123456789"
b62 = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
return baseconv(str(bsd_rand(pk)), b10, b62)
## Instruction:
Patch bug and security vulnerability
## Code After:
from nimbus.settings import SECRET_KEY
import hashlib
def baseconv(v1, a1, a2):
n1 = {c: i for i, c in enumerate(a1)}
b1 = len(a1)
b2 = len(a2)
d1 = 0
for i, c in enumerate(v1):
d1 += n1[c] * pow(b1, len(v1) - i - 1)
v2 = ""
while d1:
v2 = a2[d1 % b2] + v2
d1 //= b2
return v2
m = hashlib.md5()
m.update(SECRET_KEY)
c = int(baseconv(m.hexdigest(), "0123456789abcdef", "0123456789"))
c = c - (c % 2) + 1
def lcg(seed):
return (1103515245 * seed + c) & 0x7fffffff
def url_hash_from_pk(pk):
b10 = "0123456789"
b62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
return baseconv(str(lcg(pk)), b10, b62)
|
- def bsd_rand(seed):
- return (1103515245 * seed + 12345) & 0x7fffffff
+ from nimbus.settings import SECRET_KEY
+ import hashlib
def baseconv(v1, a1, a2):
- n1 = {c: i for i, c in dict(enumerate(a1)).items()}
? ----- ---------
+ n1 = {c: i for i, c in enumerate(a1)}
b1 = len(a1)
b2 = len(a2)
d1 = 0
for i, c in enumerate(v1):
- d1 += n1[c] * pow(b1, b1 - i - 1)
? ^
+ d1 += n1[c] * pow(b1, len(v1) - i - 1)
? ^^^^^ +
v2 = ""
while d1:
v2 = a2[d1 % b2] + v2
d1 //= b2
return v2
+ m = hashlib.md5()
+ m.update(SECRET_KEY)
+ c = int(baseconv(m.hexdigest(), "0123456789abcdef", "0123456789"))
+ c = c - (c % 2) + 1
+
+
+ def lcg(seed):
+ return (1103515245 * seed + c) & 0x7fffffff
+
+
def url_hash_from_pk(pk):
b10 = "0123456789"
- b62 = "abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ b62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
- return baseconv(str(bsd_rand(pk)), b10, b62)
? ^^^^^^^^
+ return baseconv(str(lcg(pk)), b10, b62)
? ^^^
|
e4580a598e7d930ad90f5480751804fc1fa89826
|
pronto/__init__.py
|
pronto/__init__.py
|
import pkg_resources
__author__ = "Martin Larralde <[email protected]>"
__license__ = "MIT"
__version__ = pkg_resources.resource_string(__name__, "_version.txt").decode('utf-8').strip()
from .ontology import Ontology # noqa: F401
from .term import Term # noqa: F401
from .definition import Definition # noqa: F401
from .relationship import Relationship # noqa: F401
from .synonym import Synonym, SynonymType # noqa: F401
from .xref import Xref # noqa: F401
|
import pkg_resources
__author__ = "Martin Larralde <[email protected]>"
__license__ = "MIT"
__version__ = (
__import__('pkg_resources')
.resource_string(__name__, "_version.txt")
.decode('utf-8')
.strip()
)
from .ontology import Ontology # noqa: F401
from .term import Term # noqa: F401
from .definition import Definition # noqa: F401
from .relationship import Relationship # noqa: F401
from .synonym import Synonym, SynonymType # noqa: F401
from .xref import Xref # noqa: F401
|
Remove `pkg_resources` from the top-level package
|
Remove `pkg_resources` from the top-level package
|
Python
|
mit
|
althonos/pronto
|
import pkg_resources
__author__ = "Martin Larralde <[email protected]>"
__license__ = "MIT"
- __version__ = pkg_resources.resource_string(__name__, "_version.txt").decode('utf-8').strip()
+ __version__ = (
+ __import__('pkg_resources')
+ .resource_string(__name__, "_version.txt")
+ .decode('utf-8')
+ .strip()
+ )
from .ontology import Ontology # noqa: F401
from .term import Term # noqa: F401
from .definition import Definition # noqa: F401
from .relationship import Relationship # noqa: F401
from .synonym import Synonym, SynonymType # noqa: F401
from .xref import Xref # noqa: F401
|
Remove `pkg_resources` from the top-level package
|
## Code Before:
import pkg_resources
__author__ = "Martin Larralde <[email protected]>"
__license__ = "MIT"
__version__ = pkg_resources.resource_string(__name__, "_version.txt").decode('utf-8').strip()
from .ontology import Ontology # noqa: F401
from .term import Term # noqa: F401
from .definition import Definition # noqa: F401
from .relationship import Relationship # noqa: F401
from .synonym import Synonym, SynonymType # noqa: F401
from .xref import Xref # noqa: F401
## Instruction:
Remove `pkg_resources` from the top-level package
## Code After:
import pkg_resources
__author__ = "Martin Larralde <[email protected]>"
__license__ = "MIT"
__version__ = (
__import__('pkg_resources')
.resource_string(__name__, "_version.txt")
.decode('utf-8')
.strip()
)
from .ontology import Ontology # noqa: F401
from .term import Term # noqa: F401
from .definition import Definition # noqa: F401
from .relationship import Relationship # noqa: F401
from .synonym import Synonym, SynonymType # noqa: F401
from .xref import Xref # noqa: F401
|
import pkg_resources
__author__ = "Martin Larralde <[email protected]>"
__license__ = "MIT"
- __version__ = pkg_resources.resource_string(__name__, "_version.txt").decode('utf-8').strip()
+ __version__ = (
+ __import__('pkg_resources')
+ .resource_string(__name__, "_version.txt")
+ .decode('utf-8')
+ .strip()
+ )
from .ontology import Ontology # noqa: F401
from .term import Term # noqa: F401
from .definition import Definition # noqa: F401
from .relationship import Relationship # noqa: F401
from .synonym import Synonym, SynonymType # noqa: F401
from .xref import Xref # noqa: F401
|
8da480a92f3e27807275868c27cb41cbde8504d8
|
neo/test/rawiotest/test_alphaomegarawio.py
|
neo/test/rawiotest/test_alphaomegarawio.py
|
import unittest
from neo.rawio.alphaomegarawio import AlphaOmegaRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
class TestAlphaOmegaRawIO(BaseTestRawIO, unittest.TestCase):
rawioclass = AlphaOmegaRawIO
entities_to_download = [
"alphaomega",
]
entities_to_test = [
"alphaomega/",
]
if __name__ == "__main__":
unittest.main()
|
import logging
import unittest
from neo.rawio.alphaomegarawio import AlphaOmegaRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
logging.getLogger().setLevel(logging.INFO)
class TestAlphaOmegaRawIO(BaseTestRawIO, unittest.TestCase):
    """Run the common RawIO test suite against AlphaOmegaRawIO."""

    # IO class under test; BaseTestRawIO drives the shared checks against it.
    rawioclass = AlphaOmegaRawIO
    # Test-data entities — presumably fetched by BaseTestRawIO before the
    # tests run; confirm against the base class.
    entities_to_download = [
        "alphaomega",
    ]
    # Entities the shared tests instantiate the IO against.
    entities_to_test = [
        "alphaomega/",
    ]
if __name__ == "__main__":
unittest.main()
|
Set logging level higher so we don't spam tests with debug messages
|
Set logging level higher so we don't spam tests with debug messages
|
Python
|
bsd-3-clause
|
INM-6/python-neo,apdavison/python-neo,JuliaSprenger/python-neo,NeuralEnsemble/python-neo,samuelgarcia/python-neo
|
+ import logging
import unittest
from neo.rawio.alphaomegarawio import AlphaOmegaRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
+
+
+ logging.getLogger().setLevel(logging.INFO)
class TestAlphaOmegaRawIO(BaseTestRawIO, unittest.TestCase):
rawioclass = AlphaOmegaRawIO
entities_to_download = [
"alphaomega",
]
entities_to_test = [
"alphaomega/",
]
if __name__ == "__main__":
unittest.main()
|
Set logging level higher so we don't spam tests with debug messages
|
## Code Before:
import unittest
from neo.rawio.alphaomegarawio import AlphaOmegaRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
class TestAlphaOmegaRawIO(BaseTestRawIO, unittest.TestCase):
rawioclass = AlphaOmegaRawIO
entities_to_download = [
"alphaomega",
]
entities_to_test = [
"alphaomega/",
]
if __name__ == "__main__":
unittest.main()
## Instruction:
Set logging level higher so we don't spam tests with debug messages
## Code After:
import logging
import unittest
from neo.rawio.alphaomegarawio import AlphaOmegaRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
logging.getLogger().setLevel(logging.INFO)
class TestAlphaOmegaRawIO(BaseTestRawIO, unittest.TestCase):
rawioclass = AlphaOmegaRawIO
entities_to_download = [
"alphaomega",
]
entities_to_test = [
"alphaomega/",
]
if __name__ == "__main__":
unittest.main()
|
+ import logging
import unittest
from neo.rawio.alphaomegarawio import AlphaOmegaRawIO
from neo.test.rawiotest.common_rawio_test import BaseTestRawIO
+
+
+ logging.getLogger().setLevel(logging.INFO)
class TestAlphaOmegaRawIO(BaseTestRawIO, unittest.TestCase):
rawioclass = AlphaOmegaRawIO
entities_to_download = [
"alphaomega",
]
entities_to_test = [
"alphaomega/",
]
if __name__ == "__main__":
unittest.main()
|
4c4022e3a215b9b591220cd19fedbc501b63a1b2
|
virtualenv/builders/base.py
|
virtualenv/builders/base.py
|
import sys
class BaseBuilder(object):
def __init__(self, python, system_site_packages=False, clear=False):
# We default to sys.executable if we're not given a Python.
if python is None:
python = sys.executable
self.python = python
self.system_site_packages = system_site_packages
self.clear = clear
def create(self, destination):
# Actually Create the virtual environment
self.create_virtual_environment(destination)
def create_virtual_environment(self, destination):
raise NotImplementedError
|
import sys
class BaseBuilder(object):
    """Base class for virtual-environment builders.

    Subclasses implement ``create_virtual_environment`` and may override
    ``install_scripts``; ``create`` ties the two steps together.
    """

    def __init__(self, python, system_site_packages=False, clear=False):
        # Fall back to the interpreter running us when no Python is given.
        self.python = sys.executable if python is None else python
        self.system_site_packages = system_site_packages
        self.clear = clear

    def create(self, destination):
        """Build the environment at *destination*, then install its scripts."""
        self.create_virtual_environment(destination)
        self.install_scripts(destination)

    def create_virtual_environment(self, destination):
        # Subclasses must supply the actual creation logic.
        raise NotImplementedError

    def install_scripts(self, destination):
        # Hook for subclasses; the base implementation does nothing.
        pass
|
Add a hook we'll eventually use to install the activate scripts
|
Add a hook we'll eventually use to install the activate scripts
|
Python
|
mit
|
ionelmc/virtualenv,ionelmc/virtualenv,ionelmc/virtualenv
|
import sys
class BaseBuilder(object):
def __init__(self, python, system_site_packages=False, clear=False):
# We default to sys.executable if we're not given a Python.
if python is None:
python = sys.executable
self.python = python
self.system_site_packages = system_site_packages
self.clear = clear
def create(self, destination):
# Actually Create the virtual environment
self.create_virtual_environment(destination)
+ # Install our activate scripts into the virtual environment
+ self.install_scripts(destination)
+
def create_virtual_environment(self, destination):
raise NotImplementedError
+ def install_scripts(self, destination):
+ pass
+
|
Add a hook we'll eventually use to install the activate scripts
|
## Code Before:
import sys
class BaseBuilder(object):
def __init__(self, python, system_site_packages=False, clear=False):
# We default to sys.executable if we're not given a Python.
if python is None:
python = sys.executable
self.python = python
self.system_site_packages = system_site_packages
self.clear = clear
def create(self, destination):
# Actually Create the virtual environment
self.create_virtual_environment(destination)
def create_virtual_environment(self, destination):
raise NotImplementedError
## Instruction:
Add a hook we'll eventually use to install the activate scripts
## Code After:
import sys
class BaseBuilder(object):
def __init__(self, python, system_site_packages=False, clear=False):
# We default to sys.executable if we're not given a Python.
if python is None:
python = sys.executable
self.python = python
self.system_site_packages = system_site_packages
self.clear = clear
def create(self, destination):
# Actually Create the virtual environment
self.create_virtual_environment(destination)
# Install our activate scripts into the virtual environment
self.install_scripts(destination)
def create_virtual_environment(self, destination):
raise NotImplementedError
def install_scripts(self, destination):
pass
|
import sys
class BaseBuilder(object):
def __init__(self, python, system_site_packages=False, clear=False):
# We default to sys.executable if we're not given a Python.
if python is None:
python = sys.executable
self.python = python
self.system_site_packages = system_site_packages
self.clear = clear
def create(self, destination):
# Actually Create the virtual environment
self.create_virtual_environment(destination)
+ # Install our activate scripts into the virtual environment
+ self.install_scripts(destination)
+
def create_virtual_environment(self, destination):
raise NotImplementedError
+
+ def install_scripts(self, destination):
+ pass
|
0af3b589c6c271d07ad4e204fa41aa0fed167a94
|
thinglang/parser/constructs/cast_operation.py
|
thinglang/parser/constructs/cast_operation.py
|
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.values.access import Access
from thinglang.parser.values.method_call import MethodCall
class CastOperation(object):
"""
Explicitly cast from one type to another
Expects a conversion method on the source class
"""
@staticmethod
def create(source: Identifier, destination: Identifier) -> MethodCall:
return MethodCall(Access([source, Identifier('convert_') + destination]), MethodCall.STACK_ARGS)
|
from thinglang.lexer.operators.casts import LexicalCast
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.nodes import BaseNode
from thinglang.parser.rule import ParserRule
from thinglang.parser.values.access import Access
from thinglang.parser.values.method_call import MethodCall
from thinglang.utils.type_descriptors import ValueType
class CastOperation(BaseNode):
    """
    Explicitly cast from one type to another.

    Casts compile down to a call of a ``convert_<Destination>`` method,
    which is expected to exist on the source class.
    """

    @staticmethod
    def create(source: Identifier, destination: Identifier) -> MethodCall:
        # Build a call to `<source>.convert_<destination>`, taking its
        # arguments from the stack.
        return MethodCall(Access([source, Identifier('convert_') + destination]), MethodCall.STACK_ARGS)

    @staticmethod
    @ParserRule.mark
    def parse_inline_cast_op(value: ValueType, _: LexicalCast, target_type: Identifier):
        # Parser rule: an inline cast (value, cast token, target type) is
        # rewritten as a `<value>.convert_<target_type>()` call with no
        # explicit arguments.
        return MethodCall(Access([value, Identifier('convert_') + target_type]), [])
|
Add explicit parsing rule for cast operations
|
Add explicit parsing rule for cast operations
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
+ from thinglang.lexer.operators.casts import LexicalCast
from thinglang.lexer.values.identifier import Identifier
+ from thinglang.parser.nodes import BaseNode
+ from thinglang.parser.rule import ParserRule
from thinglang.parser.values.access import Access
from thinglang.parser.values.method_call import MethodCall
+ from thinglang.utils.type_descriptors import ValueType
- class CastOperation(object):
+ class CastOperation(BaseNode):
"""
Explicitly cast from one type to another
Expects a conversion method on the source class
"""
@staticmethod
def create(source: Identifier, destination: Identifier) -> MethodCall:
return MethodCall(Access([source, Identifier('convert_') + destination]), MethodCall.STACK_ARGS)
+ @staticmethod
+ @ParserRule.mark
+ def parse_inline_cast_op(value: ValueType, _: LexicalCast, target_type: Identifier):
+ return MethodCall(Access([value, Identifier('convert_') + target_type]), [])
+
|
Add explicit parsing rule for cast operations
|
## Code Before:
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.values.access import Access
from thinglang.parser.values.method_call import MethodCall
class CastOperation(object):
"""
Explicitly cast from one type to another
Expects a conversion method on the source class
"""
@staticmethod
def create(source: Identifier, destination: Identifier) -> MethodCall:
return MethodCall(Access([source, Identifier('convert_') + destination]), MethodCall.STACK_ARGS)
## Instruction:
Add explicit parsing rule for cast operations
## Code After:
from thinglang.lexer.operators.casts import LexicalCast
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.nodes import BaseNode
from thinglang.parser.rule import ParserRule
from thinglang.parser.values.access import Access
from thinglang.parser.values.method_call import MethodCall
from thinglang.utils.type_descriptors import ValueType
class CastOperation(BaseNode):
"""
Explicitly cast from one type to another
Expects a conversion method on the source class
"""
@staticmethod
def create(source: Identifier, destination: Identifier) -> MethodCall:
return MethodCall(Access([source, Identifier('convert_') + destination]), MethodCall.STACK_ARGS)
@staticmethod
@ParserRule.mark
def parse_inline_cast_op(value: ValueType, _: LexicalCast, target_type: Identifier):
return MethodCall(Access([value, Identifier('convert_') + target_type]), [])
|
+ from thinglang.lexer.operators.casts import LexicalCast
from thinglang.lexer.values.identifier import Identifier
+ from thinglang.parser.nodes import BaseNode
+ from thinglang.parser.rule import ParserRule
from thinglang.parser.values.access import Access
from thinglang.parser.values.method_call import MethodCall
+ from thinglang.utils.type_descriptors import ValueType
- class CastOperation(object):
? ^^ --
+ class CastOperation(BaseNode):
? +++++ ^
"""
Explicitly cast from one type to another
Expects a conversion method on the source class
"""
@staticmethod
def create(source: Identifier, destination: Identifier) -> MethodCall:
return MethodCall(Access([source, Identifier('convert_') + destination]), MethodCall.STACK_ARGS)
+
+ @staticmethod
+ @ParserRule.mark
+ def parse_inline_cast_op(value: ValueType, _: LexicalCast, target_type: Identifier):
+ return MethodCall(Access([value, Identifier('convert_') + target_type]), [])
|
6cf3baed6e5f707e5c307388018f4bb3121327f9
|
nanoservice/config.py
|
nanoservice/config.py
|
""" Read configuration for a service from a json file """
import io
import json
from .client import Client
from .error import ConfigError
def load(filepath=None, filecontent=None, clients=True):
""" Read the json file located at `filepath`
If `filecontent` is specified, its content will be json decoded
and loaded instead. The `clients` arg is a binary flag
which specifies whether the endpoints present in config (`filecontent`),
should be used to create `Client` objects.
Usage:
config.load(filepath=None, filecontent=None):
Provide either a filepath or a json string
"""
conf = {}
# Read json configuration
assert filepath or filecontent
if not filecontent:
with io.FileIO(filepath) as fh:
filecontent = fh.read().decode('utf-8')
configs = json.loads(filecontent)
if 'service.endpoint' not in configs:
raise ConfigError('Missing `service.endpoint` from config file')
# Update the conf items (Create clients if necessary)
for key, value in configs.items():
conf[key] = value
if key.endswith('.endpoint') and clients:
conf[key] = Client(value)
return conf
|
""" Read configuration for a service from a json file """
import io
import json
from .client import Client
from .error import ConfigError
class DotDict(dict):
    """A dict whose items can also be read and written as attributes."""

    def __getattr__(self, key):
        # Raise AttributeError (not KeyError) for missing keys so that
        # hasattr() and getattr(obj, name, default) behave as documented
        # for attribute access.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(key)

    def __setattr__(self, key, value):
        self[key] = value


def load(filepath=None, filecontent=None, clients=True):
    """ Read the json file located at `filepath`.

    If `filecontent` is specified, its content will be json decoded
    and loaded instead. The `clients` flag specifies whether the
    `*.endpoint` values present in the configuration should be wrapped
    in `Client` objects.

    Usage:
        config.load(filepath=None, filecontent=None):
        Provide either a filepath or a json string
    """
    conf = DotDict()

    # Read the json configuration, either from the given string or the file.
    assert filepath or filecontent
    if not filecontent:
        with io.FileIO(filepath) as fh:
            filecontent = fh.read().decode('utf-8')
    configs = json.loads(filecontent)

    if 'service.endpoint' not in configs:
        raise ConfigError('Missing `service.endpoint` from config file')

    # Copy items over, wrapping endpoint values in Client objects on demand.
    for key, value in configs.items():
        conf[key] = value
        if key.endswith('.endpoint') and clients:
            conf[key] = Client(value)
    return conf
|
Access the conf like an object
|
Access the conf like an object
|
Python
|
mit
|
walkr/nanoservice
|
""" Read configuration for a service from a json file """
import io
import json
from .client import Client
from .error import ConfigError
+
+
+ class DotDict(dict):
+ """ Access a dictionary like an object """
+
+ def __getattr__(self, key):
+ return self[key]
+
+ def __setattr__(self, key, value):
+ self[key] = value
def load(filepath=None, filecontent=None, clients=True):
""" Read the json file located at `filepath`
If `filecontent` is specified, its content will be json decoded
and loaded instead. The `clients` arg is a binary flag
which specifies whether the endpoints present in config (`filecontent`),
should be used to create `Client` objects.
Usage:
config.load(filepath=None, filecontent=None):
Provide either a filepath or a json string
"""
- conf = {}
+ conf = DotDict()
# Read json configuration
assert filepath or filecontent
if not filecontent:
with io.FileIO(filepath) as fh:
filecontent = fh.read().decode('utf-8')
configs = json.loads(filecontent)
if 'service.endpoint' not in configs:
raise ConfigError('Missing `service.endpoint` from config file')
# Update the conf items (Create clients if necessary)
for key, value in configs.items():
conf[key] = value
if key.endswith('.endpoint') and clients:
conf[key] = Client(value)
return conf
|
Access the conf like a object
|
## Code Before:
""" Read configuration for a service from a json file """
import io
import json
from .client import Client
from .error import ConfigError
def load(filepath=None, filecontent=None, clients=True):
""" Read the json file located at `filepath`
If `filecontent` is specified, its content will be json decoded
and loaded instead. The `clients` arg is a binary flag
which specifies whether the endpoints present in config (`filecontent`),
should be used to create `Client` objects.
Usage:
config.load(filepath=None, filecontent=None):
Provide either a filepath or a json string
"""
conf = {}
# Read json configuration
assert filepath or filecontent
if not filecontent:
with io.FileIO(filepath) as fh:
filecontent = fh.read().decode('utf-8')
configs = json.loads(filecontent)
if 'service.endpoint' not in configs:
raise ConfigError('Missing `service.endpoint` from config file')
# Update the conf items (Create clients if necessary)
for key, value in configs.items():
conf[key] = value
if key.endswith('.endpoint') and clients:
conf[key] = Client(value)
return conf
## Instruction:
Access the conf like a object
## Code After:
""" Read configuration for a service from a json file """
import io
import json
from .client import Client
from .error import ConfigError
class DotDict(dict):
""" Access a dictionary like an object """
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def load(filepath=None, filecontent=None, clients=True):
""" Read the json file located at `filepath`
If `filecontent` is specified, its content will be json decoded
and loaded instead. The `clients` arg is a binary flag
which specifies whether the endpoints present in config (`filecontent`),
should be used to create `Client` objects.
Usage:
config.load(filepath=None, filecontent=None):
Provide either a filepath or a json string
"""
conf = DotDict()
# Read json configuration
assert filepath or filecontent
if not filecontent:
with io.FileIO(filepath) as fh:
filecontent = fh.read().decode('utf-8')
configs = json.loads(filecontent)
if 'service.endpoint' not in configs:
raise ConfigError('Missing `service.endpoint` from config file')
# Update the conf items (Create clients if necessary)
for key, value in configs.items():
conf[key] = value
if key.endswith('.endpoint') and clients:
conf[key] = Client(value)
return conf
|
""" Read configuration for a service from a json file """
import io
import json
from .client import Client
from .error import ConfigError
+
+
+ class DotDict(dict):
+ """ Access a dictionary like an object """
+
+ def __getattr__(self, key):
+ return self[key]
+
+ def __setattr__(self, key, value):
+ self[key] = value
def load(filepath=None, filecontent=None, clients=True):
""" Read the json file located at `filepath`
If `filecontent` is specified, its content will be json decoded
and loaded instead. The `clients` arg is a binary flag
which specifies whether the endpoints present in config (`filecontent`),
should be used to create `Client` objects.
Usage:
config.load(filepath=None, filecontent=None):
Provide either a filepath or a json string
"""
- conf = {}
+ conf = DotDict()
# Read json configuration
assert filepath or filecontent
if not filecontent:
with io.FileIO(filepath) as fh:
filecontent = fh.read().decode('utf-8')
configs = json.loads(filecontent)
if 'service.endpoint' not in configs:
raise ConfigError('Missing `service.endpoint` from config file')
# Update the conf items (Create clients if necessary)
for key, value in configs.items():
conf[key] = value
if key.endswith('.endpoint') and clients:
conf[key] = Client(value)
return conf
|
64b4abde42b653e66444876dee0700afa64e6c6b
|
releasetasks/test/__init__.py
|
releasetasks/test/__init__.py
|
import os
import yaml
def read_file(path):
with open(path) as f:
return f.read()
PVT_KEY_FILE = os.path.join(os.path.dirname(__file__), "id_rsa")
PVT_KEY = read_file(PVT_KEY_FILE)
PUB_KEY = read_file(os.path.join(os.path.dirname(__file__), "id_rsa.pub"))
OTHER_PUB_KEY = read_file(os.path.join(os.path.dirname(__file__),
"other_rsa.pub"))
DUMMY_PUBLIC_KEY = os.path.join(os.path.dirname(__file__), "public.key")
def create_test_args(non_standard_arguments, permitted_defaults=None):
with open(os.path.join(os.path.dirname(__file__), 'test_graph_parameters.yml')) as f:
default_arguments = yaml.safe_load(f)
default_arguments.update(non_standard_arguments)
if permitted_defaults is not None:
default_arguments = {
key: val
for key, val in default_arguments.items()
if key in non_standard_arguments or key in permitted_defaults
}
return default_arguments
|
import os
import yaml
def read_file(path):
with open(path) as f:
return f.read()
PVT_KEY_FILE = os.path.join(os.path.dirname(__file__), "id_rsa")
PVT_KEY = read_file(PVT_KEY_FILE)
PUB_KEY = read_file(os.path.join(os.path.dirname(__file__), "id_rsa.pub"))
OTHER_PUB_KEY = read_file(os.path.join(os.path.dirname(__file__),
"other_rsa.pub"))
DUMMY_PUBLIC_KEY = os.path.join(os.path.dirname(__file__), "public.key")
def create_test_args(non_standard_arguments):
with open(os.path.join(os.path.dirname(__file__), 'test_graph_parameters.yml')) as f:
default_arguments = yaml.safe_load(f)
default_arguments.update(non_standard_arguments)
return default_arguments
|
Remove redundant keyword argument from create_test_args
|
Remove redundant keyword argument from create_test_args
|
Python
|
mpl-2.0
|
mozilla/releasetasks,bhearsum/releasetasks,rail/releasetasks
|
import os
import yaml
def read_file(path):
with open(path) as f:
return f.read()
PVT_KEY_FILE = os.path.join(os.path.dirname(__file__), "id_rsa")
PVT_KEY = read_file(PVT_KEY_FILE)
PUB_KEY = read_file(os.path.join(os.path.dirname(__file__), "id_rsa.pub"))
OTHER_PUB_KEY = read_file(os.path.join(os.path.dirname(__file__),
"other_rsa.pub"))
DUMMY_PUBLIC_KEY = os.path.join(os.path.dirname(__file__), "public.key")
- def create_test_args(non_standard_arguments, permitted_defaults=None):
+ def create_test_args(non_standard_arguments):
with open(os.path.join(os.path.dirname(__file__), 'test_graph_parameters.yml')) as f:
default_arguments = yaml.safe_load(f)
default_arguments.update(non_standard_arguments)
-
- if permitted_defaults is not None:
- default_arguments = {
- key: val
- for key, val in default_arguments.items()
- if key in non_standard_arguments or key in permitted_defaults
- }
-
return default_arguments
|
Remove redundant keyword argument from create_test_args
|
## Code Before:
import os
import yaml
def read_file(path):
with open(path) as f:
return f.read()
PVT_KEY_FILE = os.path.join(os.path.dirname(__file__), "id_rsa")
PVT_KEY = read_file(PVT_KEY_FILE)
PUB_KEY = read_file(os.path.join(os.path.dirname(__file__), "id_rsa.pub"))
OTHER_PUB_KEY = read_file(os.path.join(os.path.dirname(__file__),
"other_rsa.pub"))
DUMMY_PUBLIC_KEY = os.path.join(os.path.dirname(__file__), "public.key")
def create_test_args(non_standard_arguments, permitted_defaults=None):
with open(os.path.join(os.path.dirname(__file__), 'test_graph_parameters.yml')) as f:
default_arguments = yaml.safe_load(f)
default_arguments.update(non_standard_arguments)
if permitted_defaults is not None:
default_arguments = {
key: val
for key, val in default_arguments.items()
if key in non_standard_arguments or key in permitted_defaults
}
return default_arguments
## Instruction:
Remove redundant keyword argument from create_test_args
## Code After:
import os
import yaml
def read_file(path):
with open(path) as f:
return f.read()
PVT_KEY_FILE = os.path.join(os.path.dirname(__file__), "id_rsa")
PVT_KEY = read_file(PVT_KEY_FILE)
PUB_KEY = read_file(os.path.join(os.path.dirname(__file__), "id_rsa.pub"))
OTHER_PUB_KEY = read_file(os.path.join(os.path.dirname(__file__),
"other_rsa.pub"))
DUMMY_PUBLIC_KEY = os.path.join(os.path.dirname(__file__), "public.key")
def create_test_args(non_standard_arguments):
with open(os.path.join(os.path.dirname(__file__), 'test_graph_parameters.yml')) as f:
default_arguments = yaml.safe_load(f)
default_arguments.update(non_standard_arguments)
return default_arguments
|
import os
import yaml
def read_file(path):
with open(path) as f:
return f.read()
PVT_KEY_FILE = os.path.join(os.path.dirname(__file__), "id_rsa")
PVT_KEY = read_file(PVT_KEY_FILE)
PUB_KEY = read_file(os.path.join(os.path.dirname(__file__), "id_rsa.pub"))
OTHER_PUB_KEY = read_file(os.path.join(os.path.dirname(__file__),
"other_rsa.pub"))
DUMMY_PUBLIC_KEY = os.path.join(os.path.dirname(__file__), "public.key")
- def create_test_args(non_standard_arguments, permitted_defaults=None):
? -------------------------
+ def create_test_args(non_standard_arguments):
with open(os.path.join(os.path.dirname(__file__), 'test_graph_parameters.yml')) as f:
default_arguments = yaml.safe_load(f)
default_arguments.update(non_standard_arguments)
-
- if permitted_defaults is not None:
- default_arguments = {
- key: val
- for key, val in default_arguments.items()
- if key in non_standard_arguments or key in permitted_defaults
- }
-
return default_arguments
|
50367a2d73c395a85bb7dae058f9435be6ad7c36
|
vtimshow/__init__.py
|
vtimshow/__init__.py
|
import logging
import os
import vitables
_defaults = dict(
AUTHOR = "Keith F Prussing",
AUTHOR_EMAIL = "[email protected]",
LICENSE = "MIT",
PLUGIN_CLASS = "VtImageViewer",
PLUGIN_NAME = "Image Viewer",
COMMENT = "Display data sets as images",
VERSION = "{VERSION!s}",
UID = "image_viewer"
)
_defaults["FOLDER"], _defaults["MODULE_NAME"] = os.path.split(
os.path.dirname(__file__)
)
_defaults["LOGGER"] = logging.getLogger(_defaults["MODULE_NAME"])
_defaults["LOGGER"].addHandler(logging.NullHandler())
__docformat__ = "restructuredtext"
__version__ = _defaults["VERSION"]
plugin_class = _defaults["PLUGIN_CLASS"]
plugin_name = _defaults["PLUGIN_NAME"]
comment = _defaults["COMMENT"]
from vtimshow.vtimageviewer import VtImageViewer
|
import logging
import os
import vitables
_defaults = dict(
AUTHOR = "Keith F Prussing",
AUTHOR_EMAIL = "[email protected]",
LICENSE = "MIT",
PLUGIN_CLASS = "VtImageViewer",
PLUGIN_NAME = "Image Viewer",
COMMENT = "Display data sets as images",
VERSION = "{VERSION!s}",
UID = "image_viewer"
)
_defaults["FOLDER"], _defaults["MODULE_NAME"] = os.path.split(
os.path.dirname(__file__)
)
_defaults["LOGGER"] = logging.getLogger(_defaults["MODULE_NAME"])
_defaults["LOGGER"].addHandler(logging.NullHandler())
__docformat__ = "restructuredtext"
__version__ = _defaults["VERSION"]
plugin_class = _defaults["PLUGIN_CLASS"]
plugin_name = _defaults["PLUGIN_NAME"]
comment = _defaults["COMMENT"]
from vtimshow.vtimageviewer import VtImageViewer
def _setup_logger(name):
"""
Add the GUI's logging window as a stream handler.
By default, the stream logger is removed during the invocation of
``vitables``. The logging window in the GUI is a stream handler for
the ``vitables`` logger _only_. This method will add the logging
window in the GUI as a stream handler for the named logger. The
method checks to see if ``vitables`` is an active application. If
it is not, nothing is done.
"""
logger = logging.getLogger(name)
app = vitables.utils.getApp()
if app is not None:
stream = logging.StreamHandler(app.gui.logger)
stream.setFormatter(
logging.Formatter(vitables.vtgui._GUI_LOG_FORMAT)
)
logger.addHandler(stream)
return
_setup_logger(_defaults["MODULE_NAME"])
|
Add method to log to console
|
Add method to log to console
Add a method to set the GUI logging window to be the stream handler for
my plug in.
|
Python
|
mit
|
kprussing/vtimshow
|
import logging
import os
import vitables
_defaults = dict(
AUTHOR = "Keith F Prussing",
AUTHOR_EMAIL = "[email protected]",
LICENSE = "MIT",
PLUGIN_CLASS = "VtImageViewer",
PLUGIN_NAME = "Image Viewer",
COMMENT = "Display data sets as images",
VERSION = "{VERSION!s}",
UID = "image_viewer"
)
_defaults["FOLDER"], _defaults["MODULE_NAME"] = os.path.split(
os.path.dirname(__file__)
)
_defaults["LOGGER"] = logging.getLogger(_defaults["MODULE_NAME"])
_defaults["LOGGER"].addHandler(logging.NullHandler())
__docformat__ = "restructuredtext"
__version__ = _defaults["VERSION"]
plugin_class = _defaults["PLUGIN_CLASS"]
plugin_name = _defaults["PLUGIN_NAME"]
comment = _defaults["COMMENT"]
from vtimshow.vtimageviewer import VtImageViewer
+ def _setup_logger(name):
+ """
+ Add the GUI's logging window as a stream handler.
+ By default, the stream logger is removed during the invocation of
+ ``vitables``. The logging window in the GUI is a stream handler for
+ the ``vitables`` logger _only_. This method will add the logging
+ window in the GUI as a stream handler for the named logger. The
+ method checks to see if ``vitables`` is an active application. If
+ it is not, nothing is done.
+
+ """
+ logger = logging.getLogger(name)
+ app = vitables.utils.getApp()
+ if app is not None:
+ stream = logging.StreamHandler(app.gui.logger)
+ stream.setFormatter(
+ logging.Formatter(vitables.vtgui._GUI_LOG_FORMAT)
+ )
+ logger.addHandler(stream)
+
+ return
+
+ _setup_logger(_defaults["MODULE_NAME"])
+
+
|
Add method to log to console
|
## Code Before:
import logging
import os
import vitables
_defaults = dict(
AUTHOR = "Keith F Prussing",
AUTHOR_EMAIL = "[email protected]",
LICENSE = "MIT",
PLUGIN_CLASS = "VtImageViewer",
PLUGIN_NAME = "Image Viewer",
COMMENT = "Display data sets as images",
VERSION = "{VERSION!s}",
UID = "image_viewer"
)
_defaults["FOLDER"], _defaults["MODULE_NAME"] = os.path.split(
os.path.dirname(__file__)
)
_defaults["LOGGER"] = logging.getLogger(_defaults["MODULE_NAME"])
_defaults["LOGGER"].addHandler(logging.NullHandler())
__docformat__ = "restructuredtext"
__version__ = _defaults["VERSION"]
plugin_class = _defaults["PLUGIN_CLASS"]
plugin_name = _defaults["PLUGIN_NAME"]
comment = _defaults["COMMENT"]
from vtimshow.vtimageviewer import VtImageViewer
## Instruction:
Add method to log to console
## Code After:
import logging
import os
import vitables
_defaults = dict(
AUTHOR = "Keith F Prussing",
AUTHOR_EMAIL = "[email protected]",
LICENSE = "MIT",
PLUGIN_CLASS = "VtImageViewer",
PLUGIN_NAME = "Image Viewer",
COMMENT = "Display data sets as images",
VERSION = "{VERSION!s}",
UID = "image_viewer"
)
_defaults["FOLDER"], _defaults["MODULE_NAME"] = os.path.split(
os.path.dirname(__file__)
)
_defaults["LOGGER"] = logging.getLogger(_defaults["MODULE_NAME"])
_defaults["LOGGER"].addHandler(logging.NullHandler())
__docformat__ = "restructuredtext"
__version__ = _defaults["VERSION"]
plugin_class = _defaults["PLUGIN_CLASS"]
plugin_name = _defaults["PLUGIN_NAME"]
comment = _defaults["COMMENT"]
from vtimshow.vtimageviewer import VtImageViewer
def _setup_logger(name):
"""
Add the GUI's logging window as a stream handler.
By default, the stream logger is removed during the invocation of
``vitables``. The logging window in the GUI is a stream handler for
the ``vitables`` logger _only_. This method will add the logging
window in the GUI as a stream handler for the named logger. The
method checks to see if ``vitables`` is an active application. If
it is not, nothing is done.
"""
logger = logging.getLogger(name)
app = vitables.utils.getApp()
if app is not None:
stream = logging.StreamHandler(app.gui.logger)
stream.setFormatter(
logging.Formatter(vitables.vtgui._GUI_LOG_FORMAT)
)
logger.addHandler(stream)
return
_setup_logger(_defaults["MODULE_NAME"])
|
import logging
import os
import vitables
_defaults = dict(
AUTHOR = "Keith F Prussing",
AUTHOR_EMAIL = "[email protected]",
LICENSE = "MIT",
PLUGIN_CLASS = "VtImageViewer",
PLUGIN_NAME = "Image Viewer",
COMMENT = "Display data sets as images",
VERSION = "{VERSION!s}",
UID = "image_viewer"
)
_defaults["FOLDER"], _defaults["MODULE_NAME"] = os.path.split(
os.path.dirname(__file__)
)
_defaults["LOGGER"] = logging.getLogger(_defaults["MODULE_NAME"])
_defaults["LOGGER"].addHandler(logging.NullHandler())
__docformat__ = "restructuredtext"
__version__ = _defaults["VERSION"]
plugin_class = _defaults["PLUGIN_CLASS"]
plugin_name = _defaults["PLUGIN_NAME"]
comment = _defaults["COMMENT"]
from vtimshow.vtimageviewer import VtImageViewer
+ def _setup_logger(name):
+ """
+ Add the GUI's logging window as a stream handler.
+
+ By default, the stream logger is removed during the invocation of
+ ``vitables``. The logging window in the GUI is a stream handler for
+ the ``vitables`` logger _only_. This method will add the logging
+ window in the GUI as a stream handler for the named logger. The
+ method checks to see if ``vitables`` is an active application. If
+ it is not, nothing is done.
+
+ """
+ logger = logging.getLogger(name)
+ app = vitables.utils.getApp()
+ if app is not None:
+ stream = logging.StreamHandler(app.gui.logger)
+ stream.setFormatter(
+ logging.Formatter(vitables.vtgui._GUI_LOG_FORMAT)
+ )
+ logger.addHandler(stream)
+
+ return
+
+ _setup_logger(_defaults["MODULE_NAME"])
+
|
3ef77edcbf4b3268399f439b89f15ef087bd06bb
|
chamber/utils/logging.py
|
chamber/utils/logging.py
|
import json
import logging
import platform
from django.core.serializers.json import DjangoJSONEncoder
from django.http import UnreadablePostError
def skip_unreadable_post(record):
if record.exc_info:
exc_type, exc_value = record.exc_info[:2]
if isinstance(exc_value, UnreadablePostError):
return False
return True
class AppendExtraJSONHandler(logging.StreamHandler):
DEFAULT_STREAM_HANDLER_VARIABLE_KEYS = {
'name', 'msg', 'args', 'levelname', 'levelno', 'pathname', 'filename', 'module', 'exc_info', 'exc_text',
'stack_info', 'lineno', 'funcName', 'created', 'msecs', 'relativeCreated', 'thread', 'threadName',
'processName', 'process',
}
CUSTOM_STREAM_HANDLER_VARIABLE_KEYS = {'hostname'}
def emit(self, record):
extra = {
k: v
for k, v in record.__dict__.items()
if k not in self.DEFAULT_STREAM_HANDLER_VARIABLE_KEYS.union(self.CUSTOM_STREAM_HANDLER_VARIABLE_KEYS)
}
record.msg = '{} --- {}'.format(record.msg, json.dumps(extra, cls=DjangoJSONEncoder))
super().emit(record)
class HostnameFilter(logging.Filter):
hostname = platform.node()
def filter(self, record):
record.hostname = self.hostname
return True
|
import json
import logging
import platform
from django.core.serializers.json import DjangoJSONEncoder
from django.http import UnreadablePostError
def skip_unreadable_post(record):
if record.exc_info:
exc_type, exc_value = record.exc_info[:2]
if isinstance(exc_value, UnreadablePostError):
return False
return True
class AppendExtraJSONHandler(logging.StreamHandler):
DEFAULT_STREAM_HANDLER_VARIABLE_KEYS = {
'name', 'msg', 'args', 'levelname', 'levelno', 'pathname', 'filename', 'module', 'exc_info', 'exc_text',
'stack_info', 'lineno', 'funcName', 'created', 'msecs', 'relativeCreated', 'thread', 'threadName',
'processName', 'process',
}
CUSTOM_STREAM_HANDLER_VARIABLE_KEYS = {'hostname'}
def emit(self, record):
extra = {
k: v
for k, v in record.__dict__.items()
if k not in self.DEFAULT_STREAM_HANDLER_VARIABLE_KEYS.union(self.CUSTOM_STREAM_HANDLER_VARIABLE_KEYS)
}
record.msg = '{} --- {}'.format(record.msg, json.dumps(extra, cls=DjangoJSONEncoder,
default=lambda x: '<<NON-SERIALIZABLE TYPE: {}>>'.format(type(x).__qualname__)))
super().emit(record)
class HostnameFilter(logging.Filter):
hostname = platform.node()
def filter(self, record):
record.hostname = self.hostname
return True
|
Set default value for json.dumps
|
Set default value for json.dumps
Use default value when type cannot be serialized.
|
Python
|
bsd-3-clause
|
druids/django-chamber
|
import json
import logging
import platform
from django.core.serializers.json import DjangoJSONEncoder
from django.http import UnreadablePostError
def skip_unreadable_post(record):
if record.exc_info:
exc_type, exc_value = record.exc_info[:2]
if isinstance(exc_value, UnreadablePostError):
return False
return True
class AppendExtraJSONHandler(logging.StreamHandler):
DEFAULT_STREAM_HANDLER_VARIABLE_KEYS = {
'name', 'msg', 'args', 'levelname', 'levelno', 'pathname', 'filename', 'module', 'exc_info', 'exc_text',
'stack_info', 'lineno', 'funcName', 'created', 'msecs', 'relativeCreated', 'thread', 'threadName',
'processName', 'process',
}
CUSTOM_STREAM_HANDLER_VARIABLE_KEYS = {'hostname'}
def emit(self, record):
extra = {
k: v
for k, v in record.__dict__.items()
if k not in self.DEFAULT_STREAM_HANDLER_VARIABLE_KEYS.union(self.CUSTOM_STREAM_HANDLER_VARIABLE_KEYS)
}
- record.msg = '{} --- {}'.format(record.msg, json.dumps(extra, cls=DjangoJSONEncoder))
+ record.msg = '{} --- {}'.format(record.msg, json.dumps(extra, cls=DjangoJSONEncoder,
+ default=lambda x: '<<NON-SERIALIZABLE TYPE: {}>>'.format(type(x).__qualname__)))
super().emit(record)
class HostnameFilter(logging.Filter):
hostname = platform.node()
def filter(self, record):
record.hostname = self.hostname
return True
|
Set default value for json.dumps
|
## Code Before:
import json
import logging
import platform
from django.core.serializers.json import DjangoJSONEncoder
from django.http import UnreadablePostError
def skip_unreadable_post(record):
if record.exc_info:
exc_type, exc_value = record.exc_info[:2]
if isinstance(exc_value, UnreadablePostError):
return False
return True
class AppendExtraJSONHandler(logging.StreamHandler):
DEFAULT_STREAM_HANDLER_VARIABLE_KEYS = {
'name', 'msg', 'args', 'levelname', 'levelno', 'pathname', 'filename', 'module', 'exc_info', 'exc_text',
'stack_info', 'lineno', 'funcName', 'created', 'msecs', 'relativeCreated', 'thread', 'threadName',
'processName', 'process',
}
CUSTOM_STREAM_HANDLER_VARIABLE_KEYS = {'hostname'}
def emit(self, record):
extra = {
k: v
for k, v in record.__dict__.items()
if k not in self.DEFAULT_STREAM_HANDLER_VARIABLE_KEYS.union(self.CUSTOM_STREAM_HANDLER_VARIABLE_KEYS)
}
record.msg = '{} --- {}'.format(record.msg, json.dumps(extra, cls=DjangoJSONEncoder))
super().emit(record)
class HostnameFilter(logging.Filter):
hostname = platform.node()
def filter(self, record):
record.hostname = self.hostname
return True
## Instruction:
Set default value for json.dumps
## Code After:
import json
import logging
import platform
from django.core.serializers.json import DjangoJSONEncoder
from django.http import UnreadablePostError
def skip_unreadable_post(record):
if record.exc_info:
exc_type, exc_value = record.exc_info[:2]
if isinstance(exc_value, UnreadablePostError):
return False
return True
class AppendExtraJSONHandler(logging.StreamHandler):
DEFAULT_STREAM_HANDLER_VARIABLE_KEYS = {
'name', 'msg', 'args', 'levelname', 'levelno', 'pathname', 'filename', 'module', 'exc_info', 'exc_text',
'stack_info', 'lineno', 'funcName', 'created', 'msecs', 'relativeCreated', 'thread', 'threadName',
'processName', 'process',
}
CUSTOM_STREAM_HANDLER_VARIABLE_KEYS = {'hostname'}
def emit(self, record):
extra = {
k: v
for k, v in record.__dict__.items()
if k not in self.DEFAULT_STREAM_HANDLER_VARIABLE_KEYS.union(self.CUSTOM_STREAM_HANDLER_VARIABLE_KEYS)
}
record.msg = '{} --- {}'.format(record.msg, json.dumps(extra, cls=DjangoJSONEncoder,
default=lambda x: '<<NON-SERIALIZABLE TYPE: {}>>'.format(type(x).__qualname__)))
super().emit(record)
class HostnameFilter(logging.Filter):
hostname = platform.node()
def filter(self, record):
record.hostname = self.hostname
return True
|
import json
import logging
import platform
from django.core.serializers.json import DjangoJSONEncoder
from django.http import UnreadablePostError
def skip_unreadable_post(record):
if record.exc_info:
exc_type, exc_value = record.exc_info[:2]
if isinstance(exc_value, UnreadablePostError):
return False
return True
class AppendExtraJSONHandler(logging.StreamHandler):
DEFAULT_STREAM_HANDLER_VARIABLE_KEYS = {
'name', 'msg', 'args', 'levelname', 'levelno', 'pathname', 'filename', 'module', 'exc_info', 'exc_text',
'stack_info', 'lineno', 'funcName', 'created', 'msecs', 'relativeCreated', 'thread', 'threadName',
'processName', 'process',
}
CUSTOM_STREAM_HANDLER_VARIABLE_KEYS = {'hostname'}
def emit(self, record):
extra = {
k: v
for k, v in record.__dict__.items()
if k not in self.DEFAULT_STREAM_HANDLER_VARIABLE_KEYS.union(self.CUSTOM_STREAM_HANDLER_VARIABLE_KEYS)
}
- record.msg = '{} --- {}'.format(record.msg, json.dumps(extra, cls=DjangoJSONEncoder))
? ^^
+ record.msg = '{} --- {}'.format(record.msg, json.dumps(extra, cls=DjangoJSONEncoder,
? ^
+ default=lambda x: '<<NON-SERIALIZABLE TYPE: {}>>'.format(type(x).__qualname__)))
super().emit(record)
class HostnameFilter(logging.Filter):
hostname = platform.node()
def filter(self, record):
record.hostname = self.hostname
return True
|
e105b44e4c07b43c36290a8f5d703f4ff0b26953
|
sqlshare_rest/util/query_queue.py
|
sqlshare_rest/util/query_queue.py
|
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Query
from django.utils import timezone
def process_queue():
filtered = Query.objects.filter(is_finished=False)
try:
oldest_query = filtered.order_by('id')[:1].get()
except Query.DoesNotExist:
return
backend = get_backend()
try:
res = backend.run_query(oldest_query.sql, oldest_query.owner)
except Exception as ex:
oldest_query.has_error = True
oldest_query.error = str(ex)
oldest_query.is_finished = True
oldest_query.date_finished = timezone.now()
print "Finished: ", oldest_query.date_finished
oldest_query.save()
|
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Query
from django.utils import timezone
def process_queue():
filtered = Query.objects.filter(is_finished=False)
try:
oldest_query = filtered.order_by('id')[:1].get()
except Query.DoesNotExist:
return
backend = get_backend()
try:
res = backend.run_query(oldest_query.sql, oldest_query.owner)
except Exception as ex:
oldest_query.has_error = True
oldest_query.error = str(ex)
oldest_query.is_finished = True
oldest_query.date_finished = timezone.now()
oldest_query.save()
|
Remove a print statement that was dumb and breaking python3
|
Remove a print statement that was dumb and breaking python3
|
Python
|
apache-2.0
|
uw-it-aca/sqlshare-rest,uw-it-aca/sqlshare-rest,uw-it-aca/sqlshare-rest,uw-it-aca/sqlshare-rest
|
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Query
from django.utils import timezone
def process_queue():
filtered = Query.objects.filter(is_finished=False)
try:
oldest_query = filtered.order_by('id')[:1].get()
except Query.DoesNotExist:
return
backend = get_backend()
try:
res = backend.run_query(oldest_query.sql, oldest_query.owner)
except Exception as ex:
oldest_query.has_error = True
oldest_query.error = str(ex)
oldest_query.is_finished = True
oldest_query.date_finished = timezone.now()
- print "Finished: ", oldest_query.date_finished
oldest_query.save()
|
Remove a print statement that was dumb and breaking python3
|
## Code Before:
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Query
from django.utils import timezone
def process_queue():
filtered = Query.objects.filter(is_finished=False)
try:
oldest_query = filtered.order_by('id')[:1].get()
except Query.DoesNotExist:
return
backend = get_backend()
try:
res = backend.run_query(oldest_query.sql, oldest_query.owner)
except Exception as ex:
oldest_query.has_error = True
oldest_query.error = str(ex)
oldest_query.is_finished = True
oldest_query.date_finished = timezone.now()
print "Finished: ", oldest_query.date_finished
oldest_query.save()
## Instruction:
Remove a print statement that was dumb and breaking python3
## Code After:
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Query
from django.utils import timezone
def process_queue():
filtered = Query.objects.filter(is_finished=False)
try:
oldest_query = filtered.order_by('id')[:1].get()
except Query.DoesNotExist:
return
backend = get_backend()
try:
res = backend.run_query(oldest_query.sql, oldest_query.owner)
except Exception as ex:
oldest_query.has_error = True
oldest_query.error = str(ex)
oldest_query.is_finished = True
oldest_query.date_finished = timezone.now()
oldest_query.save()
|
from sqlshare_rest.util.db import get_backend
from sqlshare_rest.models import Query
from django.utils import timezone
def process_queue():
filtered = Query.objects.filter(is_finished=False)
try:
oldest_query = filtered.order_by('id')[:1].get()
except Query.DoesNotExist:
return
backend = get_backend()
try:
res = backend.run_query(oldest_query.sql, oldest_query.owner)
except Exception as ex:
oldest_query.has_error = True
oldest_query.error = str(ex)
oldest_query.is_finished = True
oldest_query.date_finished = timezone.now()
- print "Finished: ", oldest_query.date_finished
oldest_query.save()
|
4ab53bc73406396206ead375dd7b5e656fdc41b7
|
mycli/packages/special/utils.py
|
mycli/packages/special/utils.py
|
import os
import subprocess
def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
command = arg.strip()
directory = ''
error = False
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1]
try:
os.chdir(directory)
output = subprocess.check_output('pwd', stderr=subprocess.STDOUT, shell=True)
except OSError as e:
output, error = e.strerror, True
# formatting a nice output
if error:
output = "Error: {}".format(output)
else:
output = "Current directory: {}".format(output)
return output
|
import os
import subprocess
def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
directory = ''
error = False
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1]
try:
os.chdir(directory)
output = subprocess.check_output('pwd', stderr=subprocess.STDOUT, shell=True)
except OSError as e:
output, error = e.strerror, True
# formatting a nice output
if error:
output = "Error: {}".format(output)
else:
output = "Current directory: {}".format(output)
return output
|
Remove unused variable from `handle_cd_command`
|
Remove unused variable from `handle_cd_command`
|
Python
|
bsd-3-clause
|
mdsrosa/mycli,mdsrosa/mycli
|
import os
import subprocess
def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
- command = arg.strip()
directory = ''
error = False
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1]
try:
os.chdir(directory)
output = subprocess.check_output('pwd', stderr=subprocess.STDOUT, shell=True)
except OSError as e:
output, error = e.strerror, True
# formatting a nice output
if error:
output = "Error: {}".format(output)
else:
output = "Current directory: {}".format(output)
return output
|
Remove unused variable from `handle_cd_command`
|
## Code Before:
import os
import subprocess
def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
command = arg.strip()
directory = ''
error = False
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1]
try:
os.chdir(directory)
output = subprocess.check_output('pwd', stderr=subprocess.STDOUT, shell=True)
except OSError as e:
output, error = e.strerror, True
# formatting a nice output
if error:
output = "Error: {}".format(output)
else:
output = "Current directory: {}".format(output)
return output
## Instruction:
Remove unused variable from `handle_cd_command`
## Code After:
import os
import subprocess
def handle_cd_command(arg):
    """Handle a `cd` shell command by calling Python's os.chdir.

    :param arg: the raw command string, e.g. ``"cd /some/dir"``.
    :return: a human-readable status message -- the new working
        directory on success, or an error description on failure.
    """
    CD_CMD = 'cd'
    error = False
    # Everything after the "cd " prefix is the target directory; if the
    # prefix is absent, tokens[-1] is simply the whole argument string.
    tokens = arg.split(CD_CMD + ' ')
    directory = tokens[-1]
    try:
        os.chdir(directory)
        # Ask the shell for the (resolved) current working directory.
        output = subprocess.check_output('pwd', stderr=subprocess.STDOUT, shell=True)
    except OSError as e:
        output, error = e.strerror, True
    # Format a nice message for the user.
    if error:
        output = "Error: {}".format(output)
    else:
        output = "Current directory: {}".format(output)
    return output
|
import os
import subprocess
def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
- command = arg.strip()
directory = ''
error = False
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1]
try:
os.chdir(directory)
output = subprocess.check_output('pwd', stderr=subprocess.STDOUT, shell=True)
except OSError as e:
output, error = e.strerror, True
# formatting a nice output
if error:
output = "Error: {}".format(output)
else:
output = "Current directory: {}".format(output)
return output
|
627ceb6adff6a2f954048b7641ac3b68d19ef019
|
experiments/stop_motion_tool/stop_motion_tool.py
|
experiments/stop_motion_tool/stop_motion_tool.py
|
from cam import OpenCV_Cam
import cv2
cam = OpenCV_Cam(0)
cam.size = (1920, 1080)
KEY_ESC = 27
KEY_SPACE = ord(' ')
prevFrame = None
i = 0
fourcc = cv2.cv.CV_FOURCC(*'XVID')
video = cv2.VideoWriter('output.avi',fourcc, 3.0, (1920,1080), isColor =True)
while True:
# Capture frame-by-frame
frame = cam.read()
# image processing functions
# Load the frame into a window named as 'Press any key to exit'
if (prevFrame is not None):
showFrame = cv2.addWeighted(frame,0.7,prevFrame,0.3,0)
else:
showFrame = frame
resizeShowFrame = cv2.resize(showFrame, (0,0), fx = 0.5, fy = 0.5 )
cv2.imshow('Press ESC to exit', resizeShowFrame)
# wait for the key
key_code = cv2.waitKey(10)
if key_code is KEY_SPACE:
cv2.imwrite('frame'+str(i)+'.png', frame)
video.write(frame)
prevFrame = frame
i += 1
elif key_code is KEY_ESC:
break
cv2.destroyAllWindows()
cam.release()
video.release()
|
from cam import OpenCV_Cam
import cv2
import os.path
cam = OpenCV_Cam(0)
cam.size = (1920, 1080)
KEY_ESC = 27
KEY_SPACE = ord(' ')
prevFrame = None
i = 0
fname="frame.png"
if os.path.isfile(fname):
prevFrame = cv2.imread(fname)
fourcc = cv2.cv.CV_FOURCC(*'XVID')
video = cv2.VideoWriter('output.avi',fourcc, 3.0, cam.size, isColor =True)
while True:
# Capture frame-by-frame
frame = cam.read()
# image processing functions
# Load the frame into a window named as 'Press any key to exit'
if (prevFrame is not None):
showFrame = cv2.addWeighted(frame,0.7,prevFrame,0.3,0)
else:
showFrame = frame
resizeShowFrame = cv2.resize(showFrame, (0,0), fx = 0.5, fy = 0.5 )
cv2.imshow('Press ESC to exit', resizeShowFrame)
# wait for the key
key_code = cv2.waitKey(10)
if key_code is KEY_SPACE or key_code == 2228224:
cv2.imwrite('frame'+str(i)+'.png', frame)
video.write(frame)
prevFrame = frame
i += 1
elif key_code is KEY_ESC:
break
cv2.destroyAllWindows()
cam.release()
video.release()
|
Add support for presenter and continuing frame capture
|
Add support for presenter and continuing frame capture
Clicking the right button on the presenter will trigger frame capture.
|
Python
|
mit
|
fatcloud/PyCV-time
|
from cam import OpenCV_Cam
import cv2
+ import os.path
+
cam = OpenCV_Cam(0)
cam.size = (1920, 1080)
KEY_ESC = 27
KEY_SPACE = ord(' ')
prevFrame = None
i = 0
+ fname="frame.png"
+ if os.path.isfile(fname):
+ prevFrame = cv2.imread(fname)
+
+
fourcc = cv2.cv.CV_FOURCC(*'XVID')
- video = cv2.VideoWriter('output.avi',fourcc, 3.0, (1920,1080), isColor =True)
+ video = cv2.VideoWriter('output.avi',fourcc, 3.0, cam.size, isColor =True)
while True:
# Capture frame-by-frame
frame = cam.read()
# image processing functions
# Load the frame into a window named as 'Press any key to exit'
if (prevFrame is not None):
showFrame = cv2.addWeighted(frame,0.7,prevFrame,0.3,0)
else:
showFrame = frame
resizeShowFrame = cv2.resize(showFrame, (0,0), fx = 0.5, fy = 0.5 )
cv2.imshow('Press ESC to exit', resizeShowFrame)
# wait for the key
key_code = cv2.waitKey(10)
-
- if key_code is KEY_SPACE:
+
+ if key_code is KEY_SPACE or key_code == 2228224:
cv2.imwrite('frame'+str(i)+'.png', frame)
video.write(frame)
prevFrame = frame
i += 1
elif key_code is KEY_ESC:
break
cv2.destroyAllWindows()
cam.release()
video.release()
|
Add support for presenter and continuing frame capture
|
## Code Before:
from cam import OpenCV_Cam
import cv2
cam = OpenCV_Cam(0)
cam.size = (1920, 1080)
KEY_ESC = 27
KEY_SPACE = ord(' ')
prevFrame = None
i = 0
fourcc = cv2.cv.CV_FOURCC(*'XVID')
video = cv2.VideoWriter('output.avi',fourcc, 3.0, (1920,1080), isColor =True)
while True:
# Capture frame-by-frame
frame = cam.read()
# image processing functions
# Load the frame into a window named as 'Press any key to exit'
if (prevFrame is not None):
showFrame = cv2.addWeighted(frame,0.7,prevFrame,0.3,0)
else:
showFrame = frame
resizeShowFrame = cv2.resize(showFrame, (0,0), fx = 0.5, fy = 0.5 )
cv2.imshow('Press ESC to exit', resizeShowFrame)
# wait for the key
key_code = cv2.waitKey(10)
if key_code is KEY_SPACE:
cv2.imwrite('frame'+str(i)+'.png', frame)
video.write(frame)
prevFrame = frame
i += 1
elif key_code is KEY_ESC:
break
cv2.destroyAllWindows()
cam.release()
video.release()
## Instruction:
Add support for presenter and continuing frame capture
## Code After:
from cam import OpenCV_Cam
import cv2
import os.path

# Stop-motion capture tool: overlays the previously captured frame
# (onion skinning) on the live preview.  SPACE (or a presenter's
# right-click key code) captures a frame; ESC quits.

cam = OpenCV_Cam(0)
cam.size = (1920, 1080)

KEY_ESC = 27
KEY_SPACE = ord(' ')

prevFrame = None
i = 0

# Resume onion skinning from a previously saved frame, if one exists.
fname = "frame.png"
if os.path.isfile(fname):
    prevFrame = cv2.imread(fname)

fourcc = cv2.cv.CV_FOURCC(*'XVID')
# Use the camera's own size so the video matches the captured frames.
video = cv2.VideoWriter('output.avi', fourcc, 3.0, cam.size, isColor=True)

while True:
    # Capture frame-by-frame.
    frame = cam.read()

    # Blend the previous capture over the live frame to help alignment.
    if prevFrame is not None:
        showFrame = cv2.addWeighted(frame, 0.7, prevFrame, 0.3, 0)
    else:
        showFrame = frame

    resizeShowFrame = cv2.resize(showFrame, (0, 0), fx=0.5, fy=0.5)
    cv2.imshow('Press ESC to exit', resizeShowFrame)

    # Wait for a key press.  Use == rather than `is`: identity checks on
    # ints rely on CPython's small-int caching and are not a reliable
    # equality test (and 2228224 would never be cached anyway).
    key_code = cv2.waitKey(10)
    if key_code == KEY_SPACE or key_code == 2228224:  # 2228224: presenter right-click
        cv2.imwrite('frame' + str(i) + '.png', frame)
        video.write(frame)
        prevFrame = frame
        i += 1
    elif key_code == KEY_ESC:
        break

cv2.destroyAllWindows()
cam.release()
video.release()
|
from cam import OpenCV_Cam
import cv2
+ import os.path
+
cam = OpenCV_Cam(0)
cam.size = (1920, 1080)
KEY_ESC = 27
KEY_SPACE = ord(' ')
prevFrame = None
i = 0
+ fname="frame.png"
+ if os.path.isfile(fname):
+ prevFrame = cv2.imread(fname)
+
+
fourcc = cv2.cv.CV_FOURCC(*'XVID')
- video = cv2.VideoWriter('output.avi',fourcc, 3.0, (1920,1080), isColor =True)
? ^^^^^^^^^^^
+ video = cv2.VideoWriter('output.avi',fourcc, 3.0, cam.size, isColor =True)
? ^^^^^^^^
while True:
# Capture frame-by-frame
frame = cam.read()
# image processing functions
# Load the frame into a window named as 'Press any key to exit'
if (prevFrame is not None):
showFrame = cv2.addWeighted(frame,0.7,prevFrame,0.3,0)
else:
showFrame = frame
resizeShowFrame = cv2.resize(showFrame, (0,0), fx = 0.5, fy = 0.5 )
cv2.imshow('Press ESC to exit', resizeShowFrame)
# wait for the key
key_code = cv2.waitKey(10)
-
- if key_code is KEY_SPACE:
+
+ if key_code is KEY_SPACE or key_code == 2228224:
cv2.imwrite('frame'+str(i)+'.png', frame)
video.write(frame)
prevFrame = frame
i += 1
elif key_code is KEY_ESC:
break
cv2.destroyAllWindows()
cam.release()
video.release()
|
0207b0ea61050d8728e084277b14015bd92a8beb
|
tests/integration/test_kinesis.py
|
tests/integration/test_kinesis.py
|
from tests import unittest
import itertools
import botocore.session
class TestKinesisListStreams(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.service = self.session.get_service('kinesis')
self.endpoint = self.service.get_endpoint('us-east-1')
def test_list_streams(self):
operation = self.service.get_operation('ListStreams')
http, parsed = operation.call(self.endpoint)
self.assertEqual(http.status_code, 200)
self.assertIn('StreamNames', parsed)
if __name__ == '__main__':
unittest.main()
|
from tests import unittest
import botocore.session
class TestKinesisListStreams(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('kinesis', 'us-east-1')
def test_list_streams(self):
parsed = self.client.list_streams()
self.assertIn('StreamNames', parsed)
if __name__ == '__main__':
unittest.main()
|
Switch kinesis integ tests over to client interface
|
Switch kinesis integ tests over to client interface
|
Python
|
apache-2.0
|
pplu/botocore,boto/botocore
|
-
from tests import unittest
- import itertools
import botocore.session
class TestKinesisListStreams(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
+ self.client = self.session.create_client('kinesis', 'us-east-1')
- self.service = self.session.get_service('kinesis')
- self.endpoint = self.service.get_endpoint('us-east-1')
def test_list_streams(self):
+ parsed = self.client.list_streams()
- operation = self.service.get_operation('ListStreams')
- http, parsed = operation.call(self.endpoint)
- self.assertEqual(http.status_code, 200)
self.assertIn('StreamNames', parsed)
if __name__ == '__main__':
unittest.main()
|
Switch kinesis integ tests over to client interface
|
## Code Before:
from tests import unittest
import itertools
import botocore.session
class TestKinesisListStreams(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.service = self.session.get_service('kinesis')
self.endpoint = self.service.get_endpoint('us-east-1')
def test_list_streams(self):
operation = self.service.get_operation('ListStreams')
http, parsed = operation.call(self.endpoint)
self.assertEqual(http.status_code, 200)
self.assertIn('StreamNames', parsed)
if __name__ == '__main__':
unittest.main()
## Instruction:
Switch kinesis integ tests over to client interface
## Code After:
from tests import unittest
import botocore.session
class TestKinesisListStreams(unittest.TestCase):
    """Integration test for Kinesis through the botocore client interface."""

    def setUp(self):
        # Build a real botocore client pinned to us-east-1; running this
        # test therefore requires valid AWS credentials and network access.
        self.session = botocore.session.get_session()
        self.client = self.session.create_client('kinesis', 'us-east-1')

    def test_list_streams(self):
        # A successful ListStreams response always carries 'StreamNames'.
        parsed = self.client.list_streams()
        self.assertIn('StreamNames', parsed)


if __name__ == '__main__':
    unittest.main()
|
-
from tests import unittest
- import itertools
import botocore.session
class TestKinesisListStreams(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
+ self.client = self.session.create_client('kinesis', 'us-east-1')
- self.service = self.session.get_service('kinesis')
- self.endpoint = self.service.get_endpoint('us-east-1')
def test_list_streams(self):
+ parsed = self.client.list_streams()
- operation = self.service.get_operation('ListStreams')
- http, parsed = operation.call(self.endpoint)
- self.assertEqual(http.status_code, 200)
self.assertIn('StreamNames', parsed)
if __name__ == '__main__':
unittest.main()
|
41a83c6742f0e688dad5a98761c0f0415c77bac9
|
outgoing_mail.py
|
outgoing_mail.py
|
from google.appengine.api import mail
from google.appengine.ext.webapp import template
import os
from_address = '"EventBot" <[email protected]>'
def send(to, template_name, values):
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
|
from google.appengine.api import mail
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
from datetime import datetime
import os
from_address = '"EventBot" <[email protected]>'
email_interval = 10
def send(to, template_name, values):
"""Send an email to the specified address using a template. No
more than one email per EMAIL_INTERVAL seconds will be sent to any
given address.
"""
last_action = memcache.get(to, namespace='last_action')
if last_action != None:
return
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
memcache.set(to, datetime.now(), time=email_interval, namespace='last_action')
|
Use memcache to rate-limit outgoing emails.
|
Use memcache to rate-limit outgoing emails.
|
Python
|
mit
|
eentzel/myeventbot,eentzel/myeventbot,eentzel/myeventbot,eentzel/myeventbot,eentzel/myeventbot
|
from google.appengine.api import mail
from google.appengine.ext.webapp import template
+ from google.appengine.api import memcache
+ from datetime import datetime
import os
from_address = '"EventBot" <[email protected]>'
+ email_interval = 10
def send(to, template_name, values):
+ """Send an email to the specified address using a template. No
+ more than one email per EMAIL_INTERVAL seconds will be sent to any
+ given address.
+ """
+ last_action = memcache.get(to, namespace='last_action')
+ if last_action != None:
+ return
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
+ memcache.set(to, datetime.now(), time=email_interval, namespace='last_action')
|
Use memcache to rate-limit outgoing emails.
|
## Code Before:
from google.appengine.api import mail
from google.appengine.ext.webapp import template
import os
from_address = '"EventBot" <[email protected]>'
def send(to, template_name, values):
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
## Instruction:
Use memcache to rate-limit outgoing emails.
## Code After:
from google.appengine.api import mail
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
from datetime import datetime
import os
from_address = '"EventBot" <[email protected]>'
email_interval = 10
def send(to, template_name, values):
    """Send an email to the specified address using a template.

    No more than one email per ``email_interval`` seconds will be sent
    to any given address.

    :param to: recipient email address (also used as the rate-limit key).
    :param template_name: base name of the template pair under
        ``email_templates/`` (``<name>.subject`` and ``<name>.body``).
    :param values: template context used to render subject and body.
    """
    # A memcache entry expires after `email_interval` seconds, so a hit
    # here means this address was emailed too recently -- skip silently.
    last_action = memcache.get(to, namespace='last_action')
    if last_action is not None:
        return
    path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
    message = mail.EmailMessage(sender=from_address, to=to)
    message.subject = template.render(path + '.subject', values)
    message.body = template.render(path + '.body', values)
    message.send()
    # Record the send; the entry's TTL implements the rate limit.
    memcache.set(to, datetime.now(), time=email_interval, namespace='last_action')
|
from google.appengine.api import mail
from google.appengine.ext.webapp import template
+ from google.appengine.api import memcache
+ from datetime import datetime
import os
from_address = '"EventBot" <[email protected]>'
+ email_interval = 10
def send(to, template_name, values):
+ """Send an email to the specified address using a template. No
+ more than one email per EMAIL_INTERVAL seconds will be sent to any
+ given address.
+ """
+ last_action = memcache.get(to, namespace='last_action')
+ if last_action != None:
+ return
path = os.path.join(os.path.dirname(__file__), 'email_templates', template_name)
message = mail.EmailMessage(sender=from_address, to=to)
message.subject = template.render(path + '.subject', values)
message.body = template.render(path + '.body', values)
message.send()
+ memcache.set(to, datetime.now(), time=email_interval, namespace='last_action')
|
c4109fadf0a66db5af0e579600a70e4b7e28493d
|
csdms/dakota/experiment.py
|
csdms/dakota/experiment.py
|
"""A template for describing a Dakota experiment."""
import os
import importlib
import inspect
blocks = ['environment', 'method', 'variables', 'interface', 'responses']
class Experiment(object):
"""Describe parameters to create an input file for a Dakota experiment."""
def __init__(self,
method='vector_parameter_study',
variables='continuous_design',
interface='direct',
responses='response_functions',
**kwargs):
"""Create a set of default experiment parameters."""
self.environment = self._import('environment', 'environment', **kwargs)
self.method = self._import('method', method, **kwargs)
self.variables = self._import('variables', variables, **kwargs)
self.interface = self._import('interface', interface, **kwargs)
self.responses = self._import('responses', responses, **kwargs)
def _get_subpackage_namespace(self, subpackage):
return os.path.splitext(self.__module__)[0] + '.' + subpackage
def _import(self, subpackage, module, **kwargs):
namespace = self._get_subpackage_namespace(subpackage) + '.' + module
module = importlib.import_module(namespace)
cls = getattr(module, module.classname)
return cls(**kwargs)
def __str__(self):
s = '# Dakota input file\n'
for section in blocks:
s += str(getattr(self, section))
return s
|
"""A template for describing a Dakota experiment."""
import os
import importlib
class Experiment(object):
"""Describe parameters to create an input file for a Dakota experiment."""
def __init__(self,
environment='environment',
method='vector_parameter_study',
variables='continuous_design',
interface='direct',
responses='response_functions',
**kwargs):
"""Create a set of default experiment parameters."""
self._blocks = ('environment', 'method', 'variables',
'interface', 'responses')
for section in self._blocks:
cls = self._import(section, eval(section), **kwargs)
setattr(self, section, cls)
def _get_subpackage_namespace(self, subpackage):
return os.path.splitext(self.__module__)[0] + '.' + subpackage
def _import(self, subpackage, module, **kwargs):
namespace = self._get_subpackage_namespace(subpackage) + '.' + module
module = importlib.import_module(namespace)
cls = getattr(module, module.classname)
return cls(**kwargs)
def __str__(self):
s = '# Dakota input file\n'
for section in self._blocks:
s += str(getattr(self, section))
return s
|
Refactor init method with _blocks attribute
|
Refactor init method with _blocks attribute
|
Python
|
mit
|
csdms/dakota,csdms/dakota
|
"""A template for describing a Dakota experiment."""
import os
import importlib
- import inspect
-
-
- blocks = ['environment', 'method', 'variables', 'interface', 'responses']
class Experiment(object):
"""Describe parameters to create an input file for a Dakota experiment."""
def __init__(self,
+ environment='environment',
method='vector_parameter_study',
variables='continuous_design',
interface='direct',
responses='response_functions',
**kwargs):
"""Create a set of default experiment parameters."""
- self.environment = self._import('environment', 'environment', **kwargs)
- self.method = self._import('method', method, **kwargs)
- self.variables = self._import('variables', variables, **kwargs)
- self.interface = self._import('interface', interface, **kwargs)
- self.responses = self._import('responses', responses, **kwargs)
+ self._blocks = ('environment', 'method', 'variables',
+ 'interface', 'responses')
+ for section in self._blocks:
+ cls = self._import(section, eval(section), **kwargs)
+ setattr(self, section, cls)
def _get_subpackage_namespace(self, subpackage):
return os.path.splitext(self.__module__)[0] + '.' + subpackage
def _import(self, subpackage, module, **kwargs):
namespace = self._get_subpackage_namespace(subpackage) + '.' + module
module = importlib.import_module(namespace)
cls = getattr(module, module.classname)
return cls(**kwargs)
def __str__(self):
s = '# Dakota input file\n'
- for section in blocks:
+ for section in self._blocks:
s += str(getattr(self, section))
return s
|
Refactor init method with _blocks attribute
|
## Code Before:
"""A template for describing a Dakota experiment."""
import os
import importlib
import inspect
blocks = ['environment', 'method', 'variables', 'interface', 'responses']
class Experiment(object):
"""Describe parameters to create an input file for a Dakota experiment."""
def __init__(self,
method='vector_parameter_study',
variables='continuous_design',
interface='direct',
responses='response_functions',
**kwargs):
"""Create a set of default experiment parameters."""
self.environment = self._import('environment', 'environment', **kwargs)
self.method = self._import('method', method, **kwargs)
self.variables = self._import('variables', variables, **kwargs)
self.interface = self._import('interface', interface, **kwargs)
self.responses = self._import('responses', responses, **kwargs)
def _get_subpackage_namespace(self, subpackage):
return os.path.splitext(self.__module__)[0] + '.' + subpackage
def _import(self, subpackage, module, **kwargs):
namespace = self._get_subpackage_namespace(subpackage) + '.' + module
module = importlib.import_module(namespace)
cls = getattr(module, module.classname)
return cls(**kwargs)
def __str__(self):
s = '# Dakota input file\n'
for section in blocks:
s += str(getattr(self, section))
return s
## Instruction:
Refactor init method with _blocks attribute
## Code After:
"""A template for describing a Dakota experiment."""
import os
import importlib
class Experiment(object):

    """Describe parameters to create an input file for a Dakota experiment."""

    def __init__(self,
                 environment='environment',
                 method='vector_parameter_study',
                 variables='continuous_design',
                 interface='direct',
                 responses='response_functions',
                 **kwargs):
        """Create a set of default experiment parameters.

        Each keyword names the module (within the matching subpackage)
        that implements that section of the Dakota input file.
        """
        self._blocks = ('environment', 'method', 'variables',
                        'interface', 'responses')
        # Explicit mapping from section name to the chosen module name;
        # avoids eval() on parameter names, which is fragile and unsafe.
        selections = {'environment': environment,
                      'method': method,
                      'variables': variables,
                      'interface': interface,
                      'responses': responses}
        for section in self._blocks:
            instance = self._import(section, selections[section], **kwargs)
            setattr(self, section, instance)

    def _get_subpackage_namespace(self, subpackage):
        """Return the dotted namespace for *subpackage* under this package."""
        return os.path.splitext(self.__module__)[0] + '.' + subpackage

    def _import(self, subpackage, module, **kwargs):
        """Import *module* from *subpackage* and instantiate its class.

        The target module is expected to define ``classname`` naming the
        class to construct with **kwargs.
        """
        namespace = self._get_subpackage_namespace(subpackage) + '.' + module
        module = importlib.import_module(namespace)
        cls = getattr(module, module.classname)
        return cls(**kwargs)

    def __str__(self):
        """Render the experiment as Dakota input-file text."""
        s = '# Dakota input file\n'
        for section in self._blocks:
            s += str(getattr(self, section))
        return s
|
"""A template for describing a Dakota experiment."""
import os
import importlib
- import inspect
-
-
- blocks = ['environment', 'method', 'variables', 'interface', 'responses']
class Experiment(object):
"""Describe parameters to create an input file for a Dakota experiment."""
def __init__(self,
+ environment='environment',
method='vector_parameter_study',
variables='continuous_design',
interface='direct',
responses='response_functions',
**kwargs):
"""Create a set of default experiment parameters."""
- self.environment = self._import('environment', 'environment', **kwargs)
- self.method = self._import('method', method, **kwargs)
- self.variables = self._import('variables', variables, **kwargs)
- self.interface = self._import('interface', interface, **kwargs)
- self.responses = self._import('responses', responses, **kwargs)
+ self._blocks = ('environment', 'method', 'variables',
+ 'interface', 'responses')
+ for section in self._blocks:
+ cls = self._import(section, eval(section), **kwargs)
+ setattr(self, section, cls)
def _get_subpackage_namespace(self, subpackage):
return os.path.splitext(self.__module__)[0] + '.' + subpackage
def _import(self, subpackage, module, **kwargs):
namespace = self._get_subpackage_namespace(subpackage) + '.' + module
module = importlib.import_module(namespace)
cls = getattr(module, module.classname)
return cls(**kwargs)
def __str__(self):
s = '# Dakota input file\n'
- for section in blocks:
+ for section in self._blocks:
? ++++++
s += str(getattr(self, section))
return s
|
22c56941d054e083b1d406b1440efd8c0ecc5f11
|
tests/test_oai_harvester.py
|
tests/test_oai_harvester.py
|
from __future__ import unicode_literals
import httpretty
from scrapi.base import OAIHarvester
from scrapi.linter import RawDocument
from .utils import TEST_OAI_DOC
class TestHarvester(OAIHarvester):
base_url = ''
long_name = 'Test'
short_name = 'test'
url = 'test'
property_list = ['type', 'source', 'publisher', 'format', 'date']
@httpretty.activate
def harvest(self, days_back=1):
start_date = '2015-03-14'
end_date = '2015-03-16'
request_url = 'http://validAI.edu/?from={}&to={}'.format(start_date, end_date)
httpretty.register_uri(httpretty.GET, request_url,
body=TEST_OAI_DOC,
content_type="application/XML")
records = self.get_records(request_url, start_date, end_date)
return [RawDocument({
'doc': str(TEST_OAI_DOC),
'source': 'crossref',
'filetype': 'XML',
'docID': "1"
}) for record in records]
class TestOAIHarvester(object):
def setup_method(self, method):
self.harvester = TestHarvester()
def test_normalize(self):
results = [
self.harvester.normalize(record) for record in self.harvester.harvest()
]
for res in results:
assert res['title'] == 'Test'
|
from __future__ import unicode_literals
import httpretty
from scrapi.base import OAIHarvester
from scrapi.linter import RawDocument
from .utils import TEST_OAI_DOC
class TestHarvester(OAIHarvester):
base_url = ''
long_name = 'Test'
short_name = 'test'
url = 'test'
property_list = ['type', 'source', 'publisher', 'format', 'date']
@httpretty.activate
def harvest(self, start_date='2015-03-14', end_date='2015-03-16'):
request_url = 'http://validAI.edu/?from={}&to={}'.format(start_date, end_date)
httpretty.register_uri(httpretty.GET, request_url,
body=TEST_OAI_DOC,
content_type="application/XML")
records = self.get_records(request_url, start_date, end_date)
return [RawDocument({
'doc': str(TEST_OAI_DOC),
'source': 'crossref',
'filetype': 'XML',
'docID': "1"
}) for record in records]
class TestOAIHarvester(object):
def setup_method(self, method):
self.harvester = TestHarvester()
def test_normalize(self):
results = [
self.harvester.normalize(record) for record in self.harvester.harvest()
]
for res in results:
assert res['title'] == 'Test'
|
Add dates to test OAI harvester
|
Add dates to test OAI harvester
|
Python
|
apache-2.0
|
erinspace/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,icereval/scrapi,fabianvf/scrapi,mehanig/scrapi,alexgarciac/scrapi,CenterForOpenScience/scrapi,mehanig/scrapi,felliott/scrapi,jeffreyliu3230/scrapi,ostwald/scrapi,erinspace/scrapi,fabianvf/scrapi
|
from __future__ import unicode_literals
import httpretty
from scrapi.base import OAIHarvester
from scrapi.linter import RawDocument
from .utils import TEST_OAI_DOC
class TestHarvester(OAIHarvester):
base_url = ''
long_name = 'Test'
short_name = 'test'
url = 'test'
property_list = ['type', 'source', 'publisher', 'format', 'date']
@httpretty.activate
+ def harvest(self, start_date='2015-03-14', end_date='2015-03-16'):
- def harvest(self, days_back=1):
-
- start_date = '2015-03-14'
- end_date = '2015-03-16'
request_url = 'http://validAI.edu/?from={}&to={}'.format(start_date, end_date)
httpretty.register_uri(httpretty.GET, request_url,
body=TEST_OAI_DOC,
content_type="application/XML")
records = self.get_records(request_url, start_date, end_date)
return [RawDocument({
'doc': str(TEST_OAI_DOC),
'source': 'crossref',
'filetype': 'XML',
'docID': "1"
}) for record in records]
class TestOAIHarvester(object):
def setup_method(self, method):
self.harvester = TestHarvester()
def test_normalize(self):
results = [
self.harvester.normalize(record) for record in self.harvester.harvest()
]
for res in results:
assert res['title'] == 'Test'
|
Add dates to test OAI harvester
|
## Code Before:
from __future__ import unicode_literals
import httpretty
from scrapi.base import OAIHarvester
from scrapi.linter import RawDocument
from .utils import TEST_OAI_DOC
class TestHarvester(OAIHarvester):
base_url = ''
long_name = 'Test'
short_name = 'test'
url = 'test'
property_list = ['type', 'source', 'publisher', 'format', 'date']
@httpretty.activate
def harvest(self, days_back=1):
start_date = '2015-03-14'
end_date = '2015-03-16'
request_url = 'http://validAI.edu/?from={}&to={}'.format(start_date, end_date)
httpretty.register_uri(httpretty.GET, request_url,
body=TEST_OAI_DOC,
content_type="application/XML")
records = self.get_records(request_url, start_date, end_date)
return [RawDocument({
'doc': str(TEST_OAI_DOC),
'source': 'crossref',
'filetype': 'XML',
'docID': "1"
}) for record in records]
class TestOAIHarvester(object):
def setup_method(self, method):
self.harvester = TestHarvester()
def test_normalize(self):
results = [
self.harvester.normalize(record) for record in self.harvester.harvest()
]
for res in results:
assert res['title'] == 'Test'
## Instruction:
Add dates to test OAI harvester
## Code After:
from __future__ import unicode_literals
import httpretty
from scrapi.base import OAIHarvester
from scrapi.linter import RawDocument
from .utils import TEST_OAI_DOC
class TestHarvester(OAIHarvester):
    """Minimal concrete OAI harvester used to exercise the shared machinery."""

    base_url = ''
    long_name = 'Test'
    short_name = 'test'
    url = 'test'
    # Extra OAI properties to extract from each harvested record.
    property_list = ['type', 'source', 'publisher', 'format', 'date']

    @httpretty.activate
    def harvest(self, start_date='2015-03-14', end_date='2015-03-16'):
        """Harvest the canned TEST_OAI_DOC over a mocked HTTP endpoint.

        Returns one RawDocument per record parsed from the fixture.
        """
        request_url = 'http://validAI.edu/?from={}&to={}'.format(start_date, end_date)

        # Serve the canned XML document for the constructed request URL.
        httpretty.register_uri(httpretty.GET, request_url,
                               body=TEST_OAI_DOC,
                               content_type="application/XML")

        records = self.get_records(request_url, start_date, end_date)
        return [RawDocument({
            'doc': str(TEST_OAI_DOC),
            'source': 'crossref',
            'filetype': 'XML',
            'docID': "1"
        }) for record in records]
class TestOAIHarvester(object):
    """Tests normalization of records produced by TestHarvester."""

    def setup_method(self, method):
        # Fresh harvester per test (pytest-style setup hook).
        self.harvester = TestHarvester()

    def test_normalize(self):
        results = [
            self.harvester.normalize(record) for record in self.harvester.harvest()
        ]
        for res in results:
            # The title comes from the canned TEST_OAI_DOC fixture.
            assert res['title'] == 'Test'
|
from __future__ import unicode_literals
import httpretty
from scrapi.base import OAIHarvester
from scrapi.linter import RawDocument
from .utils import TEST_OAI_DOC
class TestHarvester(OAIHarvester):
base_url = ''
long_name = 'Test'
short_name = 'test'
url = 'test'
property_list = ['type', 'source', 'publisher', 'format', 'date']
@httpretty.activate
+ def harvest(self, start_date='2015-03-14', end_date='2015-03-16'):
- def harvest(self, days_back=1):
-
- start_date = '2015-03-14'
- end_date = '2015-03-16'
request_url = 'http://validAI.edu/?from={}&to={}'.format(start_date, end_date)
httpretty.register_uri(httpretty.GET, request_url,
body=TEST_OAI_DOC,
content_type="application/XML")
records = self.get_records(request_url, start_date, end_date)
return [RawDocument({
'doc': str(TEST_OAI_DOC),
'source': 'crossref',
'filetype': 'XML',
'docID': "1"
}) for record in records]
class TestOAIHarvester(object):
def setup_method(self, method):
self.harvester = TestHarvester()
def test_normalize(self):
results = [
self.harvester.normalize(record) for record in self.harvester.harvest()
]
for res in results:
assert res['title'] == 'Test'
|
8d46db626298f2d21f4f1d8b6f75fdc08bd761dc
|
zinnia/models/author.py
|
zinnia/models/author.py
|
"""Author model for Zinnia"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
@python_2_unicode_compatible
class Author(get_user_model()):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
objects = get_user_model()._default_manager
published = EntryRelatedPublishedManager()
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
return ('zinnia_author_detail', [self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return self.get_full_name() or self.get_username()
class Meta:
"""
Author's meta informations.
"""
app_label = 'zinnia'
proxy = True
|
"""Author model for Zinnia"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
class AuthorManagers(models.Model):
published = EntryRelatedPublishedManager()
class Meta:
abstract = True
@python_2_unicode_compatible
class Author(get_user_model(), AuthorManagers):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
return ('zinnia_author_detail', [self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return self.get_full_name() or self.get_username()
class Meta:
"""
Author's meta informations.
"""
app_label = 'zinnia'
proxy = True
|
Move Author Managers into an abstract base class
|
Move Author Managers into an abstract base class
Copying of the default manager causes the source model to become poluted.
To supply additional managers without replacing the default manager,
the Django docs recommend inheriting from an abstract base class.
https://docs.djangoproject.com/en/dev/topics/db/models/#proxy-model-managers
|
Python
|
bsd-3-clause
|
bywbilly/django-blog-zinnia,Zopieux/django-blog-zinnia,petecummings/django-blog-zinnia,ZuluPro/django-blog-zinnia,petecummings/django-blog-zinnia,marctc/django-blog-zinnia,Maplecroft/django-blog-zinnia,petecummings/django-blog-zinnia,Fantomas42/django-blog-zinnia,1844144/django-blog-zinnia,marctc/django-blog-zinnia,Fantomas42/django-blog-zinnia,ghachey/django-blog-zinnia,Zopieux/django-blog-zinnia,1844144/django-blog-zinnia,dapeng0802/django-blog-zinnia,ghachey/django-blog-zinnia,marctc/django-blog-zinnia,ghachey/django-blog-zinnia,bywbilly/django-blog-zinnia,ZuluPro/django-blog-zinnia,ZuluPro/django-blog-zinnia,1844144/django-blog-zinnia,dapeng0802/django-blog-zinnia,extertioner/django-blog-zinnia,Maplecroft/django-blog-zinnia,aorzh/django-blog-zinnia,Maplecroft/django-blog-zinnia,bywbilly/django-blog-zinnia,Fantomas42/django-blog-zinnia,extertioner/django-blog-zinnia,dapeng0802/django-blog-zinnia,Zopieux/django-blog-zinnia,aorzh/django-blog-zinnia,extertioner/django-blog-zinnia,aorzh/django-blog-zinnia
|
"""Author model for Zinnia"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
+ class AuthorManagers(models.Model):
+ published = EntryRelatedPublishedManager()
+
+ class Meta:
+ abstract = True
@python_2_unicode_compatible
- class Author(get_user_model()):
+ class Author(get_user_model(), AuthorManagers):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
-
- objects = get_user_model()._default_manager
- published = EntryRelatedPublishedManager()
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
return ('zinnia_author_detail', [self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return self.get_full_name() or self.get_username()
class Meta:
"""
Author's meta informations.
"""
app_label = 'zinnia'
proxy = True
|
Move Author Managers into an abstract base class
|
## Code Before:
"""Author model for Zinnia"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
@python_2_unicode_compatible
class Author(get_user_model()):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
objects = get_user_model()._default_manager
published = EntryRelatedPublishedManager()
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
return ('zinnia_author_detail', [self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return self.get_full_name() or self.get_username()
class Meta:
"""
Author's meta informations.
"""
app_label = 'zinnia'
proxy = True
## Instruction:
Move Author Managers into an abstract base class
## Code After:
"""Author model for Zinnia"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
class AuthorManagers(models.Model):
published = EntryRelatedPublishedManager()
class Meta:
abstract = True
@python_2_unicode_compatible
class Author(get_user_model(), AuthorManagers):
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
return ('zinnia_author_detail', [self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return self.get_full_name() or self.get_username()
class Meta:
"""
Author's meta informations.
"""
app_label = 'zinnia'
proxy = True
|
"""Author model for Zinnia"""
from django.db import models
from django.contrib.auth import get_user_model
from django.utils.encoding import python_2_unicode_compatible
from zinnia.managers import entries_published
from zinnia.managers import EntryRelatedPublishedManager
+ class AuthorManagers(models.Model):
+ published = EntryRelatedPublishedManager()
+
+ class Meta:
+ abstract = True
@python_2_unicode_compatible
- class Author(get_user_model()):
+ class Author(get_user_model(), AuthorManagers):
? ++++++++++++++++
"""
Proxy model around :class:`django.contrib.auth.models.get_user_model`.
"""
-
- objects = get_user_model()._default_manager
- published = EntryRelatedPublishedManager()
def entries_published(self):
"""
Returns author's published entries.
"""
return entries_published(self.entries)
@models.permalink
def get_absolute_url(self):
"""
Builds and returns the author's URL based on his username.
"""
return ('zinnia_author_detail', [self.get_username()])
def __str__(self):
"""
If the user has a full name, use it instead of the username.
"""
return self.get_full_name() or self.get_username()
class Meta:
"""
Author's meta informations.
"""
app_label = 'zinnia'
proxy = True
|
96f9819ab67b48135a61c8a1e15bc808cf82d194
|
bokeh/models/widget.py
|
bokeh/models/widget.py
|
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import Bool
class Widget(PlotObject):
disabled = Bool(False)
|
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import Bool
from ..embed import notebook_div
class Widget(PlotObject):
disabled = Bool(False)
def _repr_html_(self):
return notebook_div(self)
@property
def html(self):
from IPython.core.display import HTML
return HTML(self._repr_html_())
|
Implement display protocol for Widget (_repr_html_)
|
Implement display protocol for Widget (_repr_html_)
This effectively allows us to automatically display plots and widgets.
|
Python
|
bsd-3-clause
|
evidation-health/bokeh,abele/bokeh,mutirri/bokeh,percyfal/bokeh,htygithub/bokeh,jakirkham/bokeh,rhiever/bokeh,DuCorey/bokeh,srinathv/bokeh,DuCorey/bokeh,awanke/bokeh,clairetang6/bokeh,ericdill/bokeh,ahmadia/bokeh,saifrahmed/bokeh,mutirri/bokeh,bokeh/bokeh,gpfreitas/bokeh,philippjfr/bokeh,xguse/bokeh,srinathv/bokeh,draperjames/bokeh,schoolie/bokeh,laurent-george/bokeh,paultcochrane/bokeh,akloster/bokeh,caseyclements/bokeh,justacec/bokeh,maxalbert/bokeh,philippjfr/bokeh,birdsarah/bokeh,evidation-health/bokeh,rs2/bokeh,phobson/bokeh,PythonCharmers/bokeh,draperjames/bokeh,satishgoda/bokeh,mindriot101/bokeh,PythonCharmers/bokeh,CrazyGuo/bokeh,mindriot101/bokeh,birdsarah/bokeh,jplourenco/bokeh,matbra/bokeh,htygithub/bokeh,deeplook/bokeh,abele/bokeh,bsipocz/bokeh,rhiever/bokeh,laurent-george/bokeh,ericmjl/bokeh,htygithub/bokeh,DuCorey/bokeh,justacec/bokeh,PythonCharmers/bokeh,msarahan/bokeh,mutirri/bokeh,percyfal/bokeh,timsnyder/bokeh,timsnyder/bokeh,muku42/bokeh,deeplook/bokeh,xguse/bokeh,daodaoliang/bokeh,ChristosChristofidis/bokeh,ericmjl/bokeh,timothydmorton/bokeh,percyfal/bokeh,schoolie/bokeh,alan-unravel/bokeh,jplourenco/bokeh,canavandl/bokeh,Karel-van-de-Plassche/bokeh,bokeh/bokeh,evidation-health/bokeh,Karel-van-de-Plassche/bokeh,tacaswell/bokeh,bsipocz/bokeh,mutirri/bokeh,deeplook/bokeh,dennisobrien/bokeh,msarahan/bokeh,quasiben/bokeh,roxyboy/bokeh,josherick/bokeh,mindriot101/bokeh,saifrahmed/bokeh,rothnic/bokeh,CrazyGuo/bokeh,canavandl/bokeh,aiguofer/bokeh,akloster/bokeh,clairetang6/bokeh,almarklein/bokeh,josherick/bokeh,aiguofer/bokeh,timothydmorton/bokeh,ptitjano/bokeh,KasperPRasmussen/bokeh,mindriot101/bokeh,aavanian/bokeh,josherick/bokeh,quasiben/bokeh,xguse/bokeh,saifrahmed/bokeh,KasperPRasmussen/bokeh,akloster/bokeh,awanke/bokeh,ptitjano/bokeh,aavanian/bokeh,azjps/bokeh,tacaswell/bokeh,draperjames/bokeh,alan-unravel/bokeh,ericmjl/bokeh,rs2/bokeh,bokeh/bokeh,stonebig/bokeh,tacaswell/bokeh,ChinaQuants/bokeh,stonebig/bokeh,stuart-knock/bokeh,paultcochrane/boke
h,xguse/bokeh,jakirkham/bokeh,abele/bokeh,alan-unravel/bokeh,KasperPRasmussen/bokeh,birdsarah/bokeh,stuart-knock/bokeh,Karel-van-de-Plassche/bokeh,carlvlewis/bokeh,gpfreitas/bokeh,dennisobrien/bokeh,deeplook/bokeh,alan-unravel/bokeh,lukebarnard1/bokeh,jakirkham/bokeh,ahmadia/bokeh,aavanian/bokeh,phobson/bokeh,clairetang6/bokeh,timsnyder/bokeh,ptitjano/bokeh,ahmadia/bokeh,lukebarnard1/bokeh,rs2/bokeh,tacaswell/bokeh,ericdill/bokeh,matbra/bokeh,satishgoda/bokeh,awanke/bokeh,rothnic/bokeh,evidation-health/bokeh,jplourenco/bokeh,muku42/bokeh,CrazyGuo/bokeh,roxyboy/bokeh,bokeh/bokeh,caseyclements/bokeh,jplourenco/bokeh,matbra/bokeh,gpfreitas/bokeh,ChinaQuants/bokeh,ChinaQuants/bokeh,KasperPRasmussen/bokeh,dennisobrien/bokeh,saifrahmed/bokeh,timothydmorton/bokeh,rhiever/bokeh,timsnyder/bokeh,maxalbert/bokeh,DuCorey/bokeh,azjps/bokeh,birdsarah/bokeh,satishgoda/bokeh,stonebig/bokeh,srinathv/bokeh,rs2/bokeh,aiguofer/bokeh,schoolie/bokeh,rothnic/bokeh,philippjfr/bokeh,laurent-george/bokeh,stonebig/bokeh,matbra/bokeh,justacec/bokeh,maxalbert/bokeh,percyfal/bokeh,jakirkham/bokeh,eteq/bokeh,eteq/bokeh,rs2/bokeh,philippjfr/bokeh,daodaoliang/bokeh,ericdill/bokeh,azjps/bokeh,khkaminska/bokeh,draperjames/bokeh,philippjfr/bokeh,almarklein/bokeh,canavandl/bokeh,ericmjl/bokeh,clairetang6/bokeh,ptitjano/bokeh,srinathv/bokeh,KasperPRasmussen/bokeh,ericmjl/bokeh,htygithub/bokeh,carlvlewis/bokeh,ptitjano/bokeh,aiguofer/bokeh,laurent-george/bokeh,lukebarnard1/bokeh,ChristosChristofidis/bokeh,abele/bokeh,ChristosChristofidis/bokeh,azjps/bokeh,draperjames/bokeh,jakirkham/bokeh,roxyboy/bokeh,Karel-van-de-Plassche/bokeh,roxyboy/bokeh,khkaminska/bokeh,phobson/bokeh,caseyclements/bokeh,paultcochrane/bokeh,percyfal/bokeh,caseyclements/bokeh,muku42/bokeh,eteq/bokeh,msarahan/bokeh,aiguofer/bokeh,almarklein/bokeh,ChinaQuants/bokeh,ericdill/bokeh,PythonCharmers/bokeh,khkaminska/bokeh,carlvlewis/bokeh,canavandl/bokeh,bokeh/bokeh,timsnyder/bokeh,eteq/bokeh,muku42/bokeh,rothnic/bokeh,ahmadia/bokeh,timoth
ydmorton/bokeh,DuCorey/bokeh,stuart-knock/bokeh,bsipocz/bokeh,phobson/bokeh,dennisobrien/bokeh,stuart-knock/bokeh,CrazyGuo/bokeh,aavanian/bokeh,schoolie/bokeh,phobson/bokeh,dennisobrien/bokeh,akloster/bokeh,bsipocz/bokeh,paultcochrane/bokeh,josherick/bokeh,daodaoliang/bokeh,schoolie/bokeh,rhiever/bokeh,maxalbert/bokeh,satishgoda/bokeh,ChristosChristofidis/bokeh,msarahan/bokeh,carlvlewis/bokeh,justacec/bokeh,Karel-van-de-Plassche/bokeh,lukebarnard1/bokeh,daodaoliang/bokeh,azjps/bokeh,awanke/bokeh,khkaminska/bokeh,gpfreitas/bokeh,aavanian/bokeh,quasiben/bokeh
|
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import Bool
+ from ..embed import notebook_div
class Widget(PlotObject):
disabled = Bool(False)
+ def _repr_html_(self):
+ return notebook_div(self)
+
+ @property
+ def html(self):
+ from IPython.core.display import HTML
+ return HTML(self._repr_html_())
+
|
Implement display protocol for Widget (_repr_html_)
|
## Code Before:
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import Bool
class Widget(PlotObject):
disabled = Bool(False)
## Instruction:
Implement display protocol for Widget (_repr_html_)
## Code After:
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import Bool
from ..embed import notebook_div
class Widget(PlotObject):
disabled = Bool(False)
def _repr_html_(self):
return notebook_div(self)
@property
def html(self):
from IPython.core.display import HTML
return HTML(self._repr_html_())
|
from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import Bool
+ from ..embed import notebook_div
class Widget(PlotObject):
disabled = Bool(False)
+
+ def _repr_html_(self):
+ return notebook_div(self)
+
+ @property
+ def html(self):
+ from IPython.core.display import HTML
+ return HTML(self._repr_html_())
|
e2132caf1c677b34eddd679e23983022ec12b5df
|
watermarker/conf.py
|
watermarker/conf.py
|
import warnings
from django.conf import settings # pylint: disable=W0611
from appconf import AppConf
class WatermarkSettings(AppConf):
QUALITY = 85
OBSCURE_ORIGINAL = True
RANDOM_POSITION_ONCE = True
WATERMARK_PERCENTAGE = 30
class Meta:
prefix = 'watermark'
holder = 'watermarker.conf.settings'
def configure_quality(self, value):
if getattr(settings, 'WATERMARKING_QUALITY', None):
warnings.warn("WATERMARKING_QUALITY is deprecated, use WATERMARK_QUALITY", DeprecationWarning)
return value
|
import warnings
from django.conf import settings # pylint: disable=W0611
from appconf import AppConf
class WatermarkSettings(AppConf):
QUALITY = 85
OBSCURE_ORIGINAL = True
RANDOM_POSITION_ONCE = True
WATERMARK_PERCENTAGE = getattr(settings, 'WATERMARK_PERCENTAGE', 30)
class Meta:
prefix = 'watermark'
holder = 'watermarker.conf.settings'
def configure_quality(self, value):
if getattr(settings, 'WATERMARKING_QUALITY', None):
warnings.warn("WATERMARKING_QUALITY is deprecated, use WATERMARK_QUALITY", DeprecationWarning)
return value
|
Change AppConf class to use settings defined value or default.
|
Change AppConf class to use settings defined value or default.
|
Python
|
bsd-3-clause
|
lzanuz/django-watermark,lzanuz/django-watermark
|
import warnings
from django.conf import settings # pylint: disable=W0611
from appconf import AppConf
class WatermarkSettings(AppConf):
QUALITY = 85
OBSCURE_ORIGINAL = True
RANDOM_POSITION_ONCE = True
- WATERMARK_PERCENTAGE = 30
+ WATERMARK_PERCENTAGE = getattr(settings, 'WATERMARK_PERCENTAGE', 30)
class Meta:
prefix = 'watermark'
holder = 'watermarker.conf.settings'
def configure_quality(self, value):
if getattr(settings, 'WATERMARKING_QUALITY', None):
warnings.warn("WATERMARKING_QUALITY is deprecated, use WATERMARK_QUALITY", DeprecationWarning)
return value
|
Change AppConf class to use settings defined value or default.
|
## Code Before:
import warnings
from django.conf import settings # pylint: disable=W0611
from appconf import AppConf
class WatermarkSettings(AppConf):
QUALITY = 85
OBSCURE_ORIGINAL = True
RANDOM_POSITION_ONCE = True
WATERMARK_PERCENTAGE = 30
class Meta:
prefix = 'watermark'
holder = 'watermarker.conf.settings'
def configure_quality(self, value):
if getattr(settings, 'WATERMARKING_QUALITY', None):
warnings.warn("WATERMARKING_QUALITY is deprecated, use WATERMARK_QUALITY", DeprecationWarning)
return value
## Instruction:
Change AppConf class to use settings defined value or default.
## Code After:
import warnings
from django.conf import settings # pylint: disable=W0611
from appconf import AppConf
class WatermarkSettings(AppConf):
QUALITY = 85
OBSCURE_ORIGINAL = True
RANDOM_POSITION_ONCE = True
WATERMARK_PERCENTAGE = getattr(settings, 'WATERMARK_PERCENTAGE', 30)
class Meta:
prefix = 'watermark'
holder = 'watermarker.conf.settings'
def configure_quality(self, value):
if getattr(settings, 'WATERMARKING_QUALITY', None):
warnings.warn("WATERMARKING_QUALITY is deprecated, use WATERMARK_QUALITY", DeprecationWarning)
return value
|
import warnings
from django.conf import settings # pylint: disable=W0611
from appconf import AppConf
class WatermarkSettings(AppConf):
QUALITY = 85
OBSCURE_ORIGINAL = True
RANDOM_POSITION_ONCE = True
- WATERMARK_PERCENTAGE = 30
+ WATERMARK_PERCENTAGE = getattr(settings, 'WATERMARK_PERCENTAGE', 30)
class Meta:
prefix = 'watermark'
holder = 'watermarker.conf.settings'
def configure_quality(self, value):
if getattr(settings, 'WATERMARKING_QUALITY', None):
warnings.warn("WATERMARKING_QUALITY is deprecated, use WATERMARK_QUALITY", DeprecationWarning)
return value
|
2e897f7dce89d4b52c3507c62e7120ee238b713c
|
database/database_setup.py
|
database/database_setup.py
|
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from models.base import Base
from models.user import User
from models.store import Store
from models.product import Product
engine = create_engine('sqlite:///productcatalog.db')
Base.metadata.create_all(engine)
|
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from models.base import Base
from models.user import User
from models.store import Store
from models.product import Product
engine = create_engine('postgresql://catalog:catalog123!@localhost:8000/catalog')
Base.metadata.create_all(engine)
|
Connect database engine to postgresql
|
feat: Connect database engine to postgresql
|
Python
|
mit
|
caasted/aws-flask-catalog-app,caasted/aws-flask-catalog-app
|
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from models.base import Base
from models.user import User
from models.store import Store
from models.product import Product
- engine = create_engine('sqlite:///productcatalog.db')
+ engine = create_engine('postgresql://catalog:catalog123!@localhost:8000/catalog')
Base.metadata.create_all(engine)
|
Connect database engine to postgresql
|
## Code Before:
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from models.base import Base
from models.user import User
from models.store import Store
from models.product import Product
engine = create_engine('sqlite:///productcatalog.db')
Base.metadata.create_all(engine)
## Instruction:
Connect database engine to postgresql
## Code After:
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from models.base import Base
from models.user import User
from models.store import Store
from models.product import Product
engine = create_engine('postgresql://catalog:catalog123!@localhost:8000/catalog')
Base.metadata.create_all(engine)
|
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from models.base import Base
from models.user import User
from models.store import Store
from models.product import Product
- engine = create_engine('sqlite:///productcatalog.db')
+ engine = create_engine('postgresql://catalog:catalog123!@localhost:8000/catalog')
Base.metadata.create_all(engine)
|
4e94cef9f6617827341af443ac428b9ccc190535
|
lib/recommend-by-url.py
|
lib/recommend-by-url.py
|
from newspaper import Article
from goose import Goose
import json
import sys
article = Article(sys.argv[1])
article.download()
article.parse()
article.nlp()
published = ''
if article.publish_date:
published = article.publish_date.strftime("%Y-%m-%d %H:%M:%S")
# Get body with goose
g = Goose()
goose_article = g.extract(raw_html=article.html)
body = goose_article.cleaned_text
summary = goose_article.meta_description
# Maybe use https://github.com/xiaoxu193/PyTeaser
if not summary:
summary = article.summary
if not body or len(body) < len(article.text):
body = article.text
json_str = json.dumps({
'author': ", ".join(article.authors),
'image': article.top_image,
'keywords': article.keywords,
'published': published,
'summary': summary,
'body': body,
'title': article.title,
'videos': article.movies
}, sort_keys=True, ensure_ascii=False)
print(json_str)
|
from newspaper import Article
from goose import Goose
import requests
import json
import sys
article = Article(sys.argv[1])
article.download()
if not article.html:
r = requests.get(sys.argv[1], verify=False, headers={ 'User-Agent': 'Mozilla/5.0' })
article.set_html(r.text)
article.parse()
article.nlp()
published = ''
if article.publish_date:
published = article.publish_date.strftime("%Y-%m-%d %H:%M:%S")
# Get body with goose
g = Goose()
goose_article = g.extract(raw_html=article.html)
body = goose_article.cleaned_text
summary = goose_article.meta_description
# Maybe use https://github.com/xiaoxu193/PyTeaser
if not summary:
summary = article.summary
if not body or len(body) < len(article.text):
body = article.text
json_str = json.dumps({
'author': ", ".join(article.authors),
'image': article.top_image,
'keywords': article.keywords,
'published': published,
'summary': summary,
'body': body,
'title': article.title,
'videos': article.movies
}, sort_keys=True, ensure_ascii=False)
print(json_str)
|
Improve reliablity of python article fetcher
|
Improve reliablity of python article fetcher
|
Python
|
mit
|
lateral/feed-feeder,lateral/feed-feeder,lateral/feed-feeder,lateral/feed-feeder
|
from newspaper import Article
from goose import Goose
+ import requests
import json
import sys
article = Article(sys.argv[1])
article.download()
+ if not article.html:
+ r = requests.get(sys.argv[1], verify=False, headers={ 'User-Agent': 'Mozilla/5.0' })
+ article.set_html(r.text)
+
article.parse()
article.nlp()
published = ''
if article.publish_date:
published = article.publish_date.strftime("%Y-%m-%d %H:%M:%S")
# Get body with goose
g = Goose()
goose_article = g.extract(raw_html=article.html)
body = goose_article.cleaned_text
summary = goose_article.meta_description
# Maybe use https://github.com/xiaoxu193/PyTeaser
if not summary:
summary = article.summary
if not body or len(body) < len(article.text):
body = article.text
json_str = json.dumps({
'author': ", ".join(article.authors),
'image': article.top_image,
'keywords': article.keywords,
'published': published,
'summary': summary,
'body': body,
'title': article.title,
'videos': article.movies
}, sort_keys=True, ensure_ascii=False)
print(json_str)
|
Improve reliablity of python article fetcher
|
## Code Before:
from newspaper import Article
from goose import Goose
import json
import sys
article = Article(sys.argv[1])
article.download()
article.parse()
article.nlp()
published = ''
if article.publish_date:
published = article.publish_date.strftime("%Y-%m-%d %H:%M:%S")
# Get body with goose
g = Goose()
goose_article = g.extract(raw_html=article.html)
body = goose_article.cleaned_text
summary = goose_article.meta_description
# Maybe use https://github.com/xiaoxu193/PyTeaser
if not summary:
summary = article.summary
if not body or len(body) < len(article.text):
body = article.text
json_str = json.dumps({
'author': ", ".join(article.authors),
'image': article.top_image,
'keywords': article.keywords,
'published': published,
'summary': summary,
'body': body,
'title': article.title,
'videos': article.movies
}, sort_keys=True, ensure_ascii=False)
print(json_str)
## Instruction:
Improve reliablity of python article fetcher
## Code After:
from newspaper import Article
from goose import Goose
import requests
import json
import sys
article = Article(sys.argv[1])
article.download()
if not article.html:
r = requests.get(sys.argv[1], verify=False, headers={ 'User-Agent': 'Mozilla/5.0' })
article.set_html(r.text)
article.parse()
article.nlp()
published = ''
if article.publish_date:
published = article.publish_date.strftime("%Y-%m-%d %H:%M:%S")
# Get body with goose
g = Goose()
goose_article = g.extract(raw_html=article.html)
body = goose_article.cleaned_text
summary = goose_article.meta_description
# Maybe use https://github.com/xiaoxu193/PyTeaser
if not summary:
summary = article.summary
if not body or len(body) < len(article.text):
body = article.text
json_str = json.dumps({
'author': ", ".join(article.authors),
'image': article.top_image,
'keywords': article.keywords,
'published': published,
'summary': summary,
'body': body,
'title': article.title,
'videos': article.movies
}, sort_keys=True, ensure_ascii=False)
print(json_str)
|
from newspaper import Article
from goose import Goose
+ import requests
import json
import sys
article = Article(sys.argv[1])
article.download()
+ if not article.html:
+ r = requests.get(sys.argv[1], verify=False, headers={ 'User-Agent': 'Mozilla/5.0' })
+ article.set_html(r.text)
+
article.parse()
article.nlp()
published = ''
if article.publish_date:
published = article.publish_date.strftime("%Y-%m-%d %H:%M:%S")
# Get body with goose
g = Goose()
goose_article = g.extract(raw_html=article.html)
body = goose_article.cleaned_text
summary = goose_article.meta_description
# Maybe use https://github.com/xiaoxu193/PyTeaser
if not summary:
summary = article.summary
if not body or len(body) < len(article.text):
body = article.text
json_str = json.dumps({
'author': ", ".join(article.authors),
'image': article.top_image,
'keywords': article.keywords,
'published': published,
'summary': summary,
'body': body,
'title': article.title,
'videos': article.movies
}, sort_keys=True, ensure_ascii=False)
print(json_str)
|
e5b503d0e66f8422412d0cdeac4ba4f55f14e420
|
spectrum/object.py
|
spectrum/object.py
|
class Object:
"""Represents a generic Spectrum object
Supported Operations:
+-----------+--------------------------------------+
| Operation | Description |
+===========+======================================+
| x == y | Checks if two objects are equal. |
+-----------+--------------------------------------+
| x != y | Checks if two objects are not equal. |
+-----------+--------------------------------------+
This is the class that will be the base class of most objects, since most
have an ID number.
id : int
The ID of the object
"""
def __init__(self, id):
self.id = int(id)
def __eq__(self, other):
return isinstance(other, self.__class__) and other.id == self.id
def __ne__(self, other):
if isinstance(other, self.__class__):
return other.id != self.id
return True
|
class Object:
"""Represents a generic Spectrum object
Supported Operations:
+-----------+--------------------------------------+
| Operation | Description |
+===========+======================================+
| x == y | Checks if two objects are equal. |
+-----------+--------------------------------------+
| x != y | Checks if two objects are not equal. |
+-----------+--------------------------------------+
This class is the base class of most objects, since most
have an ID number.
id : int
The ID of the object
"""
def __init__(self, id):
self.id = int(id)
def __eq__(self, other):
return isinstance(other, self.__class__) and other.id == self.id
def __ne__(self, other):
if isinstance(other, self.__class__):
return other.id != self.id
return True
|
Change wording from future to present tense
|
Documentation: Change wording from future to present tense
|
Python
|
mit
|
treefroog/spectrum.py
|
class Object:
"""Represents a generic Spectrum object
Supported Operations:
+-----------+--------------------------------------+
| Operation | Description |
+===========+======================================+
| x == y | Checks if two objects are equal. |
+-----------+--------------------------------------+
| x != y | Checks if two objects are not equal. |
+-----------+--------------------------------------+
- This is the class that will be the base class of most objects, since most
+ This class is the base class of most objects, since most
have an ID number.
id : int
The ID of the object
"""
def __init__(self, id):
self.id = int(id)
def __eq__(self, other):
return isinstance(other, self.__class__) and other.id == self.id
def __ne__(self, other):
if isinstance(other, self.__class__):
return other.id != self.id
return True
|
Change wording from future to present tense
|
## Code Before:
class Object:
"""Represents a generic Spectrum object
Supported Operations:
+-----------+--------------------------------------+
| Operation | Description |
+===========+======================================+
| x == y | Checks if two objects are equal. |
+-----------+--------------------------------------+
| x != y | Checks if two objects are not equal. |
+-----------+--------------------------------------+
This is the class that will be the base class of most objects, since most
have an ID number.
id : int
The ID of the object
"""
def __init__(self, id):
self.id = int(id)
def __eq__(self, other):
return isinstance(other, self.__class__) and other.id == self.id
def __ne__(self, other):
if isinstance(other, self.__class__):
return other.id != self.id
return True
## Instruction:
Change wording from future to present tense
## Code After:
class Object:
"""Represents a generic Spectrum object
Supported Operations:
+-----------+--------------------------------------+
| Operation | Description |
+===========+======================================+
| x == y | Checks if two objects are equal. |
+-----------+--------------------------------------+
| x != y | Checks if two objects are not equal. |
+-----------+--------------------------------------+
This class is the base class of most objects, since most
have an ID number.
id : int
The ID of the object
"""
def __init__(self, id):
self.id = int(id)
def __eq__(self, other):
return isinstance(other, self.__class__) and other.id == self.id
def __ne__(self, other):
if isinstance(other, self.__class__):
return other.id != self.id
return True
|
class Object:
"""Represents a generic Spectrum object
Supported Operations:
+-----------+--------------------------------------+
| Operation | Description |
+===========+======================================+
| x == y | Checks if two objects are equal. |
+-----------+--------------------------------------+
| x != y | Checks if two objects are not equal. |
+-----------+--------------------------------------+
- This is the class that will be the base class of most objects, since most
? ------- ------ ^^^^^ -
+ This class is the base class of most objects, since most
? ^
have an ID number.
id : int
The ID of the object
"""
def __init__(self, id):
self.id = int(id)
def __eq__(self, other):
return isinstance(other, self.__class__) and other.id == self.id
def __ne__(self, other):
if isinstance(other, self.__class__):
return other.id != self.id
return True
|
fa1a08aed5bc6659304097d5ad7e653c553c1b11
|
cactus/utils/file.py
|
cactus/utils/file.py
|
import os
import cStringIO
import gzip
import hashlib
from cactus.utils.helpers import checksum
class FakeTime:
"""
Monkey-patch gzip.time to avoid changing files every time we deploy them.
"""
def time(self):
return 1111111111.111
def compressString(s):
"""Gzip a given string."""
gzip.time = FakeTime()
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def fileSize(num):
for x in ['b', 'kb', 'mb', 'gb', 'tb']:
if num < 1024.0:
return "%.0f%s" % (num, x)
num /= 1024.0
def calculate_file_checksum(path):
"""
Calculate the MD5 sum for a file (needs to fit in memory)
"""
with open(path, 'rb') as f:
return checksum(f.read())
def file_changed_hash(path):
info = os.stat(path)
hashKey = str(info.st_mtime) + str(info.st_size)
return checksum(hashKey)
|
import os
import cStringIO
import gzip
import hashlib
import subprocess
from cactus.utils.helpers import checksum
class FakeTime:
"""
Monkey-patch gzip.time to avoid changing files every time we deploy them.
"""
def time(self):
return 1111111111.111
def compressString(s):
"""Gzip a given string."""
gzip.time = FakeTime()
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def fileSize(num):
for x in ['b', 'kb', 'mb', 'gb', 'tb']:
if num < 1024.0:
return "%.0f%s" % (num, x)
num /= 1024.0
def calculate_file_checksum(path):
"""
Calculate the MD5 sum for a file (needs to fit in memory)
"""
# with open(path, 'rb') as f:
# return checksum(f.read())
output = subprocess.check_output(["md5", path])
md5 = output.split(" = ")[1].strip()
return md5
def file_changed_hash(path):
info = os.stat(path)
hashKey = str(info.st_mtime) + str(info.st_size)
return checksum(hashKey)
|
Use terminal md5 for perf
|
Use terminal md5 for perf
|
Python
|
bsd-3-clause
|
koenbok/Cactus,danielmorosan/Cactus,juvham/Cactus,dreadatour/Cactus,Bluetide/Cactus,chaudum/Cactus,koobs/Cactus,chaudum/Cactus,PegasusWang/Cactus,juvham/Cactus,eudicots/Cactus,Knownly/Cactus,danielmorosan/Cactus,page-io/Cactus,juvham/Cactus,fjxhkj/Cactus,koobs/Cactus,ibarria0/Cactus,PegasusWang/Cactus,danielmorosan/Cactus,Bluetide/Cactus,fjxhkj/Cactus,page-io/Cactus,andyzsf/Cactus-,fjxhkj/Cactus,eudicots/Cactus,chaudum/Cactus,PegasusWang/Cactus,koenbok/Cactus,dreadatour/Cactus,gone/Cactus,Bluetide/Cactus,ibarria0/Cactus,dreadatour/Cactus,gone/Cactus,koobs/Cactus,ibarria0/Cactus,koenbok/Cactus,page-io/Cactus,andyzsf/Cactus-,eudicots/Cactus,Knownly/Cactus,andyzsf/Cactus-,Knownly/Cactus,gone/Cactus
|
import os
import cStringIO
import gzip
import hashlib
+ import subprocess
from cactus.utils.helpers import checksum
class FakeTime:
"""
Monkey-patch gzip.time to avoid changing files every time we deploy them.
"""
def time(self):
return 1111111111.111
def compressString(s):
"""Gzip a given string."""
gzip.time = FakeTime()
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def fileSize(num):
for x in ['b', 'kb', 'mb', 'gb', 'tb']:
if num < 1024.0:
return "%.0f%s" % (num, x)
num /= 1024.0
def calculate_file_checksum(path):
"""
Calculate the MD5 sum for a file (needs to fit in memory)
"""
- with open(path, 'rb') as f:
+ # with open(path, 'rb') as f:
- return checksum(f.read())
+ # return checksum(f.read())
+ output = subprocess.check_output(["md5", path])
+ md5 = output.split(" = ")[1].strip()
+ return md5
def file_changed_hash(path):
info = os.stat(path)
hashKey = str(info.st_mtime) + str(info.st_size)
return checksum(hashKey)
|
Use terminal md5 for perf
|
## Code Before:
import os
import cStringIO
import gzip
import hashlib
from cactus.utils.helpers import checksum
class FakeTime:
"""
Monkey-patch gzip.time to avoid changing files every time we deploy them.
"""
def time(self):
return 1111111111.111
def compressString(s):
"""Gzip a given string."""
gzip.time = FakeTime()
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def fileSize(num):
for x in ['b', 'kb', 'mb', 'gb', 'tb']:
if num < 1024.0:
return "%.0f%s" % (num, x)
num /= 1024.0
def calculate_file_checksum(path):
"""
Calculate the MD5 sum for a file (needs to fit in memory)
"""
with open(path, 'rb') as f:
return checksum(f.read())
def file_changed_hash(path):
info = os.stat(path)
hashKey = str(info.st_mtime) + str(info.st_size)
return checksum(hashKey)
## Instruction:
Use terminal md5 for perf
## Code After:
import os
import cStringIO
import gzip
import hashlib
import subprocess
from cactus.utils.helpers import checksum
class FakeTime:
"""
Monkey-patch gzip.time to avoid changing files every time we deploy them.
"""
def time(self):
return 1111111111.111
def compressString(s):
"""Gzip a given string."""
gzip.time = FakeTime()
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def fileSize(num):
for x in ['b', 'kb', 'mb', 'gb', 'tb']:
if num < 1024.0:
return "%.0f%s" % (num, x)
num /= 1024.0
def calculate_file_checksum(path):
"""
Calculate the MD5 sum for a file (needs to fit in memory)
"""
# with open(path, 'rb') as f:
# return checksum(f.read())
output = subprocess.check_output(["md5", path])
md5 = output.split(" = ")[1].strip()
return md5
def file_changed_hash(path):
info = os.stat(path)
hashKey = str(info.st_mtime) + str(info.st_size)
return checksum(hashKey)
|
import os
import cStringIO
import gzip
import hashlib
+ import subprocess
from cactus.utils.helpers import checksum
class FakeTime:
"""
Monkey-patch gzip.time to avoid changing files every time we deploy them.
"""
def time(self):
return 1111111111.111
def compressString(s):
"""Gzip a given string."""
gzip.time = FakeTime()
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
def fileSize(num):
for x in ['b', 'kb', 'mb', 'gb', 'tb']:
if num < 1024.0:
return "%.0f%s" % (num, x)
num /= 1024.0
def calculate_file_checksum(path):
"""
Calculate the MD5 sum for a file (needs to fit in memory)
"""
- with open(path, 'rb') as f:
+ # with open(path, 'rb') as f:
? ++
- return checksum(f.read())
+ # return checksum(f.read())
? ++
+ output = subprocess.check_output(["md5", path])
+ md5 = output.split(" = ")[1].strip()
+ return md5
def file_changed_hash(path):
info = os.stat(path)
hashKey = str(info.st_mtime) + str(info.st_size)
return checksum(hashKey)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.