Dataset schema (one record per row; the "|" markers below separate the fields of a record, in this order):

hexsha: string, exactly 40 chars
repo: string, 5 to 121 chars
path: string, 4 to 227 chars
license: list
language: string, 1 distinct value ("Python")
identifier: string, 1 to 107 chars
return_type: string, 2 to 237 chars, nullable
original_string: string, 75 to 13.4k chars
original_docstring: string, 13 to 12.9k chars
docstring: string, 13 to 2.57k chars
docstring_tokens: list
code: string, 23 to 1.88k chars
code_tokens: list
short_docstring: string, 1 to 1.32k chars
short_docstring_tokens: list
comment: list
parameters: list
docstring_params: dict
code_with_imports: string, 23 to 1.88k chars
idxs: int64, 0 to 611k
cluster: int64, 0 to 1.02k
8a0adb3f306187ac19b45a17c5fefbe247e86c5a
|
xmnlab/bodoai-examples
|
bodoai_examples/fileio/parquet.py
|
[
"BSD-3-Clause"
] |
Python
|
read
|
<not_specific>
|
def read(df, filepath):
"""
Read a parquet file from AWS S3 alike service in pandas dataframe format.
Parameters
----------
filepath : str
Returns
-------
Pandas.DataFrame
"""
return df.read_parquet(filepath)
|
Read a parquet file from AWS S3 alike service in pandas dataframe format.
Parameters
----------
filepath : str
Returns
-------
Pandas.DataFrame
|
Read a parquet file from AWS S3 alike service in pandas dataframe format.
Parameters
filepath : str
Returns
|
[
"Read",
"a",
"parquet",
"file",
"from",
"AWS",
"S3",
"alike",
"service",
"in",
"pandas",
"dataframe",
"format",
".",
"Parameters",
"filepath",
":",
"str",
"Returns"
] |
def read(df, filepath):
return df.read_parquet(filepath)
|
[
"def",
"read",
"(",
"df",
",",
"filepath",
")",
":",
"return",
"df",
".",
"read_parquet",
"(",
"filepath",
")"
] |
Read a parquet file from AWS S3 alike service in pandas dataframe format.
|
[
"Read",
"a",
"parquet",
"file",
"from",
"AWS",
"S3",
"alike",
"service",
"in",
"pandas",
"dataframe",
"format",
"."
] |
[
"\"\"\"\n Read a parquet file from AWS S3 alike service in pandas dataframe format.\n\n Parameters\n ----------\n filepath : str\n\n Returns\n -------\n Pandas.DataFrame\n \"\"\""
] |
[
{
"param": "df",
"type": null
},
{
"param": "filepath",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filepath",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def read(df, filepath):
return df.read_parquet(filepath)
| 1,363 | 1,022 |
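A minimal usage sketch for the read record above. Despite its name, the df argument is evidently the pandas module (or any object exposing read_parquet), not a DataFrame, since the body calls df.read_parquet(...); the file path below is hypothetical.

import pandas as pd

frame = read(pd, 'data.parquet')  # hypothetical local file; S3-style paths need s3fs/fsspec installed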
b7287414e8e1b4e2bff835aa3bd460a9b39e2169
|
Anmol-Singh-Jaggi/interview-notes
|
notes/algo-ds-practice/problems/array/count_array_pair_sum_divisible.py
|
[
"MIT"
] |
Python
|
count_pair_sum_divisible
|
<not_specific>
|
def count_pair_sum_divisible(arr, k):
"""
Count number of pairs whose sum is divisible by k.
"""
rem_freq_map = {}
for elem in arr:
rem = elem % k
rem_freq_map[rem] = rem_freq_map.get(rem, 0) + 1
ans = 0
for rem, freq in rem_freq_map.items():
if rem == 0 or rem * 2 == k:
ans += (freq * (freq - 1)) // 2
else:
rem_inverse = k - rem
# CAREFUL: We dont want to count a pair twice!
if rem < rem_inverse:
ans += freq * rem_freq_map.get(rem_inverse, 0)
return ans
|
Count number of pairs whose sum is divisible by k.
|
Count number of pairs whose sum is divisible by k.
|
[
"Count",
"number",
"of",
"pairs",
"whose",
"sum",
"is",
"divisible",
"by",
"k",
"."
] |
def count_pair_sum_divisible(arr, k):
rem_freq_map = {}
for elem in arr:
rem = elem % k
rem_freq_map[rem] = rem_freq_map.get(rem, 0) + 1
ans = 0
for rem, freq in rem_freq_map.items():
if rem == 0 or rem * 2 == k:
ans += (freq * (freq - 1)) // 2
else:
rem_inverse = k - rem
if rem < rem_inverse:
ans += freq * rem_freq_map.get(rem_inverse, 0)
return ans
|
[
"def",
"count_pair_sum_divisible",
"(",
"arr",
",",
"k",
")",
":",
"rem_freq_map",
"=",
"{",
"}",
"for",
"elem",
"in",
"arr",
":",
"rem",
"=",
"elem",
"%",
"k",
"rem_freq_map",
"[",
"rem",
"]",
"=",
"rem_freq_map",
".",
"get",
"(",
"rem",
",",
"0",
")",
"+",
"1",
"ans",
"=",
"0",
"for",
"rem",
",",
"freq",
"in",
"rem_freq_map",
".",
"items",
"(",
")",
":",
"if",
"rem",
"==",
"0",
"or",
"rem",
"*",
"2",
"==",
"k",
":",
"ans",
"+=",
"(",
"freq",
"*",
"(",
"freq",
"-",
"1",
")",
")",
"//",
"2",
"else",
":",
"rem_inverse",
"=",
"k",
"-",
"rem",
"if",
"rem",
"<",
"rem_inverse",
":",
"ans",
"+=",
"freq",
"*",
"rem_freq_map",
".",
"get",
"(",
"rem_inverse",
",",
"0",
")",
"return",
"ans"
] |
Count number of pairs whose sum is divisible by k.
|
[
"Count",
"number",
"of",
"pairs",
"whose",
"sum",
"is",
"divisible",
"by",
"k",
"."
] |
[
"\"\"\"\n Count number of pairs whose sum is divisible by k.\n \"\"\"",
"# CAREFUL: We dont want to count a pair twice!"
] |
[
{
"param": "arr",
"type": null
},
{
"param": "k",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "arr",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "k",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def count_pair_sum_divisible(arr, k):
rem_freq_map = {}
for elem in arr:
rem = elem % k
rem_freq_map[rem] = rem_freq_map.get(rem, 0) + 1
ans = 0
for rem, freq in rem_freq_map.items():
if rem == 0 or rem * 2 == k:
ans += (freq * (freq - 1)) // 2
else:
rem_inverse = k - rem
if rem < rem_inverse:
ans += freq * rem_freq_map.get(rem_inverse, 0)
return ans
| 1,364 | 955 |
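A quick sanity check for count_pair_sum_divisible, with a hand-verified illustrative input:

arr = [1, 2, 3, 4, 5, 6]
# Pairs summing to a multiple of 3: (1,2), (1,5), (2,4), (4,5), (3,6)
print(count_pair_sum_divisible(arr, 3))  # 5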
2c6c185c27c5d116441130988f8fde9a19812260
|
cesarclarosns/CS50
|
cs50-ai/6-questions/questions.py
|
[
"MIT"
] |
Python
|
compute_idfs
|
<not_specific>
|
def compute_idfs(documents):
"""
Given a dictionary of `documents` that maps names of documents to a list
of words, return a dictionary that maps words to their IDF values.
Any word that appears in at least one of the documents should be in the
resulting dictionary.
"""
# Create a set of all the words that exist in the corpus.
words_in_documents = set([word for words in documents.values() for word in words])
# Create a dictionary to map each word to its IDF value.
idfs = dict()
for word in words_in_documents:
# Determine the number of documents in the corpus that contain 'word'.
matched_documents = [
document for document in documents if word in documents[document]
]
# Determine the IDF value for each word.
idfs[word] = math.log(len(documents) / len(matched_documents))
return idfs
|
Given a dictionary of `documents` that maps names of documents to a list
of words, return a dictionary that maps words to their IDF values.
Any word that appears in at least one of the documents should be in the
resulting dictionary.
|
Given a dictionary of `documents` that maps names of documents to a list
of words, return a dictionary that maps words to their IDF values.
Any word that appears in at least one of the documents should be in the
resulting dictionary.
|
[
"Given",
"a",
"dictionary",
"of",
"`",
"documents",
"`",
"that",
"maps",
"names",
"of",
"documents",
"to",
"a",
"list",
"of",
"words",
"return",
"a",
"dictionary",
"that",
"maps",
"words",
"to",
"their",
"IDF",
"values",
".",
"Any",
"word",
"that",
"appears",
"in",
"at",
"least",
"one",
"of",
"the",
"documents",
"should",
"be",
"in",
"the",
"resulting",
"dictionary",
"."
] |
def compute_idfs(documents):
words_in_documents = set([word for words in documents.values() for word in words])
idfs = dict()
for word in words_in_documents:
matched_documents = [
document for document in documents if word in documents[document]
]
idfs[word] = math.log(len(documents) / len(matched_documents))
return idfs
|
[
"def",
"compute_idfs",
"(",
"documents",
")",
":",
"words_in_documents",
"=",
"set",
"(",
"[",
"word",
"for",
"words",
"in",
"documents",
".",
"values",
"(",
")",
"for",
"word",
"in",
"words",
"]",
")",
"idfs",
"=",
"dict",
"(",
")",
"for",
"word",
"in",
"words_in_documents",
":",
"matched_documents",
"=",
"[",
"document",
"for",
"document",
"in",
"documents",
"if",
"word",
"in",
"documents",
"[",
"document",
"]",
"]",
"idfs",
"[",
"word",
"]",
"=",
"math",
".",
"log",
"(",
"len",
"(",
"documents",
")",
"/",
"len",
"(",
"matched_documents",
")",
")",
"return",
"idfs"
] |
Given a dictionary of `documents` that maps names of documents to a list
of words, return a dictionary that maps words to their IDF values.
|
[
"Given",
"a",
"dictionary",
"of",
"`",
"documents",
"`",
"that",
"maps",
"names",
"of",
"documents",
"to",
"a",
"list",
"of",
"words",
"return",
"a",
"dictionary",
"that",
"maps",
"words",
"to",
"their",
"IDF",
"values",
"."
] |
[
"\"\"\"\n Given a dictionary of `documents` that maps names of documents to a list\n of words, return a dictionary that maps words to their IDF values.\n\n Any word that appears in at least one of the documents should be in the\n resulting dictionary.\n \"\"\"",
"# Create a set of all the words that exist in the corpus.",
"# Create a dictionary to map each word to its IDF value.",
"# Determine the number of documents in the corpus that contain 'word'.",
"# Determine the IDF value for each word."
] |
[
{
"param": "documents",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "documents",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import math
def compute_idfs(documents):
words_in_documents = set([word for words in documents.values() for word in words])
idfs = dict()
for word in words_in_documents:
matched_documents = [
document for document in documents if word in documents[document]
]
idfs[word] = math.log(len(documents) / len(matched_documents))
return idfs
| 1,366 | 210 |
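A minimal usage sketch for compute_idfs, assuming the import math shown in code_with_imports is in scope (illustrative corpus):

docs = {'a.txt': ['hello', 'world'], 'b.txt': ['hello', 'there']}
idfs = compute_idfs(docs)
print(round(idfs['world'], 3))  # 0.693, i.e. log(2/1)
print(idfs['hello'])            # 0.0: the word appears in every document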
c054207868a9bdecf78f7aa33bc484c5733321c2
|
tseaver/Zope-RFA
|
src/ZPublisher/HTTPResponse.py
|
[
"ZPL-2.1"
] |
Python
|
fix_xml_preamble
|
<not_specific>
|
def fix_xml_preamble(body, encoding):
""" fixes the encoding in the XML preamble according
to the charset specified in the content-type header.
"""
if body.startswith('<?xml'):
pos_right = body.find('?>') # right end of the XML preamble
body = ('<?xml version="1.0" encoding="%s" ?>'
% encoding) + body[pos_right+2:]
return body
|
fixes the encoding in the XML preamble according
to the charset specified in the content-type header.
|
fixes the encoding in the XML preamble according
to the charset specified in the content-type header.
|
[
"fixes",
"the",
"encoding",
"in",
"the",
"XML",
"preamble",
"according",
"to",
"the",
"charset",
"specified",
"in",
"the",
"content",
"-",
"type",
"header",
"."
] |
def fix_xml_preamble(body, encoding):
if body.startswith('<?xml'):
pos_right = body.find('?>')
body = ('<?xml version="1.0" encoding="%s" ?>'
% encoding) + body[pos_right+2:]
return body
|
[
"def",
"fix_xml_preamble",
"(",
"body",
",",
"encoding",
")",
":",
"if",
"body",
".",
"startswith",
"(",
"'<?xml'",
")",
":",
"pos_right",
"=",
"body",
".",
"find",
"(",
"'?>'",
")",
"body",
"=",
"(",
"'<?xml version=\"1.0\" encoding=\"%s\" ?>'",
"%",
"encoding",
")",
"+",
"body",
"[",
"pos_right",
"+",
"2",
":",
"]",
"return",
"body"
] |
fixes the encoding in the XML preamble according
to the charset specified in the content-type header.
|
[
"fixes",
"the",
"encoding",
"in",
"the",
"XML",
"preamble",
"according",
"to",
"the",
"charset",
"specified",
"in",
"the",
"content",
"-",
"type",
"header",
"."
] |
[
"\"\"\" fixes the encoding in the XML preamble according\n to the charset specified in the content-type header.\n \"\"\"",
"# right end of the XML preamble"
] |
[
{
"param": "body",
"type": null
},
{
"param": "encoding",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "body",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "encoding",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def fix_xml_preamble(body, encoding):
if body.startswith('<?xml'):
pos_right = body.find('?>')
body = ('<?xml version="1.0" encoding="%s" ?>'
% encoding) + body[pos_right+2:]
return body
| 1,367 | 419 |
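A usage sketch for fix_xml_preamble (illustrative input). Note the function rewrites the whole preamble, so any other pseudo-attributes it contained are dropped:

body = '<?xml version="1.0" encoding="iso-8859-1" ?><root/>'
print(fix_xml_preamble(body, 'utf-8'))
# <?xml version="1.0" encoding="utf-8" ?><root/>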
1e1d48ed3ac8bc7278c9a37bcbaf33497486ab7f
|
MichiganLabs/flask-cors
|
flask_cors/__init__.py
|
[
"MIT"
] |
Python
|
_filter_false
|
<not_specific>
|
def _filter_false(predicate, iterable):
'''
Returns all objects in iterable for which predicate is false.
Equivalent to the Python 3 version of itertools.filterfalse
'''
return filter(lambda x: not predicate(x), iterable)
|
Returns all objects in iterable for which predicate is false.
Equivalent to the Python 3 version of itertools.filterfalse
|
Returns all objects in iterable for which predicate is false.
Equivalent to the Python 3 version of itertools.filterfalse
|
[
"Returns",
"all",
"objects",
"in",
"iterable",
"for",
"which",
"predicate",
"is",
"false",
".",
"Equivalent",
"to",
"the",
"Python",
"3",
"version",
"of",
"itertools",
".",
"filterfalse"
] |
def _filter_false(predicate, iterable):
return filter(lambda x: not predicate(x), iterable)
|
[
"def",
"_filter_false",
"(",
"predicate",
",",
"iterable",
")",
":",
"return",
"filter",
"(",
"lambda",
"x",
":",
"not",
"predicate",
"(",
"x",
")",
",",
"iterable",
")"
] |
Returns all objects in iterable for which predicate is false.
|
[
"Returns",
"all",
"objects",
"in",
"iterable",
"for",
"which",
"predicate",
"is",
"false",
"."
] |
[
"'''\n Returns all objects in iterable for which predicate is false.\n Equivalent to the Python 3 version of itertools.filterfalse\n '''"
] |
[
{
"param": "predicate",
"type": null
},
{
"param": "iterable",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "predicate",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "iterable",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _filter_false(predicate, iterable):
return filter(lambda x: not predicate(x), iterable)
| 1,368 | 464 |
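A one-line usage sketch for _filter_false; list() is applied because Python 3's filter is lazy:

print(list(_filter_false(lambda x: x % 2 == 0, range(10))))  # [1, 3, 5, 7, 9]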
b5e34fb7b689cabc61ac32b11796ed46db02e682
|
mbta/registered
|
registered/intervals/page.py
|
[
"MIT"
] |
Python
|
meters_to_feet
|
int
|
def meters_to_feet(meters: float) -> int:
"""
Convert the given distance in meters to feet.
"""
return int(meters * 3.281)
|
Convert the given distance in meters to feet.
|
Convert the given distance in meters to feet.
|
[
"Convert",
"the",
"given",
"distance",
"in",
"meters",
"to",
"feet",
"."
] |
def meters_to_feet(meters: float) -> int:
return int(meters * 3.281)
|
[
"def",
"meters_to_feet",
"(",
"meters",
":",
"float",
")",
"->",
"int",
":",
"return",
"int",
"(",
"meters",
"*",
"3.281",
")"
] |
Convert the given distance in meters to feet.
|
[
"Convert",
"the",
"given",
"distance",
"in",
"meters",
"to",
"feet",
"."
] |
[
"\"\"\"\n Convert the given distance in meters to feet.\n \"\"\""
] |
[
{
"param": "meters",
"type": "float"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "meters",
"type": "float",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def meters_to_feet(meters: float) -> int:
return int(meters * 3.281)
| 1,370 | 466 |
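A usage sketch for meters_to_feet. The function truncates via int() and uses the rounded factor 3.281 (the exact factor is about 3.28084), so results can differ slightly from a precise conversion:

print(meters_to_feet(100.0))  # 328: int() truncates 100 * 3.281 = 328.1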
7732d81f7e3f99a5d87cb30f63c7b2fd19d9975e
|
craignicholson/P2
|
Lesson_6/ImprovingStreetNames/audit.py
|
[
"MIT"
] |
Python
|
update_name
|
<not_specific>
|
def update_name(name, map_old_to_new):
'''
Update name compares current name to the map of bad values to good values
and provides the updated name back to the method
'''
for iName in map_old_to_new.keys():
#Check to see if we find a match for a bad value in our map
match = re.search(iName, name)
#if match is found then remap the old value with new value
if match:
name = re.sub(iName+'$', map_old_to_new[iName], name)
return name
|
Update name compares current name to the map of bad values to good values
and provides the updated name back to the method
|
Update name compares current name to the map of bad values to good values
and provides the updated name back to the method
|
[
"Update",
"name",
"compares",
"current",
"name",
"to",
"the",
"map",
"of",
"bad",
"values",
"to",
"good",
"values",
"and",
"provides",
"the",
"updated",
"name",
"back",
"to",
"the",
"method"
] |
def update_name(name, map_old_to_new):
for iName in map_old_to_new.keys():
match = re.search(iName, name)
if match:
name = re.sub(iName+'$', map_old_to_new[iName], name)
return name
|
[
"def",
"update_name",
"(",
"name",
",",
"map_old_to_new",
")",
":",
"for",
"iName",
"in",
"map_old_to_new",
".",
"keys",
"(",
")",
":",
"match",
"=",
"re",
".",
"search",
"(",
"iName",
",",
"name",
")",
"if",
"match",
":",
"name",
"=",
"re",
".",
"sub",
"(",
"iName",
"+",
"'$'",
",",
"map_old_to_new",
"[",
"iName",
"]",
",",
"name",
")",
"return",
"name"
] |
Update name compares current name to the map of bad values to good values
and provides the updated name back to the method
|
[
"Update",
"name",
"compares",
"current",
"name",
"to",
"the",
"map",
"of",
"bad",
"values",
"to",
"good",
"values",
"and",
"provides",
"the",
"updated",
"name",
"back",
"to",
"the",
"method"
] |
[
"'''\n Update name compares current name to the map of bad values to good values\n and provides the updated name back to the method \n '''",
"#Check to see if we find a match for a bad value in our map ",
"#if match is found then remap the old value with new value "
] |
[
{
"param": "name",
"type": null
},
{
"param": "map_old_to_new",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "map_old_to_new",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def update_name(name, map_old_to_new):
for iName in map_old_to_new.keys():
match = re.search(iName, name)
if match:
name = re.sub(iName+'$', map_old_to_new[iName], name)
return name
| 1,371 | 667 |
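A usage sketch for update_name, assuming the import re from code_with_imports is in scope. The mapping keys are treated as regex patterns, and the substitution is anchored to the end of the string with '$' (illustrative values):

mapping = {'St': 'Street', 'Ave': 'Avenue'}
print(update_name('North Lamar St', mapping))  # North Lamar Street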
236328871b5145ffd276385d82deddfd652165bf
|
ipazc/vrpwrp
|
vrpwrp/helpers/image_helper.py
|
[
"MIT"
] |
Python
|
to_byte_array
|
<not_specific>
|
def to_byte_array(pil_image):
"""
converts the PIL image into a bytes array.
:param pil_image: PIL image to convert to
:return: Bytes array representing the image.
"""
with io.BytesIO() as bytes_io:
pil_image.save(bytes_io, "PNG")
bytes_io.seek(0)
result = bytes_io.read()
return result
|
converts the PIL image into a bytes array.
:param pil_image: PIL image to convert to
:return: Bytes array representing the image.
|
converts the PIL image into a bytes array.
|
[
"converts",
"the",
"PIL",
"image",
"into",
"a",
"bytes",
"array",
"."
] |
def to_byte_array(pil_image):
with io.BytesIO() as bytes_io:
pil_image.save(bytes_io, "PNG")
bytes_io.seek(0)
result = bytes_io.read()
return result
|
[
"def",
"to_byte_array",
"(",
"pil_image",
")",
":",
"with",
"io",
".",
"BytesIO",
"(",
")",
"as",
"bytes_io",
":",
"pil_image",
".",
"save",
"(",
"bytes_io",
",",
"\"PNG\"",
")",
"bytes_io",
".",
"seek",
"(",
"0",
")",
"result",
"=",
"bytes_io",
".",
"read",
"(",
")",
"return",
"result"
] |
converts the PIL image into a bytes array.
|
[
"converts",
"the",
"PIL",
"image",
"into",
"a",
"bytes",
"array",
"."
] |
[
"\"\"\"\n converts the PIL image into a bytes array.\n :param pil_image: PIL image to convert to\n :return: Bytes array representing the image.\n \"\"\""
] |
[
{
"param": "pil_image",
"type": null
}
] |
{
"returns": [
{
"docstring": "Bytes array representing the image.",
"docstring_tokens": [
"Bytes",
"array",
"representing",
"the",
"image",
"."
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "pil_image",
"type": null,
"docstring": "PIL image to convert to",
"docstring_tokens": [
"PIL",
"image",
"to",
"convert",
"to"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import io
def to_byte_array(pil_image):
with io.BytesIO() as bytes_io:
pil_image.save(bytes_io, "PNG")
bytes_io.seek(0)
result = bytes_io.read()
return result
| 1,372 | 367 |
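A usage sketch for to_byte_array. Pillow is an assumed dependency of the caller; the function itself only needs the import io shown in code_with_imports:

from PIL import Image  # assumed dependency, not part of the record

img = Image.new('RGB', (2, 2), 'red')
data = to_byte_array(img)
print(data[:8])  # b'\x89PNG\r\n\x1a\n', the PNG signature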
3ce20dfc322ee394a83480afd82f77312e76ad8a
|
Flowdalic/salt
|
tests/unit/states/test_module.py
|
[
"Apache-2.0"
] |
Python
|
_mocked_func_named
|
<not_specific>
|
def _mocked_func_named(
name,
names=(
"Fred",
"Swen",
),
):
"""
Mocked function with named defaults.
:param name:
:param names:
:return:
"""
return {"name": name, "names": names}
|
Mocked function with named defaults.
:param name:
:param names:
:return:
|
Mocked function with named defaults.
|
[
"Mocked",
"function",
"with",
"named",
"defaults",
"."
] |
def _mocked_func_named(
name,
names=(
"Fred",
"Swen",
),
):
return {"name": name, "names": names}
|
[
"def",
"_mocked_func_named",
"(",
"name",
",",
"names",
"=",
"(",
"\"Fred\"",
",",
"\"Swen\"",
",",
")",
",",
")",
":",
"return",
"{",
"\"name\"",
":",
"name",
",",
"\"names\"",
":",
"names",
"}"
] |
Mocked function with named defaults.
|
[
"Mocked",
"function",
"with",
"named",
"defaults",
"."
] |
[
"\"\"\"\n Mocked function with named defaults.\n\n :param name:\n :param names:\n :return:\n \"\"\""
] |
[
{
"param": "name",
"type": null
},
{
"param": "names",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "names",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _mocked_func_named(
name,
names=(
"Fred",
"Swen",
),
):
return {"name": name, "names": names}
| 1,373 | 922 |
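A usage sketch for _mocked_func_named, showing the default tuple coming back unchanged:

print(_mocked_func_named('Alice'))
# {'name': 'Alice', 'names': ('Fred', 'Swen')}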
9c4e9861697a88395be66650325a10d60515320c
|
NickSmyr/ai-player-agents
|
A1 - Search and Games/minimax.py
|
[
"MIT"
] |
Python
|
point_distance_l1
|
float
|
def point_distance_l1(point1: tuple, point2: tuple, obstacle: tuple = None) -> float:
"""
Distance between two 2-d points using the Manhattan distance.
:param tuple point1: (x,y) coordinates of first point
:param tuple point2: (x,y) coordinates of first point
:param tuple obstacle: (x,y) coordinates of obstacle point (i.e. the opponent's boat)
:return: distance as a float object
"""
dist = abs(point1[0] - point2[0]) + abs(point1[1] - point2[1])
# print(point1, point2, dist)
return dist
|
Distance between two 2-d points using the Manhattan distance.
:param tuple point1: (x,y) coordinates of first point
:param tuple point2: (x,y) coordinates of first point
:param tuple obstacle: (x,y) coordinates of obstacle point (i.e. the opponent's boat)
:return: distance as a float object
|
Distance between two 2-d points using the Manhattan distance.
|
[
"Distance",
"between",
"two",
"2",
"-",
"d",
"points",
"using",
"the",
"Manhattan",
"distance",
"."
] |
def point_distance_l1(point1: tuple, point2: tuple, obstacle: tuple = None) -> float:
dist = abs(point1[0] - point2[0]) + abs(point1[1] - point2[1])
return dist
|
[
"def",
"point_distance_l1",
"(",
"point1",
":",
"tuple",
",",
"point2",
":",
"tuple",
",",
"obstacle",
":",
"tuple",
"=",
"None",
")",
"->",
"float",
":",
"dist",
"=",
"abs",
"(",
"point1",
"[",
"0",
"]",
"-",
"point2",
"[",
"0",
"]",
")",
"+",
"abs",
"(",
"point1",
"[",
"1",
"]",
"-",
"point2",
"[",
"1",
"]",
")",
"return",
"dist"
] |
Distance between two 2-d points using the Manhattan distance.
|
[
"Distance",
"between",
"two",
"2",
"-",
"d",
"points",
"using",
"the",
"Manhattan",
"distance",
"."
] |
[
"\"\"\"\n Distance between two 2-d points using the Manhattan distance.\n :param tuple point1: (x,y) coordinates of first point\n :param tuple point2: (x,y) coordinates of first point\n :param tuple obstacle: (x,y) coordinates of obstacle point (i.e. the opponent's boat)\n :return: distance as a float object\n \"\"\"",
"# print(point1, point2, dist)"
] |
[
{
"param": "point1",
"type": "tuple"
},
{
"param": "point2",
"type": "tuple"
},
{
"param": "obstacle",
"type": "tuple"
}
] |
{
"returns": [
{
"docstring": "distance as a float object",
"docstring_tokens": [
"distance",
"as",
"a",
"float",
"object"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "point1",
"type": "tuple",
"docstring": "(x,y) coordinates of first point",
"docstring_tokens": [
"(",
"x",
"y",
")",
"coordinates",
"of",
"first",
"point"
],
"default": null,
"is_optional": false
},
{
"identifier": "point2",
"type": "tuple",
"docstring": "(x,y) coordinates of first point",
"docstring_tokens": [
"(",
"x",
"y",
")",
"coordinates",
"of",
"first",
"point"
],
"default": null,
"is_optional": false
},
{
"identifier": "obstacle",
"type": "tuple",
"docstring": "(x,y) coordinates of obstacle point",
"docstring_tokens": [
"(",
"x",
"y",
")",
"coordinates",
"of",
"obstacle",
"point"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def point_distance_l1(point1: tuple, point2: tuple, obstacle: tuple = None) -> float:
dist = abs(point1[0] - point2[0]) + abs(point1[1] - point2[1])
return dist
| 1,374 | 805 |
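A usage sketch for point_distance_l1. Note that the obstacle parameter is accepted but never used in the body:

print(point_distance_l1((0, 0), (3, 4)))  # 7: |0-3| + |0-4| (ints in, int out despite the float annotation)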
29f37409f9a7f483d4454a4f385d2f628effaac2
|
blobtoolkit/pipeline
|
v1/scripts/taxonomy.py
|
[
"MIT"
] |
Python
|
parents_at_rank
|
<not_specific>
|
def parents_at_rank(graph, root, parent_rank):
"""
loop through graph from root taxon, assigning leaf nodes to parent nodes at
a given rank.
"""
parents = {}
def descend(root, parent):
"""
Iteratively descend from a root to generate a set of taxids
unless the child taxid is in the list of taxids to mask.
"""
if root in graph:
for child, rank in graph[root].items():
if rank == parent_rank:
descend(child, child)
elif parent:
parents[child] = parent
descend(child, parent)
else:
descend(child, None)
descend(str(root), str(root))
return parents
|
loop through graph from root taxon, assigning leaf nodes to parent nodes at
a given rank.
|
loop through graph from root taxon, assigning leaf nodes to parent nodes at
a given rank.
|
[
"loop",
"through",
"graph",
"from",
"root",
"taxon",
"assigning",
"leaf",
"nodes",
"to",
"parent",
"nodes",
"at",
"a",
"given",
"rank",
"."
] |
def parents_at_rank(graph, root, parent_rank):
parents = {}
def descend(root, parent):
if root in graph:
for child, rank in graph[root].items():
if rank == parent_rank:
descend(child, child)
elif parent:
parents[child] = parent
descend(child, parent)
else:
descend(child, None)
descend(str(root), str(root))
return parents
|
[
"def",
"parents_at_rank",
"(",
"graph",
",",
"root",
",",
"parent_rank",
")",
":",
"parents",
"=",
"{",
"}",
"def",
"descend",
"(",
"root",
",",
"parent",
")",
":",
"\"\"\"\n Iteratively descend from a root to generate a set of taxids\n unless the child taxid is in the list of taxids to mask.\n \"\"\"",
"if",
"root",
"in",
"graph",
":",
"for",
"child",
",",
"rank",
"in",
"graph",
"[",
"root",
"]",
".",
"items",
"(",
")",
":",
"if",
"rank",
"==",
"parent_rank",
":",
"descend",
"(",
"child",
",",
"child",
")",
"elif",
"parent",
":",
"parents",
"[",
"child",
"]",
"=",
"parent",
"descend",
"(",
"child",
",",
"parent",
")",
"else",
":",
"descend",
"(",
"child",
",",
"None",
")",
"descend",
"(",
"str",
"(",
"root",
")",
",",
"str",
"(",
"root",
")",
")",
"return",
"parents"
] |
loop through graph from root taxon, assigning leaf nodes to parent nodes at
a given rank.
|
[
"loop",
"through",
"graph",
"from",
"root",
"taxon",
"assigning",
"leaf",
"nodes",
"to",
"parent",
"nodes",
"at",
"a",
"given",
"rank",
"."
] |
[
"\"\"\"\n loop through graph from root taxon, assigning leaf nodes to parent nodes at\n a given rank.\n \"\"\"",
"\"\"\"\n Iteratively descend from a root to generate a set of taxids\n unless the child taxid is in the list of taxids to mask.\n \"\"\""
] |
[
{
"param": "graph",
"type": null
},
{
"param": "root",
"type": null
},
{
"param": "parent_rank",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "graph",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "root",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "parent_rank",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def parents_at_rank(graph, root, parent_rank):
parents = {}
def descend(root, parent):
if root in graph:
for child, rank in graph[root].items():
if rank == parent_rank:
descend(child, child)
elif parent:
parents[child] = parent
descend(child, parent)
else:
descend(child, None)
descend(str(root), str(root))
return parents
| 1,375 | 152 |
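A usage sketch for parents_at_rank, assuming graph maps each parent taxid to a dict of {child taxid: child's rank} (illustrative taxids):

graph = {'1': {'2': 'genus'}, '2': {'3': 'species', '4': 'species'}}
print(parents_at_rank(graph, 1, 'genus'))  # {'3': '2', '4': '2'}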
08e3bddb0a455d6736a4900f2524eaca0858d05a
|
Aalto5G/SecurityPolicyManagement
|
Testing/performance/TEST_CES_API_DB.py
|
[
"BSD-3-Clause"
] |
Python
|
test13
| null |
async def test13(mysql_obj):
'''
test Update query creation function for insert in host_sfqdn
'''
data = await mysql_obj.host_policy_update('SFQDN', id=1, fqdn='test.aalto.com', sfqdn='httptest.hassaan.aalto.fi', proxy_required=1, carriergrade=0, protocol='5', port='10', loose_packet=3)
print('test13() reply= {}'.format(data))
# print(json.dumps(data, indent=4))
print('-----------------------------')
|
test Update query creation function for insert in host_sfqdn
|
test Update query creation function for insert in host_sfqdn
|
[
"test",
"Update",
"query",
"creation",
"function",
"for",
"insert",
"in",
"host_sfqdn"
] |
async def test13(mysql_obj):
data = await mysql_obj.host_policy_update('SFQDN', id=1, fqdn='test.aalto.com', sfqdn='httptest.hassaan.aalto.fi', proxy_required=1, carriergrade=0, protocol='5', port='10', loose_packet=3)
print('test13() reply= {}'.format(data))
print('-----------------------------')
|
[
"async",
"def",
"test13",
"(",
"mysql_obj",
")",
":",
"data",
"=",
"await",
"mysql_obj",
".",
"host_policy_update",
"(",
"'SFQDN'",
",",
"id",
"=",
"1",
",",
"fqdn",
"=",
"'test.aalto.com'",
",",
"sfqdn",
"=",
"'httptest.hassaan.aalto.fi'",
",",
"proxy_required",
"=",
"1",
",",
"carriergrade",
"=",
"0",
",",
"protocol",
"=",
"'5'",
",",
"port",
"=",
"'10'",
",",
"loose_packet",
"=",
"3",
")",
"print",
"(",
"'test13() reply= {}'",
".",
"format",
"(",
"data",
")",
")",
"print",
"(",
"'-----------------------------'",
")"
] |
test Update query creation function for insert in host_sfqdn
|
[
"test",
"Update",
"query",
"creation",
"function",
"for",
"insert",
"in",
"host_sfqdn"
] |
[
"'''\n test Update query creation function for insert in host_sfqdn\n '''",
"# print(json.dumps(data, indent=4))"
] |
[
{
"param": "mysql_obj",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "mysql_obj",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
async def test13(mysql_obj):
data = await mysql_obj.host_policy_update('SFQDN', id=1, fqdn='test.aalto.com', sfqdn='httptest.hassaan.aalto.fi', proxy_required=1, carriergrade=0, protocol='5', port='10', loose_packet=3)
print('test13() reply= {}'.format(data))
print('-----------------------------')
| 1,376 | 140 |
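A sketch of driving this coroutine without a live database, using unittest.mock.AsyncMock (Python 3.8+) as a stand-in for the real mysql_obj; the reply value is fabricated:

import asyncio
from unittest.mock import AsyncMock

mysql_obj = AsyncMock()
mysql_obj.host_policy_update.return_value = {'status': 'ok'}  # fabricated reply
asyncio.run(test13(mysql_obj))  # prints: test13() reply= {'status': 'ok'}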
fb31a59912d9a7aa4743c1959e0bbcedefa53dcb
|
nmaludy/st2cd
|
actions/json_field_modify.py
|
[
"Apache-2.0"
] |
Python
|
modify_json_field_and_save
|
<not_specific>
|
def modify_json_field_and_save(file_path, field, value):
"""
Loads JSON file from disk, looks for field and modifies field's
value to given value.
:param file_path: Absolute path to JSON file.
:type file_path: ``str``
:param field: Name of the field to modify.
:type field: ``str``
:param value: Value to replace with.
:type value: ``str`` or ``int`` or ``boolean`` or ``object``
"""
if not os.path.exists(file_path):
raise Exception('File %s not found.' % file_path)
json_doc = None
with open(file_path, 'r') as json_file:
json_doc = json.load(json_file)
if field not in json_doc:
raise Exception('Field %s not found in doc %s.' % (field, file_path))
json_doc[field] = value
with open(file_path, 'w+') as json_file:
json_file.write(json.dumps(json_doc, indent=2, sort_keys=True))
return
|
Loads JSON file from disk, looks for field and modifies field's
value to given value.
:param file_path: Absolute path to JSON file.
:type file_path: ``str``
:param field: Name of the field to modify.
:type field: ``str``
:param value: Value to replace with.
:type value: ``str`` or ``int`` or ``boolean`` or ``object``
|
Loads JSON file from disk, looks for field and modifies field's
value to given value.
|
[
"Loads",
"JSON",
"file",
"from",
"disk",
"looks",
"for",
"field",
"and",
"modifies",
"field",
"'",
"s",
"value",
"to",
"given",
"value",
"."
] |
def modify_json_field_and_save(file_path, field, value):
if not os.path.exists(file_path):
raise Exception('File %s not found.' % file_path)
json_doc = None
with open(file_path, 'r') as json_file:
json_doc = json.load(json_file)
if field not in json_doc:
raise Exception('Field %s not found in doc %s.' % (field, file_path))
json_doc[field] = value
with open(file_path, 'w+') as json_file:
json_file.write(json.dumps(json_doc, indent=2, sort_keys=True))
return
|
[
"def",
"modify_json_field_and_save",
"(",
"file_path",
",",
"field",
",",
"value",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"raise",
"Exception",
"(",
"'File %s not found.'",
"%",
"file_path",
")",
"json_doc",
"=",
"None",
"with",
"open",
"(",
"file_path",
",",
"'r'",
")",
"as",
"json_file",
":",
"json_doc",
"=",
"json",
".",
"load",
"(",
"json_file",
")",
"if",
"field",
"not",
"in",
"json_doc",
":",
"raise",
"Exception",
"(",
"'Field %s not found in doc %s.'",
"%",
"(",
"field",
",",
"file_path",
")",
")",
"json_doc",
"[",
"field",
"]",
"=",
"value",
"with",
"open",
"(",
"file_path",
",",
"'w+'",
")",
"as",
"json_file",
":",
"json_file",
".",
"write",
"(",
"json",
".",
"dumps",
"(",
"json_doc",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")",
")",
"return"
] |
Loads JSON file from disk, looks for field and modifies field's
value to given value.
|
[
"Loads",
"JSON",
"file",
"from",
"disk",
"looks",
"for",
"field",
"and",
"modifies",
"field",
"'",
"s",
"value",
"to",
"given",
"value",
"."
] |
[
"\"\"\"\n Loads JSON file from disk, looks for field and modifies field's\n value to given value.\n\n :param file_path: Absolute path to JSON file.\n :type file_path: ``str``\n\n :param field: Name of the field to modify.\n :type field: ``str``\n\n :param value: Value to replace with.\n :type value: ``str`` or ``int`` or ``boolean`` or ``object``\n \"\"\""
] |
[
{
"param": "file_path",
"type": null
},
{
"param": "field",
"type": null
},
{
"param": "value",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "file_path",
"type": null,
"docstring": "Absolute path to JSON file.",
"docstring_tokens": [
"Absolute",
"path",
"to",
"JSON",
"file",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "field",
"type": null,
"docstring": "Name of the field to modify.",
"docstring_tokens": [
"Name",
"of",
"the",
"field",
"to",
"modify",
"."
],
"default": null,
"is_optional": null
},
{
"identifier": "value",
"type": null,
"docstring": "Value to replace with.",
"docstring_tokens": [
"Value",
"to",
"replace",
"with",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
import json
def modify_json_field_and_save(file_path, field, value):
if not os.path.exists(file_path):
raise Exception('File %s not found.' % file_path)
json_doc = None
with open(file_path, 'r') as json_file:
json_doc = json.load(json_file)
if field not in json_doc:
raise Exception('Field %s not found in doc %s.' % (field, file_path))
json_doc[field] = value
with open(file_path, 'w+') as json_file:
json_file.write(json.dumps(json_doc, indent=2, sort_keys=True))
return
| 1,377 | 644 |
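A round-trip usage sketch for modify_json_field_and_save, writing to a temporary file so it is safe to run:

import json, os, tempfile

path = os.path.join(tempfile.mkdtemp(), 'config.json')
with open(path, 'w') as f:
    json.dump({'version': 1}, f)
modify_json_field_and_save(path, 'version', 2)
with open(path) as f:
    print(json.load(f))  # {'version': 2}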
d157692ebab6ae577b945f56101f5dfe00812bfd
|
abitmore/hivemind
|
hive/server/common/helpers.py
|
[
"MIT"
] |
Python
|
valid_sort
|
<not_specific>
|
def valid_sort(sort, allow_empty=False):
"""Returns validated sort name or throws Assert."""
if not sort:
assert allow_empty, 'sort must be specified'
return ""
assert isinstance(sort, str), 'sort must be a string'
# TODO: differentiate valid sorts on comm vs tag
valid_sorts = ['trending', 'promoted', 'hot', 'created',
'payout', 'payout_comments', 'muted']
assert sort in valid_sorts, 'invalid sort `%s`' % sort
return sort
|
Returns validated sort name or throws Assert.
|
Returns validated sort name or throws Assert.
|
[
"Returns",
"validated",
"sort",
"name",
"or",
"throws",
"Assert",
"."
] |
def valid_sort(sort, allow_empty=False):
if not sort:
assert allow_empty, 'sort must be specified'
return ""
assert isinstance(sort, str), 'sort must be a string'
valid_sorts = ['trending', 'promoted', 'hot', 'created',
'payout', 'payout_comments', 'muted']
assert sort in valid_sorts, 'invalid sort `%s`' % sort
return sort
|
[
"def",
"valid_sort",
"(",
"sort",
",",
"allow_empty",
"=",
"False",
")",
":",
"if",
"not",
"sort",
":",
"assert",
"allow_empty",
",",
"'sort must be specified'",
"return",
"\"\"",
"assert",
"isinstance",
"(",
"sort",
",",
"str",
")",
",",
"'sort must be a string'",
"valid_sorts",
"=",
"[",
"'trending'",
",",
"'promoted'",
",",
"'hot'",
",",
"'created'",
",",
"'payout'",
",",
"'payout_comments'",
",",
"'muted'",
"]",
"assert",
"sort",
"in",
"valid_sorts",
",",
"'invalid sort `%s`'",
"%",
"sort",
"return",
"sort"
] |
Returns validated sort name or throws Assert.
|
[
"Returns",
"validated",
"sort",
"name",
"or",
"throws",
"Assert",
"."
] |
[
"\"\"\"Returns validated sort name or throws Assert.\"\"\"",
"# TODO: differentiate valid sorts on comm vs tag"
] |
[
{
"param": "sort",
"type": null
},
{
"param": "allow_empty",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "sort",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "allow_empty",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def valid_sort(sort, allow_empty=False):
if not sort:
assert allow_empty, 'sort must be specified'
return ""
assert isinstance(sort, str), 'sort must be a string'
valid_sorts = ['trending', 'promoted', 'hot', 'created',
'payout', 'payout_comments', 'muted']
assert sort in valid_sorts, 'invalid sort `%s`' % sort
return sort
| 1,378 | 228 |
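A usage sketch for valid_sort covering its three paths: a valid sort, an allowed empty value, and an invalid name:

print(valid_sort('trending'))            # 'trending'
print(valid_sort('', allow_empty=True))  # ''
# valid_sort('oldest') raises AssertionError: invalid sort `oldest`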
a4bd33a875d211b9ae8874a18be0a9fc354d4865
|
tmulder/antelope_contrib
|
bin/db/smc2db/smc2db.xpy
|
[
"BSD-2-Clause",
"MIT"
] |
Python
|
_get_segtype
|
<not_specific>
|
def _get_segtype(data):
"""
Returns the segtype of the input data.
"""
data_type = data['text_header']['data_type']
if data_type == None: segtype = 'A'
elif data_type[0] == '0': segtype = 'A'
elif data_type[0] == '1': segtype = 'A'
elif data_type[0] == '2': segtype = 'A'
elif data_type[0] == '3': segtype = 'V'
elif data_type[0] == '4': segtype = 'V' #this is not the correct code...
else: raise(Exception('Conversion of response/Fourier amplitude spectra not ' \
'currently supported.'))
return segtype
|
Returns the segtype of the input data.
|
Returns the segtype of the input data.
|
[
"Returns",
"the",
"segtype",
"of",
"the",
"input",
"data",
"."
] |
def _get_segtype(data):
data_type = data['text_header']['data_type']
if data_type == None: segtype = 'A'
elif data_type[0] == '0': segtype = 'A'
elif data_type[0] == '1': segtype = 'A'
elif data_type[0] == '2': segtype = 'A'
elif data_type[0] == '3': segtype = 'V'
elif data_type[0] == '4': segtype = 'V'
else: raise(Exception('Conversion of response/Fourier amplitude spectra not ' \
'currently supported.'))
return segtype
|
[
"def",
"_get_segtype",
"(",
"data",
")",
":",
"data_type",
"=",
"data",
"[",
"'text_header'",
"]",
"[",
"'data_type'",
"]",
"if",
"data_type",
"==",
"None",
":",
"segtype",
"=",
"'A'",
"elif",
"data_type",
"[",
"0",
"]",
"==",
"'0'",
":",
"segtype",
"=",
"'A'",
"elif",
"data_type",
"[",
"0",
"]",
"==",
"'1'",
":",
"segtype",
"=",
"'A'",
"elif",
"data_type",
"[",
"0",
"]",
"==",
"'2'",
":",
"segtype",
"=",
"'A'",
"elif",
"data_type",
"[",
"0",
"]",
"==",
"'3'",
":",
"segtype",
"=",
"'V'",
"elif",
"data_type",
"[",
"0",
"]",
"==",
"'4'",
":",
"segtype",
"=",
"'V'",
"else",
":",
"raise",
"(",
"Exception",
"(",
"'Conversion of response/Fourier amplitude spectra not '",
"'currently supported.'",
")",
")",
"return",
"segtype"
] |
Returns the segtype of the input data.
|
[
"Returns",
"the",
"segtype",
"of",
"the",
"input",
"data",
"."
] |
[
"\"\"\"\n Returns the segtype of the input data.\n \"\"\"",
"#this is not the correct code..."
] |
[
{
"param": "data",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _get_segtype(data):
data_type = data['text_header']['data_type']
if data_type == None: segtype = 'A'
elif data_type[0] == '0': segtype = 'A'
elif data_type[0] == '1': segtype = 'A'
elif data_type[0] == '2': segtype = 'A'
elif data_type[0] == '3': segtype = 'V'
elif data_type[0] == '4': segtype = 'V'
else: raise(Exception('Conversion of response/Fourier amplitude spectra not ' \
'currently supported.'))
return segtype
| 1,380 | 257 |
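A usage sketch for _get_segtype; the nested-dict shape mirrors the lookup in the body, and the data_type string is illustrative:

data = {'text_header': {'data_type': '3 (velocity data)'}}
print(_get_segtype(data))  # 'V': the leading '3' maps to velocity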
4eddcbe0ad71a5a92ff8babb78b3f812a4bc214e
|
suzuken/xbrlparser
|
lib/rdflib-3.1.0/rdflib/plugins/parsers/notation3.py
|
[
"MIT"
] |
Python
|
_utilized
|
<not_specific>
|
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix: return 1
return 0
|
_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node
|
_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node
|
[
"_utilized",
"(",
"n",
"node",
"other_attrs",
"unsuppressedPrefixes",
")",
"-",
">",
"boolean",
"Return",
"true",
"if",
"that",
"nodespace",
"is",
"utilized",
"within",
"the",
"node"
] |
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix: return 1
return 0
|
[
"def",
"_utilized",
"(",
"n",
",",
"node",
",",
"other_attrs",
",",
"unsuppressedPrefixes",
")",
":",
"if",
"n",
".",
"startswith",
"(",
"'xmlns:'",
")",
":",
"n",
"=",
"n",
"[",
"6",
":",
"]",
"elif",
"n",
".",
"startswith",
"(",
"'xmlns'",
")",
":",
"n",
"=",
"n",
"[",
"5",
":",
"]",
"if",
"(",
"n",
"==",
"\"\"",
"and",
"node",
".",
"prefix",
"in",
"[",
"\"#default\"",
",",
"None",
"]",
")",
"or",
"n",
"==",
"node",
".",
"prefix",
"or",
"n",
"in",
"unsuppressedPrefixes",
":",
"return",
"1",
"for",
"attr",
"in",
"other_attrs",
":",
"if",
"n",
"==",
"attr",
".",
"prefix",
":",
"return",
"1",
"return",
"0"
] |
_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node
|
[
"_utilized",
"(",
"n",
"node",
"other_attrs",
"unsuppressedPrefixes",
")",
"-",
">",
"boolean",
"Return",
"true",
"if",
"that",
"nodespace",
"is",
"utilized",
"within",
"the",
"node"
] |
[
"'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean\n Return true if that nodespace is utilized within the node'''"
] |
[
{
"param": "n",
"type": null
},
{
"param": "node",
"type": null
},
{
"param": "other_attrs",
"type": null
},
{
"param": "unsuppressedPrefixes",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "n",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "node",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "other_attrs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "unsuppressedPrefixes",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix: return 1
return 0
| 1,381 | 374 |
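A usage sketch for _utilized, using types.SimpleNamespace as a minimal stand-in for a DOM node with a prefix attribute:

from types import SimpleNamespace

node = SimpleNamespace(prefix='rdf')
print(_utilized('xmlns:rdf', node, [], []))  # 1: matches the node's own prefix
print(_utilized('xmlns:xsd', node, [], []))  # 0: namespace unused here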
0564d804f92330ede57f0dfb9a806f5df8f957e5
|
sasdevs/Courses-
|
MITx-6.00.2x/Lecture 10 - Experimental Data Part 1/lectureCode_L17_code.py
|
[
"Apache-2.0"
] |
Python
|
rSquare
|
<not_specific>
|
def rSquare(measured, estimated):
"""measured: one dimensional array of measured values
estimate: one dimensional array of predicted values"""
SEE = ((estimated - measured)**2).sum()
mMean = measured.sum()/float(len(measured))
MV = ((mMean - measured)**2).sum()
return 1 - SEE/MV
|
measured: one dimensional array of measured values
estimate: one dimensional array of predicted values
|
one dimensional array of measured values
estimate: one dimensional array of predicted values
|
[
"one",
"dimensional",
"array",
"of",
"measured",
"values",
"estimate",
":",
"one",
"dimensional",
"array",
"of",
"predicted",
"values"
] |
def rSquare(measured, estimated):
SEE = ((estimated - measured)**2).sum()
mMean = measured.sum()/float(len(measured))
MV = ((mMean - measured)**2).sum()
return 1 - SEE/MV
|
[
"def",
"rSquare",
"(",
"measured",
",",
"estimated",
")",
":",
"SEE",
"=",
"(",
"(",
"estimated",
"-",
"measured",
")",
"**",
"2",
")",
".",
"sum",
"(",
")",
"mMean",
"=",
"measured",
".",
"sum",
"(",
")",
"/",
"float",
"(",
"len",
"(",
"measured",
")",
")",
"MV",
"=",
"(",
"(",
"mMean",
"-",
"measured",
")",
"**",
"2",
")",
".",
"sum",
"(",
")",
"return",
"1",
"-",
"SEE",
"/",
"MV"
] |
measured: one dimensional array of measured values
estimate: one dimensional array of predicted values
|
[
"measured",
":",
"one",
"dimensional",
"array",
"of",
"measured",
"values",
"estimate",
":",
"one",
"dimensional",
"array",
"of",
"predicted",
"values"
] |
[
"\"\"\"measured: one dimensional array of measured values\n estimate: one dimensional array of predicted values\"\"\""
] |
[
{
"param": "measured",
"type": null
},
{
"param": "estimated",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "measured",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "estimated",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def rSquare(measured, estimated):
SEE = ((estimated - measured)**2).sum()
mMean = measured.sum()/float(len(measured))
MV = ((mMean - measured)**2).sum()
return 1 - SEE/MV
| 1,382 | 845 |
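A usage sketch for rSquare. The elementwise arithmetic and .sum() calls imply NumPy-style array inputs, so the sketch assumes NumPy:

import numpy as np  # assumed: the function needs array inputs

measured = np.array([1.0, 2.0, 3.0])
estimated = np.array([1.1, 1.9, 3.2])
print(round(rSquare(measured, estimated), 2))  # 0.97: close to 1 means a good fit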
7a39fbd265fbc708c4a6cda41219da7f26dbe56f
|
jackdevey/Govee-CLI
|
main.py
|
[
"WTFPL"
] |
Python
|
listdevices
| null |
def listdevices(ctx):
"""
Shows the devices registered to your Govee account
"""
content = ctx.devices
# For each device in the users account, display it's details and
# assign an iid that can be used to access the device
i = 0
for device in content:
print("------------------")
print("Device iid: " + str(i))
print("Device MAC: " + device["device"])
print("Model Name: " + device["model"])
print("Device Nickname: " + device["deviceName"])
print("Controllable: " + str(device["controllable"]))
print("Retrievable: " + str(device["retrievable"]))
print("Commands: ")
for commands in device["supportCmds"]:
print(" " + commands)
|
Shows the devices registered to your Govee account
|
Shows the devices registered to your Govee account
|
[
"Shows",
"the",
"devices",
"registered",
"to",
"your",
"Govee",
"account"
] |
def listdevices(ctx):
content = ctx.devices
i = 0
for device in content:
print("------------------")
print("Device iid: " + str(i))
print("Device MAC: " + device["device"])
print("Model Name: " + device["model"])
print("Device Nickname: " + device["deviceName"])
print("Controllable: " + str(device["controllable"]))
print("Retrievable: " + str(device["retrievable"]))
print("Commands: ")
for commands in device["supportCmds"]:
print(" " + commands)
|
[
"def",
"listdevices",
"(",
"ctx",
")",
":",
"content",
"=",
"ctx",
".",
"devices",
"i",
"=",
"0",
"for",
"device",
"in",
"content",
":",
"print",
"(",
"\"------------------\"",
")",
"print",
"(",
"\"Device iid: \"",
"+",
"str",
"(",
"i",
")",
")",
"print",
"(",
"\"Device MAC: \"",
"+",
"device",
"[",
"\"device\"",
"]",
")",
"print",
"(",
"\"Model Name: \"",
"+",
"device",
"[",
"\"model\"",
"]",
")",
"print",
"(",
"\"Device Nickname: \"",
"+",
"device",
"[",
"\"deviceName\"",
"]",
")",
"print",
"(",
"\"Controllable: \"",
"+",
"str",
"(",
"device",
"[",
"\"controllable\"",
"]",
")",
")",
"print",
"(",
"\"Retrievable: \"",
"+",
"str",
"(",
"device",
"[",
"\"retrievable\"",
"]",
")",
")",
"print",
"(",
"\"Commands: \"",
")",
"for",
"commands",
"in",
"device",
"[",
"\"supportCmds\"",
"]",
":",
"print",
"(",
"\" \"",
"+",
"commands",
")"
] |
Shows the devices registered to your Govee account
|
[
"Shows",
"the",
"devices",
"registered",
"to",
"your",
"Govee",
"account"
] |
[
"\"\"\"\n Shows the devices registered to your Govee account\n \"\"\"",
"# For each device in the users account, display it's details and",
"# assign an iid that can be used to access the device"
] |
[
{
"param": "ctx",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "ctx",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def listdevices(ctx):
content = ctx.devices
i = 0
for device in content:
print("------------------")
print("Device iid: " + str(i))
print("Device MAC: " + device["device"])
print("Model Name: " + device["model"])
print("Device Nickname: " + device["deviceName"])
print("Controllable: " + str(device["controllable"]))
print("Retrievable: " + str(device["retrievable"]))
print("Commands: ")
for commands in device["supportCmds"]:
print(" " + commands)
| 1,383 | 478 |
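A usage sketch for listdevices, with SimpleNamespace standing in for the real context object and fabricated device fields. Note that the iid counter i is initialized but never incremented, so every device prints iid 0:

from types import SimpleNamespace

ctx = SimpleNamespace(devices=[{
    'device': 'AA:BB:CC:DD:EE:FF', 'model': 'H6159',  # fabricated values
    'deviceName': 'Desk strip', 'controllable': True,
    'retrievable': True, 'supportCmds': ['turn', 'brightness'],
}])
listdevices(ctx)  # prints one detail block per device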
06f8e442be7b8f6d76b6ebfd12aee354f47c2ea8
|
zoeimogen/AoC2019
|
aoc2019/day12.py
|
[
"MIT"
] |
Python
|
energy
|
int
|
def energy(moons: List[int]) -> int:
'''Calculate the energy of the system'''
e = 0
for moon in range(0, 4):
e += (sum([abs(p) for p in moons[moon*6:moon*6+3]]) *
sum([abs(v) for v in moons[moon*6+3:moon*6+6]]))
return e
|
Calculate the energy of the system
|
Calculate the energy of the system
|
[
"Calculate",
"the",
"energy",
"of",
"the",
"system"
] |
def energy(moons: List[int]) -> int:
e = 0
for moon in range(0, 4):
e += (sum([abs(p) for p in moons[moon*6:moon*6+3]]) *
sum([abs(v) for v in moons[moon*6+3:moon*6+6]]))
return e
|
[
"def",
"energy",
"(",
"moons",
":",
"List",
"[",
"int",
"]",
")",
"->",
"int",
":",
"e",
"=",
"0",
"for",
"moon",
"in",
"range",
"(",
"0",
",",
"4",
")",
":",
"e",
"+=",
"(",
"sum",
"(",
"[",
"abs",
"(",
"p",
")",
"for",
"p",
"in",
"moons",
"[",
"moon",
"*",
"6",
":",
"moon",
"*",
"6",
"+",
"3",
"]",
"]",
")",
"*",
"sum",
"(",
"[",
"abs",
"(",
"v",
")",
"for",
"v",
"in",
"moons",
"[",
"moon",
"*",
"6",
"+",
"3",
":",
"moon",
"*",
"6",
"+",
"6",
"]",
"]",
")",
")",
"return",
"e"
] |
Calculate the energy of the system
|
[
"Calculate",
"the",
"energy",
"of",
"the",
"system"
] |
[
"'''Calculate the energy of the system'''"
] |
[
{
"param": "moons",
"type": "List[int]"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "moons",
"type": "List[int]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def energy(moons: List[int]) -> int:
e = 0
for moon in range(0, 4):
e += (sum([abs(p) for p in moons[moon*6:moon*6+3]]) *
sum([abs(v) for v in moons[moon*6+3:moon*6+6]]))
return e
| 1,384 | 1,000 |
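A usage sketch for energy. The slicing implies a flat 24-element list, six values per moon (three position components, then three velocity components). The definition itself also needs from typing import List, which code_with_imports omits. The values below appear to reproduce a known Advent of Code 2019 day 12 sample state whose total energy is 179:

moons = [2, 1, -3, -3, -2, 1,   # moon 0: pot 6 * kin 6 = 36
         1, -8, 0, -1, 1, 3,    # moon 1: pot 9 * kin 5 = 45
         3, -6, 1, 3, 2, -3,    # moon 2: pot 10 * kin 8 = 80
         2, 0, 4, 1, -1, -1]    # moon 3: pot 6 * kin 3 = 18
print(energy(moons))  # 179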
906d39059be743c791b85759ce5baf0f4b4592d7
|
WasabiWabiSabi/container_dev
|
containers/fibonacci.py
|
[
"MIT"
] |
Python
|
fib_yield
| null |
def fib_yield(n=None):
'''
This function returns a generator that computes
the first n fibonacci numbers.
If n is None, then the generator is infinite.
'''
f0 = 1
f1 = 1
yield 1
if n is not None:
for i in range(n - 1):
f2 = f1 + f0
f0 = f1
f1 = f2
yield f0
else:
while True:
f2 = f1 + f0
f0 = f1
f1 = f2
yield f0
|
This function returns a generator that computes
the first n fibonacci numbers.
If n is None, then the generator is infinite.
|
This function returns a generator that computes
the first n fibonacci numbers.
If n is None, then the generator is infinite.
|
[
"This",
"function",
"returns",
"a",
"generator",
"that",
"computes",
"the",
"first",
"n",
"fibonacci",
"numbers",
".",
"If",
"n",
"is",
"None",
"then",
"the",
"generator",
"is",
"infinite",
"."
] |
def fib_yield(n=None):
f0 = 1
f1 = 1
yield 1
if n is not None:
for i in range(n - 1):
f2 = f1 + f0
f0 = f1
f1 = f2
yield f0
else:
while True:
f2 = f1 + f0
f0 = f1
f1 = f2
yield f0
|
[
"def",
"fib_yield",
"(",
"n",
"=",
"None",
")",
":",
"f0",
"=",
"1",
"f1",
"=",
"1",
"yield",
"1",
"if",
"n",
"is",
"not",
"None",
":",
"for",
"i",
"in",
"range",
"(",
"n",
"-",
"1",
")",
":",
"f2",
"=",
"f1",
"+",
"f0",
"f0",
"=",
"f1",
"f1",
"=",
"f2",
"yield",
"f0",
"else",
":",
"while",
"True",
":",
"f2",
"=",
"f1",
"+",
"f0",
"f0",
"=",
"f1",
"f1",
"=",
"f2",
"yield",
"f0"
] |
This function returns a generator that computes
the first n fibonacci numbers.
|
[
"This",
"function",
"returns",
"a",
"generator",
"that",
"computes",
"the",
"first",
"n",
"fibonacci",
"numbers",
"."
] |
[
"'''\n This function returns a generator that computes\n the first n fibonacci numbers.\n If n is None, then the generator is infinite.\n '''"
] |
[
{
"param": "n",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "n",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def fib_yield(n=None):
f0 = 1
f1 = 1
yield 1
if n is not None:
for i in range(n - 1):
f2 = f1 + f0
f0 = f1
f1 = f2
yield f0
else:
while True:
f2 = f1 + f0
f0 = f1
f1 = f2
yield f0
| 1,385 | 588 |
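A usage sketch for fib_yield, exercising both the bounded and the infinite form:

from itertools import islice

print(list(fib_yield(5)))            # [1, 1, 2, 3, 5]
print(list(islice(fib_yield(), 8)))  # [1, 1, 2, 3, 5, 8, 13, 21]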
95b66c39d4f9ebaf550fa2a0e7d8885df0da518a
|
snowdj/course
|
lectures/economic_models/generalized_roy/private_package/grmpy/tools/user/processing.py
|
[
"MIT"
] |
Python
|
_process_not_bene
|
<not_specific>
|
def _process_not_bene(list_, dict_, keyword):
""" This function processes all of the initialization file, but the
BENE section.
"""
# Distribute information
name, val = list_[0], list_[1]
# Prepare container.
if name not in dict_[keyword].keys():
if name in ['coeff']:
dict_[keyword][name] = []
# Type conversion
if name in ['agents', 'maxiter']:
val = int(val)
elif name in ['source', 'algorithm', 'start', 'version']:
val = str(val)
else:
val = float(val)
# Collect information
if name in ['coeff']:
dict_[keyword][name] += [val]
else:
dict_[keyword][name] = val
# Finishing.
return dict_
|
This function processes all of the initialization file, but the
BENE section.
|
This function processes all of the initialization file, but the
BENE section.
|
[
"This",
"function",
"processes",
"all",
"of",
"the",
"initialization",
"file",
"but",
"the",
"BENE",
"section",
"."
] |
def _process_not_bene(list_, dict_, keyword):
name, val = list_[0], list_[1]
if name not in dict_[keyword].keys():
if name in ['coeff']:
dict_[keyword][name] = []
if name in ['agents', 'maxiter']:
val = int(val)
elif name in ['source', 'algorithm', 'start', 'version']:
val = str(val)
else:
val = float(val)
if name in ['coeff']:
dict_[keyword][name] += [val]
else:
dict_[keyword][name] = val
return dict_
|
[
"def",
"_process_not_bene",
"(",
"list_",
",",
"dict_",
",",
"keyword",
")",
":",
"name",
",",
"val",
"=",
"list_",
"[",
"0",
"]",
",",
"list_",
"[",
"1",
"]",
"if",
"name",
"not",
"in",
"dict_",
"[",
"keyword",
"]",
".",
"keys",
"(",
")",
":",
"if",
"name",
"in",
"[",
"'coeff'",
"]",
":",
"dict_",
"[",
"keyword",
"]",
"[",
"name",
"]",
"=",
"[",
"]",
"if",
"name",
"in",
"[",
"'agents'",
",",
"'maxiter'",
"]",
":",
"val",
"=",
"int",
"(",
"val",
")",
"elif",
"name",
"in",
"[",
"'source'",
",",
"'algorithm'",
",",
"'start'",
",",
"'version'",
"]",
":",
"val",
"=",
"str",
"(",
"val",
")",
"else",
":",
"val",
"=",
"float",
"(",
"val",
")",
"if",
"name",
"in",
"[",
"'coeff'",
"]",
":",
"dict_",
"[",
"keyword",
"]",
"[",
"name",
"]",
"+=",
"[",
"val",
"]",
"else",
":",
"dict_",
"[",
"keyword",
"]",
"[",
"name",
"]",
"=",
"val",
"return",
"dict_"
] |
This function processes all of the initialization file, but the
BENE section.
|
[
"This",
"function",
"processes",
"all",
"of",
"the",
"initialization",
"file",
"but",
"the",
"BENE",
"section",
"."
] |
[
"\"\"\" This function processes all of the initialization file, but the\n BENE section.\n \"\"\"",
"# Distribute information",
"# Prepare container.",
"# Type conversion",
"# Collect information",
"# Finishing."
] |
[
{
"param": "list_",
"type": null
},
{
"param": "dict_",
"type": null
},
{
"param": "keyword",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "list_",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "dict_",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "keyword",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _process_not_bene(list_, dict_, keyword):
name, val = list_[0], list_[1]
if name not in dict_[keyword].keys():
if name in ['coeff']:
dict_[keyword][name] = []
if name in ['agents', 'maxiter']:
val = int(val)
elif name in ['source', 'algorithm', 'start', 'version']:
val = str(val)
else:
val = float(val)
if name in ['coeff']:
dict_[keyword][name] += [val]
else:
dict_[keyword][name] = val
return dict_
| 1,386 | 301 |
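A hypothetical call sequence for _process_not_bene above; the 'SIM' keyword and the key/value pairs are invented for illustration:

dict_ = {'SIM': {}}
dict_ = _process_not_bene(['agents', '100'], dict_, 'SIM')
dict_ = _process_not_bene(['coeff', '0.5'], dict_, 'SIM')
print(dict_)  # {'SIM': {'agents': 100, 'coeff': [0.5]}}: ints and floats converted, coeff values collected in a list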
e86597e0870f520af045e5dd257ba2bea4d443ef
|
grelleum/diffsync
|
examples/05-nautobot-peeringdb/adapter_nautobot.py
|
[
"Apache-2.0"
] |
Python
|
create
|
<not_specific>
|
def create(cls, diffsync, ids, attrs):
"""Create a new Region record in remote Nautobot.
Args:
diffsync (NautobotRemote): DiffSync adapter owning this Region
ids (dict): Initial values for this model's _identifiers
attrs (dict): Initial values for this model's _attributes
"""
data = {
"name": ids["name"],
"slug": attrs["slug"],
}
if attrs["description"]:
data["description"] = attrs["description"]
if attrs["parent_name"]:
data["parent"] = str(diffsync.get(diffsync.region, attrs["parent_name"]).pk)
diffsync.post("/api/dcim/regions/", data)
return super().create(diffsync, ids=ids, attrs=attrs)
|
Create a new Region record in remote Nautobot.
Args:
diffsync (NautobotRemote): DiffSync adapter owning this Region
ids (dict): Initial values for this model's _identifiers
attrs (dict): Initial values for this model's _attributes
|
Create a new Region record in remote Nautobot.
|
[
"Create",
"a",
"new",
"Region",
"record",
"in",
"remote",
"Nautobot",
"."
] |
def create(cls, diffsync, ids, attrs):
data = {
"name": ids["name"],
"slug": attrs["slug"],
}
if attrs["description"]:
data["description"] = attrs["description"]
if attrs["parent_name"]:
data["parent"] = str(diffsync.get(diffsync.region, attrs["parent_name"]).pk)
diffsync.post("/api/dcim/regions/", data)
return super().create(diffsync, ids=ids, attrs=attrs)
|
[
"def",
"create",
"(",
"cls",
",",
"diffsync",
",",
"ids",
",",
"attrs",
")",
":",
"data",
"=",
"{",
"\"name\"",
":",
"ids",
"[",
"\"name\"",
"]",
",",
"\"slug\"",
":",
"attrs",
"[",
"\"slug\"",
"]",
",",
"}",
"if",
"attrs",
"[",
"\"description\"",
"]",
":",
"data",
"[",
"\"description\"",
"]",
"=",
"attrs",
"[",
"\"description\"",
"]",
"if",
"attrs",
"[",
"\"parent_name\"",
"]",
":",
"data",
"[",
"\"parent\"",
"]",
"=",
"str",
"(",
"diffsync",
".",
"get",
"(",
"diffsync",
".",
"region",
",",
"attrs",
"[",
"\"parent_name\"",
"]",
")",
".",
"pk",
")",
"diffsync",
".",
"post",
"(",
"\"/api/dcim/regions/\"",
",",
"data",
")",
"return",
"super",
"(",
")",
".",
"create",
"(",
"diffsync",
",",
"ids",
"=",
"ids",
",",
"attrs",
"=",
"attrs",
")"
] |
Create a new Region record in remote Nautobot.
|
[
"Create",
"a",
"new",
"Region",
"record",
"in",
"remote",
"Nautobot",
"."
] |
[
"\"\"\"Create a new Region record in remote Nautobot.\n\n Args:\n diffsync (NautobotRemote): DiffSync adapter owning this Region\n ids (dict): Initial values for this model's _identifiers\n attrs (dict): Initial values for this model's _attributes\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "diffsync",
"type": null
},
{
"param": "ids",
"type": null
},
{
"param": "attrs",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "diffsync",
"type": null,
"docstring": "DiffSync adapter owning this Region",
"docstring_tokens": [
"DiffSync",
"adapter",
"owning",
"this",
"Region"
],
"default": null,
"is_optional": false
},
{
"identifier": "ids",
"type": null,
"docstring": "Initial values for this model's _identifiers",
"docstring_tokens": [
"Initial",
"values",
"for",
"this",
"model",
"'",
"s",
"_identifiers"
],
"default": null,
"is_optional": false
},
{
"identifier": "attrs",
"type": null,
"docstring": "Initial values for this model's _attributes",
"docstring_tokens": [
"Initial",
"values",
"for",
"this",
"model",
"'",
"s",
"_attributes"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def create(cls, diffsync, ids, attrs):
data = {
"name": ids["name"],
"slug": attrs["slug"],
}
if attrs["description"]:
data["description"] = attrs["description"]
if attrs["parent_name"]:
data["parent"] = str(diffsync.get(diffsync.region, attrs["parent_name"]).pk)
diffsync.post("/api/dcim/regions/", data)
return super().create(diffsync, ids=ids, attrs=attrs)
| 1,387 | 6 |
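A hypothetical invocation sketch for the create classmethod above, assuming the Region model class and a NautobotRemote adapter from the same diffsync example file are available; falsy description/parent_name values skip the optional payload fields:

region = Region.create(
    diffsync=adapter,  # assumed NautobotRemote instance providing .post() and .get()
    ids={"name": "emea"},
    attrs={"slug": "emea", "description": "", "parent_name": None},
)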
5486e73ba0ae100049a501ec731561c73082ba24
|
thaynecurrie/charis-dep
|
charis/tools/charisLogger.py
|
[
"BSD-2-Clause-FreeBSD"
] |
Python
|
addFileHandler
| null |
def addFileHandler(log, lvl=1):
"""
This function will add a file handler to a log with the provided level.
Args:
log (CharisLogger object): A CharisLogger object that was freshly
instantiated.
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1.
"""
verbose = False
if verbose:
print('Setting FileHandler level to ' + str(lvl))
fh = logging.FileHandler(log.name + '.log')
fh.setLevel(lvl)
frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
fFrmt = logging.Formatter(frmtString)
fh.setFormatter(fFrmt)
log.addHandler(fh)
|
This function will add a file handler to a log with the provided level.
Args:
log (CharisLogger object): A CharisLogger object that was freshly
instantiated.
lvl (int): The severity level of messages printed to the file with
the file handler, default = 1.
|
This function will add a file handler to a log with the provided level.
|
[
"This",
"function",
"will",
"add",
"a",
"file",
"handler",
"to",
"a",
"log",
"with",
"the",
"provided",
"level",
"."
] |
def addFileHandler(log, lvl=1):
verbose = False
if verbose:
print('Setting FileHandler level to ' + str(lvl))
fh = logging.FileHandler(log.name + '.log')
fh.setLevel(lvl)
frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
fFrmt = logging.Formatter(frmtString)
fh.setFormatter(fFrmt)
log.addHandler(fh)
|
[
"def",
"addFileHandler",
"(",
"log",
",",
"lvl",
"=",
"1",
")",
":",
"verbose",
"=",
"False",
"if",
"verbose",
":",
"print",
"(",
"'Setting FileHandler level to '",
"+",
"str",
"(",
"lvl",
")",
")",
"fh",
"=",
"logging",
".",
"FileHandler",
"(",
"log",
".",
"name",
"+",
"'.log'",
")",
"fh",
".",
"setLevel",
"(",
"lvl",
")",
"frmtString",
"=",
"'%(asctime)s - %(name)s - %(levelname)s - %(message)s'",
"fFrmt",
"=",
"logging",
".",
"Formatter",
"(",
"frmtString",
")",
"fh",
".",
"setFormatter",
"(",
"fFrmt",
")",
"log",
".",
"addHandler",
"(",
"fh",
")"
] |
This function will add a file handler to a log with the provided level.
|
[
"This",
"function",
"will",
"add",
"a",
"file",
"handler",
"to",
"a",
"log",
"with",
"the",
"provided",
"level",
"."
] |
[
"\"\"\"\n This function will add a file handler to a log with the provided level.\n\n Args:\n log (CharisLogger object): A CharisLogger object that was freshly\n instantiated.\n lvl (int): The severity level of messages printed to the file with\n the file handler, default = 1.\n \"\"\""
] |
[
{
"param": "log",
"type": null
},
{
"param": "lvl",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "log",
"type": null,
"docstring": "A CharisLogger object that was freshly\ninstantiated.",
"docstring_tokens": [
"A",
"CharisLogger",
"object",
"that",
"was",
"freshly",
"instantiated",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "lvl",
"type": null,
"docstring": "The severity level of messages printed to the file with\nthe file handler, default = 1.",
"docstring_tokens": [
"The",
"severity",
"level",
"of",
"messages",
"printed",
"to",
"the",
"file",
"with",
"the",
"file",
"handler",
"default",
"=",
"1",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import logging
def addFileHandler(log, lvl=1):
verbose = False
if verbose:
print('Setting FileHandler level to ' + str(lvl))
fh = logging.FileHandler(log.name + '.log')
fh.setLevel(lvl)
frmtString = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
fFrmt = logging.Formatter(frmtString)
fh.setFormatter(fFrmt)
log.addHandler(fh)
| 1,390 | 616 |
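A minimal usage sketch for addFileHandler above, assuming the function and its logging import are in scope; the handler writes '<logger name>.log' in the current working directory:

import logging

log = logging.getLogger('charis_demo')
log.setLevel(logging.DEBUG)
addFileHandler(log, lvl=logging.DEBUG)  # the default lvl=1 would let every record through
log.debug('file handler attached')      # appended to charis_demo.log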
0ccabc6b30c29f72c9fcedb1cfbe99530ca34861
|
jandolezal/certificates
|
certificates/iscc.py
|
[
"MIT"
] |
Python
|
to_csv
|
None
|
def to_csv(cls, data: list, filename: str = 'iscc.csv') -> None:
"""Save list with certificates to csv file."""
pathlib.Path('data').mkdir(exist_ok=True)
with open(pathlib.Path('data') / filename, 'w', newline="") as csvf:
writer = csv.DictWriter(csvf, fieldnames=cls.prepare_fieldnames())
writer.writeheader()
for cert in data:
writer.writerow(dataclasses.asdict(cert))
|
Save list with certificates to csv file.
|
Save list with certificates to csv file.
|
[
"Save",
"list",
"with",
"certificates",
"to",
"csv",
"file",
"."
] |
def to_csv(cls, data: list, filename: str = 'iscc.csv') -> None:
pathlib.Path('data').mkdir(exist_ok=True)
with open(pathlib.Path('data') / filename, 'w', newline="") as csvf:
writer = csv.DictWriter(csvf, fieldnames=cls.prepare_fieldnames())
writer.writeheader()
for cert in data:
writer.writerow(dataclasses.asdict(cert))
|
[
"def",
"to_csv",
"(",
"cls",
",",
"data",
":",
"list",
",",
"filename",
":",
"str",
"=",
"'iscc.csv'",
")",
"->",
"None",
":",
"pathlib",
".",
"Path",
"(",
"'data'",
")",
".",
"mkdir",
"(",
"exist_ok",
"=",
"True",
")",
"with",
"open",
"(",
"pathlib",
".",
"Path",
"(",
"'data'",
")",
"/",
"filename",
",",
"'w'",
",",
"newline",
"=",
"\"\"",
")",
"as",
"csvf",
":",
"writer",
"=",
"csv",
".",
"DictWriter",
"(",
"csvf",
",",
"fieldnames",
"=",
"cls",
".",
"prepare_fieldnames",
"(",
")",
")",
"writer",
".",
"writeheader",
"(",
")",
"for",
"cert",
"in",
"data",
":",
"writer",
".",
"writerow",
"(",
"dataclasses",
".",
"asdict",
"(",
"cert",
")",
")"
] |
Save list with certificates to csv file.
|
[
"Save",
"list",
"with",
"certificates",
"to",
"csv",
"file",
"."
] |
[
"\"\"\"Save list with certificates to csv file.\"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "data",
"type": "list"
},
{
"param": "filename",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "data",
"type": "list",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "filename",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import dataclasses
import pathlib
import csv
def to_csv(cls, data: list, filename: str = 'iscc.csv') -> None:
pathlib.Path('data').mkdir(exist_ok=True)
with open(pathlib.Path('data') / filename, 'w', newline="") as csvf:
writer = csv.DictWriter(csvf, fieldnames=cls.prepare_fieldnames())
writer.writeheader()
for cert in data:
writer.writerow(dataclasses.asdict(cert))
| 1,392 | 549 |
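A hypothetical harness for the unbound to_csv above, assuming the function and the record's imports are in scope; the Cert dataclass and the fieldname list are invented stand-ins for the real owning class:

import dataclasses

@dataclasses.dataclass
class Cert:
    holder: str
    valid_until: str

class IsccStub:
    prepare_fieldnames = staticmethod(lambda: ['holder', 'valid_until'])
    to_csv = classmethod(to_csv)

IsccStub.to_csv([Cert('Acme', '2025-01-01')], 'demo.csv')  # writes data/demo.csv with a header row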
fad287fcb7f77a556a9cbdb52ea275a111d14e90
|
julmon/temboard-agent
|
temboardagent/tools.py
|
[
"PostgreSQL"
] |
Python
|
check_fqdn
|
<not_specific>
|
def check_fqdn(name):
"""
Check if a hostname is fully qualified, it must only contain
letters, - and have dots.
"""
# StackOverflow #11809631
if re.match(r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]' # noqa W605
r'{2,63}\.?$)', name):
return True
else:
return False
|
Check if a hostname is fully qualified, it must only contain
letters, - and have dots.
|
Check if a hostname is fully qualified, it must only contain
letters, - and have dots.
|
[
"Check",
"if",
"a",
"hostname",
"is",
"fully",
"qualified",
"it",
"must",
"only",
"contain",
"letters",
"-",
"and",
"have",
"dots",
"."
] |
def check_fqdn(name):
if re.match(r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]'
r'{2,63}\.?$)', name):
return True
else:
return False
|
[
"def",
"check_fqdn",
"(",
"name",
")",
":",
"if",
"re",
".",
"match",
"(",
"r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\\.)+[a-zA-Z]'",
"r'{2,63}\\.?$)'",
",",
"name",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Check if a hostname is fully qualified, it must only contain
letters, - and have dots.
|
[
"Check",
"if",
"a",
"hostname",
"is",
"fully",
"qualified",
"it",
"must",
"only",
"contain",
"letters",
"-",
"and",
"have",
"dots",
"."
] |
[
"\"\"\"\n Check if a hostname is fully qualified, it must only contain\n letters, - and have dots.\n \"\"\"",
"# StackOverflow #11809631",
"# noqa W605"
] |
[
{
"param": "name",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import re
def check_fqdn(name):
if re.match(r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]'
r'{2,63}\.?$)', name):
return True
else:
return False
| 1,393 | 160 |
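Spot checks for check_fqdn above, assuming the function and its re import are in scope:

for name in ('example.com', 'sub.example.org.', 'localhost', '-bad-.com'):
    print(name, check_fqdn(name))
# example.com True; sub.example.org. True; localhost False (no dot); -bad-.com False (leading hyphen)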
eb9a96eba23ce6011e5c3bbca7cb750894bb8349
|
adnaniazi/TALON
|
src/talon/talon.py
|
[
"MIT"
] |
Python
|
search_for_vertex_at_pos
|
<not_specific>
|
def search_for_vertex_at_pos(chromosome, position, location_dict):
""" Given a chromosome and a position (1-based), this function queries the
location dict to determine whether a vertex
fitting those criteria exists. Returns the row if yes, and __ if no.
"""
try:
return location_dict[chromosome][position]
except:
return None
|
Given a chromosome and a position (1-based), this function queries the
location dict to determine whether a vertex
fitting those criteria exists. Returns the row if yes, and __ if no.
|
Given a chromosome and a position (1-based), this function queries the
location dict to determine whether a vertex
fitting those criteria exists. Returns the row if yes, and __ if no.
|
[
"Given",
"a",
"chromosome",
"and",
"a",
"position",
"(",
"1",
"-",
"based",
")",
"this",
"function",
"queries",
"the",
"location",
"dict",
"to",
"determine",
"whether",
"a",
"vertex",
"fitting",
"those",
"criteria",
"exists",
".",
"Returns",
"the",
"row",
"if",
"yes",
"and",
"__",
"if",
"no",
"."
] |
def search_for_vertex_at_pos(chromosome, position, location_dict):
try:
return location_dict[chromosome][position]
except:
return None
|
[
"def",
"search_for_vertex_at_pos",
"(",
"chromosome",
",",
"position",
",",
"location_dict",
")",
":",
"try",
":",
"return",
"location_dict",
"[",
"chromosome",
"]",
"[",
"position",
"]",
"except",
":",
"return",
"None"
] |
Given a chromosome and a position (1-based), this function queries the
location dict to determine whether a vertex
fitting those criteria exists.
|
[
"Given",
"a",
"chromosome",
"and",
"a",
"position",
"(",
"1",
"-",
"based",
")",
"this",
"function",
"queries",
"the",
"location",
"dict",
"to",
"determine",
"whether",
"a",
"vertex",
"fitting",
"those",
"criteria",
"exists",
"."
] |
[
"\"\"\" Given a chromosome and a position (1-based), this function queries the \n location dict to determine whether a vertex \n fitting those criteria exists. Returns the row if yes, and __ if no.\n \"\"\""
] |
[
{
"param": "chromosome",
"type": null
},
{
"param": "position",
"type": null
},
{
"param": "location_dict",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "chromosome",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "position",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "location_dict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def search_for_vertex_at_pos(chromosome, position, location_dict):
try:
return location_dict[chromosome][position]
except:
return None
| 1,394 | 706 |
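A minimal usage sketch for search_for_vertex_at_pos above; the bare except maps any lookup failure to None:

location_dict = {'chr1': {1000: ('chr1', 1000, 'vertex_7')}}
print(search_for_vertex_at_pos('chr1', 1000, location_dict))  # ('chr1', 1000, 'vertex_7')
print(search_for_vertex_at_pos('chr2', 1000, location_dict))  # None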
3e601f53c8834b4479208f9f57ea32eb7c725dd2
|
gofortargets/CNN_brandsafety
|
knx/text/kte/extract.py
|
[
"Apache-2.0"
] |
Python
|
spearman_rank_similarity
|
<not_specific>
|
def spearman_rank_similarity(input_rank, concept_rank):
"""Compare two rankings based on Spearman's rank correlation
"""
word_to_rank_input = dict((word, rank) for rank, word in enumerate(input_rank))
rank_diff = float(sum((rank - word_to_rank_input[word]) ** 2 for rank, word in enumerate(concept_rank)))
size = len(input_rank)
return 1 - (3 * rank_diff) / (size ** 3 - size)
|
Compare two rankings based on Spearman's rank correlation
|
Compare two rankings based on Spearman's rank correlation
|
[
"Compare",
"two",
"rankings",
"based",
"on",
"Spearman",
"'",
"s",
"rank",
"correlation"
] |
def spearman_rank_similarity(input_rank, concept_rank):
word_to_rank_input = dict((word, rank) for rank, word in enumerate(input_rank))
rank_diff = float(sum((rank - word_to_rank_input[word]) ** 2 for rank, word in enumerate(concept_rank)))
size = len(input_rank)
return 1 - (3 * rank_diff) / (size ** 3 - size)
|
[
"def",
"spearman_rank_similarity",
"(",
"input_rank",
",",
"concept_rank",
")",
":",
"word_to_rank_input",
"=",
"dict",
"(",
"(",
"word",
",",
"rank",
")",
"for",
"rank",
",",
"word",
"in",
"enumerate",
"(",
"input_rank",
")",
")",
"rank_diff",
"=",
"float",
"(",
"sum",
"(",
"(",
"rank",
"-",
"word_to_rank_input",
"[",
"word",
"]",
")",
"**",
"2",
"for",
"rank",
",",
"word",
"in",
"enumerate",
"(",
"concept_rank",
")",
")",
")",
"size",
"=",
"len",
"(",
"input_rank",
")",
"return",
"1",
"-",
"(",
"3",
"*",
"rank_diff",
")",
"/",
"(",
"size",
"**",
"3",
"-",
"size",
")"
] |
Compare two rankings based on Spearman's rank correlation
|
[
"Compare",
"two",
"rankings",
"based",
"on",
"Spearman",
"'",
"s",
"rank",
"correlation"
] |
[
"\"\"\"Compare two rankings based on Spearman's rank correlation\n \"\"\""
] |
[
{
"param": "input_rank",
"type": null
},
{
"param": "concept_rank",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "input_rank",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "concept_rank",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def spearman_rank_similarity(input_rank, concept_rank):
word_to_rank_input = dict((word, rank) for rank, word in enumerate(input_rank))
rank_diff = float(sum((rank - word_to_rank_input[word]) ** 2 for rank, word in enumerate(concept_rank)))
size = len(input_rank)
return 1 - (3 * rank_diff) / (size ** 3 - size)
| 1,395 | 659 |
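Two worked calls for spearman_rank_similarity above; with the 3·Σd² scaling used here, identical rankings score 1.0 and a full reversal scores 0.0 (the textbook 6·Σd² form would give -1.0):

print(spearman_rank_similarity(['a', 'b', 'c'], ['a', 'b', 'c']))  # 1.0
print(spearman_rank_similarity(['a', 'b', 'c'], ['c', 'b', 'a']))  # 0.0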
5be76bc10172639739cc734fbb055fcd68c3a035
|
davidhwyllie/findNeighbour4
|
findNeighbour4_server.py
|
[
"MIT"
] |
Python
|
isjson
|
<not_specific>
|
def isjson(content):
"""returns true if content parses as json, otherwise false. used by unit testing."""
try:
json.loads(content.decode("utf-8"))
return True
except json.decoder.JSONDecodeError:
return False
|
returns true if content parses as json, otherwise false. used by unit testing.
|
returns true if content parses as json, otherwise false. used by unit testing.
|
[
"returns",
"true",
"if",
"content",
"parses",
"as",
"json",
"otherwise",
"false",
".",
"used",
"by",
"unit",
"testing",
"."
] |
def isjson(content):
try:
json.loads(content.decode("utf-8"))
return True
except json.decoder.JSONDecodeError:
return False
|
[
"def",
"isjson",
"(",
"content",
")",
":",
"try",
":",
"json",
".",
"loads",
"(",
"content",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"return",
"True",
"except",
"json",
".",
"decoder",
".",
"JSONDecodeError",
":",
"return",
"False"
] |
returns true if content parses as json, otherwise false.
|
[
"returns",
"true",
"if",
"content",
"parses",
"as",
"json",
"otherwise",
"false",
"."
] |
[
"\"\"\"returns true if content parses as json, otherwise false. used by unit testing.\"\"\""
] |
[
{
"param": "content",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "content",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import json
def isjson(content):
try:
json.loads(content.decode("utf-8"))
return True
except json.decoder.JSONDecodeError:
return False
| 1,397 | 754 |
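Spot checks for isjson above, assuming the function and its json import are in scope; only JSONDecodeError is caught, so bytes that fail UTF-8 decoding would still raise:

print(isjson(b'{"ok": true}'))  # True
print(isjson(b'not json'))      # False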
2770264ccfe8bd5f4fbcb9cbdc4ea5842db70420
|
BrianLusina/PythonSnips
|
puzzles/rectangles/__init__.py
|
[
"Apache-2.0",
"MIT"
] |
Python
|
possible_rect
|
<not_specific>
|
def possible_rect(quartet):
"""
validate that 4 points form a rectangle by comparing distance to centroid of the rectangle for all corners
:param quartet: The four points of the rectangle
:return: True if all 4 points have same distance to the centroid, False otherwise
:rtype: bool
"""
mid_x = 0
mid_y = 0
# centroid
for c in quartet:
mid_x += c.i / 4.0
mid_y += c.j / 4.0
# reference distance using first corner
dx = abs(quartet[0].i - mid_x)
dy = abs(quartet[0].j - mid_y)
# Check all the same distance from centroid are equals
for i in range(1, len(quartet)):
if abs(quartet[i].i - mid_x) != dx or abs(quartet[i].j - mid_y) != dy:
return False
return True
|
validate that 4 points form a rectangle by comparing distance to centroid of the rectangle for all corners
:param quartet: The four points of the rectangle
:return: True if all 4 points have same distance to the centroid, False otherwise
:rtype: bool
|
validate that 4 points form a rectangle by comparing distance to centroid of the rectangle for all corners
|
[
"validate",
"that",
"4",
"points",
"form",
"a",
"rectangle",
"by",
"comparing",
"distance",
"to",
"centroid",
"of",
"the",
"rectangle",
"for",
"all",
"corners"
] |
def possible_rect(quartet):
mid_x = 0
mid_y = 0
for c in quartet:
mid_x += c.i / 4.0
mid_y += c.j / 4.0
dx = abs(quartet[0].i - mid_x)
dy = abs(quartet[0].j - mid_y)
for i in range(1, len(quartet)):
if abs(quartet[i].i - mid_x) != dx or abs(quartet[i].j - mid_y) != dy:
return False
return True
|
[
"def",
"possible_rect",
"(",
"quartet",
")",
":",
"mid_x",
"=",
"0",
"mid_y",
"=",
"0",
"for",
"c",
"in",
"quartet",
":",
"mid_x",
"+=",
"c",
".",
"i",
"/",
"4.0",
"mid_y",
"+=",
"c",
".",
"j",
"/",
"4.0",
"dx",
"=",
"abs",
"(",
"quartet",
"[",
"0",
"]",
".",
"i",
"-",
"mid_x",
")",
"dy",
"=",
"abs",
"(",
"quartet",
"[",
"0",
"]",
".",
"j",
"-",
"mid_y",
")",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"len",
"(",
"quartet",
")",
")",
":",
"if",
"abs",
"(",
"quartet",
"[",
"i",
"]",
".",
"i",
"-",
"mid_x",
")",
"!=",
"dx",
"or",
"abs",
"(",
"quartet",
"[",
"i",
"]",
".",
"j",
"-",
"mid_y",
")",
"!=",
"dy",
":",
"return",
"False",
"return",
"True"
] |
validate that 4 points form a rectangle by comparing distance to centroid of the rectangle for all corners
|
[
"validate",
"that",
"4",
"points",
"form",
"a",
"rectangle",
"by",
"comparing",
"distance",
"to",
"centroid",
"of",
"the",
"rectangle",
"for",
"all",
"corners"
] |
[
"\"\"\"\n validate that 4 points form a rectangle by comparing distance to centroid of the rectangle for all corners\n :param quartet: The four points of the rectangle\n :return: True if all 4 points have same distance to the centroid, False otherwise\n :rtype: bool\n \"\"\"",
"# centroid",
"# reference distance using first corner",
"# Check all the same distance from centroid are equals"
] |
[
{
"param": "quartet",
"type": null
}
] |
{
"returns": [
{
"docstring": "True if all 4 points have same distance to the centroid, False otherwise",
"docstring_tokens": [
"True",
"if",
"all",
"4",
"points",
"have",
"same",
"distance",
"to",
"the",
"centroid",
"False",
"otherwise"
],
"type": "bool"
}
],
"raises": [],
"params": [
{
"identifier": "quartet",
"type": null,
"docstring": "The four points of the rectangle",
"docstring_tokens": [
"The",
"four",
"points",
"of",
"the",
"rectangle"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def possible_rect(quartet):
mid_x = 0
mid_y = 0
for c in quartet:
mid_x += c.i / 4.0
mid_y += c.j / 4.0
dx = abs(quartet[0].i - mid_x)
dy = abs(quartet[0].j - mid_y)
for i in range(1, len(quartet)):
if abs(quartet[i].i - mid_x) != dx or abs(quartet[i].j - mid_y) != dy:
return False
return True
| 1,398 | 264 |
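A minimal usage sketch for possible_rect above; any object with .i and .j attributes works, so a namedtuple stands in for the corner type:

from collections import namedtuple

Corner = namedtuple('Corner', 'i j')
print(possible_rect([Corner(0, 0), Corner(4, 0), Corner(0, 2), Corner(4, 2)]))  # True
print(possible_rect([Corner(0, 0), Corner(4, 0), Corner(0, 2), Corner(5, 3)]))  # False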
d04c8efbf1cc758346b9bad7865e0da6c04a72ef
|
khushboobhatia01/st2
|
st2common/st2common/models/api/inquiry.py
|
[
"Apache-2.0"
] |
Python
|
from_model
|
<not_specific>
|
def from_model(cls, model, mask_secrets=False, skip_db=False):
"""Create InquiryResponseAPI instance from model
Allows skipping the BaseAPI._from_model function if you already
have a properly formed dict and just need to prune it
:param skip_db: Skip the parent class' _from_model function call
:rtype: InquiryResponseAPI
"""
if not skip_db:
doc = cls._from_model(model, mask_secrets=mask_secrets)
else:
doc = model
newdoc = {"id": doc["id"]}
for field in ["route", "ttl", "users", "roles", "schema"]:
newdoc[field] = doc["result"].get(field)
return cls(**newdoc)
|
Create InquiryResponseAPI instance from model
Allows skipping the BaseAPI._from_model function if you already
have a properly formed dict and just need to prune it
:param skip_db: Skip the parent class' _from_model function call
:rtype: InquiryResponseAPI
|
Create InquiryResponseAPI instance from model
Allows skipping the BaseAPI._from_model function if you already
have a properly formed dict and just need to prune it
|
[
"Create",
"InquiryResponseAPI",
"instance",
"from",
"model",
"Allows",
"skipping",
"the",
"BaseAPI",
".",
"_from_model",
"function",
"if",
"you",
"already",
"have",
"a",
"properly",
"formed",
"dict",
"and",
"just",
"need",
"to",
"prune",
"it"
] |
def from_model(cls, model, mask_secrets=False, skip_db=False):
if not skip_db:
doc = cls._from_model(model, mask_secrets=mask_secrets)
else:
doc = model
newdoc = {"id": doc["id"]}
for field in ["route", "ttl", "users", "roles", "schema"]:
newdoc[field] = doc["result"].get(field)
return cls(**newdoc)
|
[
"def",
"from_model",
"(",
"cls",
",",
"model",
",",
"mask_secrets",
"=",
"False",
",",
"skip_db",
"=",
"False",
")",
":",
"if",
"not",
"skip_db",
":",
"doc",
"=",
"cls",
".",
"_from_model",
"(",
"model",
",",
"mask_secrets",
"=",
"mask_secrets",
")",
"else",
":",
"doc",
"=",
"model",
"newdoc",
"=",
"{",
"\"id\"",
":",
"doc",
"[",
"\"id\"",
"]",
"}",
"for",
"field",
"in",
"[",
"\"route\"",
",",
"\"ttl\"",
",",
"\"users\"",
",",
"\"roles\"",
",",
"\"schema\"",
"]",
":",
"newdoc",
"[",
"field",
"]",
"=",
"doc",
"[",
"\"result\"",
"]",
".",
"get",
"(",
"field",
")",
"return",
"cls",
"(",
"**",
"newdoc",
")"
] |
Create InquiryResponseAPI instance from model
Allows skipping the BaseAPI._from_model function if you already
have a properly formed dict and just need to prune it
|
[
"Create",
"InquiryResponseAPI",
"instance",
"from",
"model",
"Allows",
"skipping",
"the",
"BaseAPI",
".",
"_from_model",
"function",
"if",
"you",
"already",
"have",
"a",
"properly",
"formed",
"dict",
"and",
"just",
"need",
"to",
"prune",
"it"
] |
[
"\"\"\"Create InquiryResponseAPI instance from model\n\n Allows skipping the BaseAPI._from_model function if you already\n have a properly formed dict and just need to prune it\n\n :param skip_db: Skip the parent class' _from_model function call\n :rtype: InquiryResponseAPI\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "model",
"type": null
},
{
"param": "mask_secrets",
"type": null
},
{
"param": "skip_db",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "InquiryResponseAPI"
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "model",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "mask_secrets",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "skip_db",
"type": null,
"docstring": "Skip the parent class' _from_model function call",
"docstring_tokens": [
"Skip",
"the",
"parent",
"class",
"'",
"_from_model",
"function",
"call"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def from_model(cls, model, mask_secrets=False, skip_db=False):
if not skip_db:
doc = cls._from_model(model, mask_secrets=mask_secrets)
else:
doc = model
newdoc = {"id": doc["id"]}
for field in ["route", "ttl", "users", "roles", "schema"]:
newdoc[field] = doc["result"].get(field)
return cls(**newdoc)
| 1,401 | 117 |
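A hypothetical skip_db=True sketch for from_model above, assuming InquiryResponseAPI is the owning st2 model class; the dict already has the expected shape, so only the pruning branch runs:

doc = {
    'id': 'abcdef',
    'result': {'route': 'developers', 'ttl': 1440, 'users': [], 'roles': [], 'schema': {}},
}
response = InquiryResponseAPI.from_model(doc, skip_db=True)  # keeps id plus the five result fields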
3177753a82fb02b6ab28e1676ec3caaebc271189
|
5trobl/oaisys
|
src/assets/materials/MaterialTerrain.py
|
[
"MIT"
] |
Python
|
socket_type_to_class
|
<not_specific>
|
def socket_type_to_class(type_id):
"""Mapping of input types to class strings"""
if type_id == 'RGBA': #??
return 'NodeSocketColor'
elif type_id == 'VALUE':
return 'NodeSocketFloat'
elif type_id == 'VECTOR':
return 'NodeSocketVector'
elif type_id == 'CUSTOM':
print("WARNING! Mapping custom socket tupe to float")
return 'NodeSocketFloat'
else:
raise Exception('Unknown node socket type: '+type_id)
|
Mapping of input types to class strings
|
Mapping of input types to class strings
|
[
"Mapping",
"of",
"input",
"types",
"to",
"class",
"strings"
] |
def socket_type_to_class(type_id):
if type_id == 'RGBA':
return 'NodeSocketColor'
elif type_id == 'VALUE':
return 'NodeSocketFloat'
elif type_id == 'VECTOR':
return 'NodeSocketVector'
elif type_id == 'CUSTOM':
print("WARNING! Mapping custom socket tupe to float")
return 'NodeSocketFloat'
else:
raise Exception('Unknown node socket type: '+type_id)
|
[
"def",
"socket_type_to_class",
"(",
"type_id",
")",
":",
"if",
"type_id",
"==",
"'RGBA'",
":",
"return",
"'NodeSocketColor'",
"elif",
"type_id",
"==",
"'VALUE'",
":",
"return",
"'NodeSocketFloat'",
"elif",
"type_id",
"==",
"'VECTOR'",
":",
"return",
"'NodeSocketVector'",
"elif",
"type_id",
"==",
"'CUSTOM'",
":",
"print",
"(",
"\"WARNING! Mapping custom socket tupe to float\"",
")",
"return",
"'NodeSocketFloat'",
"else",
":",
"raise",
"Exception",
"(",
"'Unknown node socket type: '",
"+",
"type_id",
")"
] |
Mapping of input types to class strings
|
[
"Mapping",
"of",
"input",
"types",
"to",
"class",
"strings"
] |
[
"\"\"\"Mapping of input types to class strings\"\"\"",
"#??"
] |
[
{
"param": "type_id",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "type_id",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def socket_type_to_class(type_id):
if type_id == 'RGBA':
return 'NodeSocketColor'
elif type_id == 'VALUE':
return 'NodeSocketFloat'
elif type_id == 'VECTOR':
return 'NodeSocketVector'
elif type_id == 'CUSTOM':
print("WARNING! Mapping custom socket tupe to float")
return 'NodeSocketFloat'
else:
raise Exception('Unknown node socket type: '+type_id)
| 1,402 | 547 |
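Spot checks for socket_type_to_class above:

for type_id in ('RGBA', 'VALUE', 'VECTOR'):
    print(type_id, '->', socket_type_to_class(type_id))
# RGBA -> NodeSocketColor; VALUE -> NodeSocketFloat; VECTOR -> NodeSocketVector
# 'CUSTOM' falls back to NodeSocketFloat with a warning; any other id raises Exception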
e4a33667ab3fd607f278a07416c9b08e8c384fb7
|
cltl/LongTailIdentity
|
systems/utils.py
|
[
"Apache-2.0"
] |
Python
|
lookup_person_in_list
|
<not_specific>
|
def lookup_person_in_list(name, ments):
"""Lookup a person in a list of strings as a partial match."""
for m in ments:
if name in m:
return True
return False
|
Lookup a person in a list of strings as a partial match.
|
Lookup a person in a list of strings as a partial match.
|
[
"Lookup",
"a",
"person",
"in",
"a",
"list",
"of",
"strings",
"as",
"a",
"partial",
"match",
"."
] |
def lookup_person_in_list(name, ments):
for m in ments:
if name in m:
return True
return False
|
[
"def",
"lookup_person_in_list",
"(",
"name",
",",
"ments",
")",
":",
"for",
"m",
"in",
"ments",
":",
"if",
"name",
"in",
"m",
":",
"return",
"True",
"return",
"False"
] |
Lookup a person in a list of strings as a partial match.
|
[
"Lookup",
"a",
"person",
"in",
"a",
"list",
"of",
"strings",
"as",
"a",
"partial",
"match",
"."
] |
[
"\"\"\"Lookup a person in a list of strings as a partial match.\"\"\""
] |
[
{
"param": "name",
"type": null
},
{
"param": "ments",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ments",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def lookup_person_in_list(name, ments):
for m in ments:
if name in m:
return True
return False
| 1,403 | 0 |
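A minimal usage sketch for lookup_person_in_list above; the match is a plain substring test:

mentions = ['Barack Obama', 'Angela Merkel']
print(lookup_person_in_list('Obama', mentions))       # True
print(lookup_person_in_list('Merkel, A.', mentions))  # False, no mention contains this string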
385e00fd7fecc34f29e7648aa9a492706c3e3e59
|
R-Korea/weekly_R_quiz
|
utils/file_utils.py
|
[
"BSD-3-Clause"
] |
Python
|
is_ignored
|
bool
|
def is_ignored(path: str, ignored_list: List[str]) -> bool:
"""Return if path should be ignored
Args:
path (str): Path to test
ignored_list (List[str]): Path should not contain any of the list
Returns:
bool: If the path should be ignored, it returns True else False
"""
for ignored_path in ignored_list:
if ignored_path in path:
return True
return False
|
Return if path should be ignored
Args:
path (str): Path to test
ignored_list (List[str]): Path should not contain any of the list
Returns:
bool: If the path should be ignored, it returns True else False
|
Return if path should be ignored
|
[
"Return",
"if",
"path",
"should",
"be",
"ignored"
] |
def is_ignored(path: str, ignored_list: List[str]) -> bool:
for ignored_path in ignored_list:
if ignored_path in path:
return True
return False
|
[
"def",
"is_ignored",
"(",
"path",
":",
"str",
",",
"ignored_list",
":",
"List",
"[",
"str",
"]",
")",
"->",
"bool",
":",
"for",
"ignored_path",
"in",
"ignored_list",
":",
"if",
"ignored_path",
"in",
"path",
":",
"return",
"True",
"return",
"False"
] |
Return if path should be ignored
|
[
"Return",
"if",
"path",
"should",
"be",
"ignored"
] |
[
"\"\"\"Return if path should be ignored\n\n Args:\n path (str): Path to test\n ignored_list (List[str]): Path should not contain any of the list\n\n Returns:\n bool: If the path should be ignored, it returns True else False\n\n \"\"\""
] |
[
{
"param": "path",
"type": "str"
},
{
"param": "ignored_list",
"type": "List[str]"
}
] |
{
"returns": [
{
"docstring": "If the path should be ignored, it returns True else False",
"docstring_tokens": [
"If",
"the",
"path",
"should",
"be",
"ignored",
"it",
"returns",
"True",
"else",
"False"
],
"type": "bool"
}
],
"raises": [],
"params": [
{
"identifier": "path",
"type": "str",
"docstring": "Path to test",
"docstring_tokens": [
"Path",
"to",
"test"
],
"default": null,
"is_optional": false
},
{
"identifier": "ignored_list",
"type": "List[str]",
"docstring": "Path should not contain any of the list",
"docstring_tokens": [
"Path",
"should",
"not",
"contain",
"any",
"of",
"the",
"list"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
from typing import List
def is_ignored(path: str, ignored_list: List[str]) -> bool:
for ignored_path in ignored_list:
if ignored_path in path:
return True
return False
| 1,404 | 408 |
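Spot checks for is_ignored above, assuming the function (with its typing.List import) is in scope:

ignored = ['.git', 'node_modules']
print(is_ignored('repo/.git/config', ignored))  # True
print(is_ignored('repo/src/main.py', ignored))  # False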
a36eeb42d8ca87258341de5c8d09dfdc168fa45b
|
mdickinson/refcycle
|
refcycle/directed_graph.py
|
[
"Apache-2.0"
] |
Python
|
from_edge_pairs
|
<not_specific>
|
def from_edge_pairs(cls, vertices, edge_pairs):
"""
Create a DirectedGraph from a collection of vertices
and a collection of pairs giving links between the vertices.
"""
vertices = set(vertices)
edges = set()
heads = {}
tails = {}
# Number the edges arbitrarily.
edge_identifier = itertools.count()
for tail, head in edge_pairs:
edge = next(edge_identifier)
edges.add(edge)
heads[edge] = head
tails[edge] = tail
return cls._raw(
vertices=vertices,
edges=edges,
heads=heads,
tails=tails,
)
|
Create a DirectedGraph from a collection of vertices
and a collection of pairs giving links between the vertices.
|
Create a DirectedGraph from a collection of vertices
and a collection of pairs giving links between the vertices.
|
[
"Create",
"a",
"DirectedGraph",
"from",
"a",
"collection",
"of",
"vertices",
"and",
"a",
"collection",
"of",
"pairs",
"giving",
"links",
"between",
"the",
"vertices",
"."
] |
def from_edge_pairs(cls, vertices, edge_pairs):
vertices = set(vertices)
edges = set()
heads = {}
tails = {}
edge_identifier = itertools.count()
for tail, head in edge_pairs:
edge = next(edge_identifier)
edges.add(edge)
heads[edge] = head
tails[edge] = tail
return cls._raw(
vertices=vertices,
edges=edges,
heads=heads,
tails=tails,
)
|
[
"def",
"from_edge_pairs",
"(",
"cls",
",",
"vertices",
",",
"edge_pairs",
")",
":",
"vertices",
"=",
"set",
"(",
"vertices",
")",
"edges",
"=",
"set",
"(",
")",
"heads",
"=",
"{",
"}",
"tails",
"=",
"{",
"}",
"edge_identifier",
"=",
"itertools",
".",
"count",
"(",
")",
"for",
"tail",
",",
"head",
"in",
"edge_pairs",
":",
"edge",
"=",
"next",
"(",
"edge_identifier",
")",
"edges",
".",
"add",
"(",
"edge",
")",
"heads",
"[",
"edge",
"]",
"=",
"head",
"tails",
"[",
"edge",
"]",
"=",
"tail",
"return",
"cls",
".",
"_raw",
"(",
"vertices",
"=",
"vertices",
",",
"edges",
"=",
"edges",
",",
"heads",
"=",
"heads",
",",
"tails",
"=",
"tails",
",",
")"
] |
Create a DirectedGraph from a collection of vertices
and a collection of pairs giving links between the vertices.
|
[
"Create",
"a",
"DirectedGraph",
"from",
"a",
"collection",
"of",
"vertices",
"and",
"a",
"collection",
"of",
"pairs",
"giving",
"links",
"between",
"the",
"vertices",
"."
] |
[
"\"\"\"\n Create a DirectedGraph from a collection of vertices\n and a collection of pairs giving links between the vertices.\n\n \"\"\"",
"# Number the edges arbitrarily."
] |
[
{
"param": "cls",
"type": null
},
{
"param": "vertices",
"type": null
},
{
"param": "edge_pairs",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "vertices",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "edge_pairs",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import itertools
def from_edge_pairs(cls, vertices, edge_pairs):
vertices = set(vertices)
edges = set()
heads = {}
tails = {}
edge_identifier = itertools.count()
for tail, head in edge_pairs:
edge = next(edge_identifier)
edges.add(edge)
heads[edge] = head
tails[edge] = tail
return cls._raw(
vertices=vertices,
edges=edges,
heads=heads,
tails=tails,
)
| 1,405 | 309 |
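A hypothetical harness for the unbound from_edge_pairs above, assuming the function and its itertools import are in scope; the stub's _raw echoes its inputs so the arbitrary edge numbering is visible:

class GraphStub:
    @classmethod
    def _raw(cls, vertices, edges, heads, tails):
        return {'vertices': vertices, 'edges': edges, 'heads': heads, 'tails': tails}
    from_edge_pairs = classmethod(from_edge_pairs)

g = GraphStub.from_edge_pairs([1, 2, 3], [(1, 2), (2, 3)])
print(g['tails'], g['heads'])  # {0: 1, 1: 2} {0: 2, 1: 3}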
9739eaa7b4cd0833d7ea50d3b1f5463918913f70
|
jshepp/Quality-Mods-Program
|
copy-libs.py
|
[
"MIT"
] |
Python
|
copy_libs
|
None
|
def copy_libs(
libs_to_copy: Iterable[str],
source_path: str,
destination_path: str
) -> None:
"""Copy libraries from source to destination."""
for lib in libs_to_copy:
source_lib_path = os.path.join(source_path, lib)
if not os.path.exists(source_lib_path):
raise IOError(f"Library {lib} not found in {source_path}.")
destination_lib_path = os.path.join(destination_path, lib)
print(f"Copying {lib} from \"{source_path}\" to \"{destination_path}\".")
shutil.copyfile(source_lib_path, destination_lib_path)
|
Copy libraries from source to destination.
|
Copy libraries from source to destination.
|
[
"Copy",
"libraries",
"from",
"source",
"to",
"destination",
"."
] |
def copy_libs(
libs_to_copy: Iterable[str],
source_path: str,
destination_path: str
) -> None:
for lib in libs_to_copy:
source_lib_path = os.path.join(source_path, lib)
if not os.path.exists(source_lib_path):
raise IOError(f"Library {lib} not found in {source_path}.")
destination_lib_path = os.path.join(destination_path, lib)
print(f"Copying {lib} from \"{source_path}\" to \"{destination_path}\".")
shutil.copyfile(source_lib_path, destination_lib_path)
|
[
"def",
"copy_libs",
"(",
"libs_to_copy",
":",
"Iterable",
"[",
"str",
"]",
",",
"source_path",
":",
"str",
",",
"destination_path",
":",
"str",
")",
"->",
"None",
":",
"for",
"lib",
"in",
"libs_to_copy",
":",
"source_lib_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"source_path",
",",
"lib",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"source_lib_path",
")",
":",
"raise",
"IOError",
"(",
"f\"Library {lib} not found in {source_path}.\"",
")",
"destination_lib_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"destination_path",
",",
"lib",
")",
"print",
"(",
"f\"Copying {lib} from \\\"{source_path}\\\" to \\\"{destination_path}\\\".\"",
")",
"shutil",
".",
"copyfile",
"(",
"source_lib_path",
",",
"destination_lib_path",
")"
] |
Copy libraries from source to destination.
|
[
"Copy",
"libraries",
"from",
"source",
"to",
"destination",
"."
] |
[
"\"\"\"Copy libraries from source to destination.\"\"\""
] |
[
{
"param": "libs_to_copy",
"type": "Iterable[str]"
},
{
"param": "source_path",
"type": "str"
},
{
"param": "destination_path",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "libs_to_copy",
"type": "Iterable[str]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "source_path",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "destination_path",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
from typing import Iterable
import shutil
import os
def copy_libs(
libs_to_copy: Iterable[str],
source_path: str,
destination_path: str
) -> None:
for lib in libs_to_copy:
source_lib_path = os.path.join(source_path, lib)
if not os.path.exists(source_lib_path):
raise IOError(f"Library {lib} not found in {source_path}.")
destination_lib_path = os.path.join(destination_path, lib)
print(f"Copying {lib} from \"{source_path}\" to \"{destination_path}\".")
shutil.copyfile(source_lib_path, destination_lib_path)
| 1,406 | 91 |
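A minimal usage sketch for copy_libs above, assuming the function and its imports (including typing.Iterable) are in scope; the paths are temporary and invented:

import os, tempfile

src, dst = tempfile.mkdtemp(), tempfile.mkdtemp()
open(os.path.join(src, 'libdemo.so'), 'w').close()
copy_libs(['libdemo.so'], src, dst)  # prints the copy message; a missing lib raises IOError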
69eff81508fadcc641cf47e0566887efeddd79c5
|
vanheeringen-lab/seq2science
|
seq2science/util.py
|
[
"MIT"
] |
Python
|
is_local
|
bool
|
def is_local(assembly: str, ftype: str, config: dict) -> bool:
"""checks if genomic file(s) are present locally"""
file = os.path.join(config['genome_dir'], assembly, assembly)
local_fasta = os.path.exists(f"{file}.fa")
local_gtf = os.path.exists(f"{file}.annotation.gtf")
local_bed = os.path.exists(f"{file}.annotation.bed")
if ftype == "genome":
return local_fasta
if ftype == "annotation":
# check genome and annotations, as genome is always needed
return local_gtf and local_bed and local_fasta
|
checks if genomic file(s) are present locally
|
checks if genomic file(s) are present locally
|
[
"checks",
"if",
"genomic",
"file",
"(",
"s",
")",
"are",
"present",
"locally"
] |
def is_local(assembly: str, ftype: str, config: dict) -> bool:
file = os.path.join(config['genome_dir'], assembly, assembly)
local_fasta = os.path.exists(f"{file}.fa")
local_gtf = os.path.exists(f"{file}.annotation.gtf")
local_bed = os.path.exists(f"{file}.annotation.bed")
if ftype == "genome":
return local_fasta
if ftype == "annotation":
return local_gtf and local_bed and local_fasta
|
[
"def",
"is_local",
"(",
"assembly",
":",
"str",
",",
"ftype",
":",
"str",
",",
"config",
":",
"dict",
")",
"->",
"bool",
":",
"file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"config",
"[",
"'genome_dir'",
"]",
",",
"assembly",
",",
"assembly",
")",
"local_fasta",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"f\"{file}.fa\"",
")",
"local_gtf",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"f\"{file}.annotation.gtf\"",
")",
"local_bed",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"f\"{file}.annotation.bed\"",
")",
"if",
"ftype",
"==",
"\"genome\"",
":",
"return",
"local_fasta",
"if",
"ftype",
"==",
"\"annotation\"",
":",
"return",
"local_gtf",
"and",
"local_bed",
"and",
"local_fasta"
] |
checks if genomic file(s) are present locally
|
[
"checks",
"if",
"genomic",
"file",
"(",
"s",
")",
"are",
"present",
"locally"
] |
[
"\"\"\"checks if genomic file(s) are present locally\"\"\"",
"# check genome and annotations, as genome is always needed"
] |
[
{
"param": "assembly",
"type": "str"
},
{
"param": "ftype",
"type": "str"
},
{
"param": "config",
"type": "dict"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "assembly",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "ftype",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "config",
"type": "dict",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def is_local(assembly: str, ftype: str, config: dict) -> bool:
file = os.path.join(config['genome_dir'], assembly, assembly)
local_fasta = os.path.exists(f"{file}.fa")
local_gtf = os.path.exists(f"{file}.annotation.gtf")
local_bed = os.path.exists(f"{file}.annotation.bed")
if ftype == "genome":
return local_fasta
if ftype == "annotation":
return local_gtf and local_bed and local_fasta
| 1,407 | 644 |
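A minimal usage sketch for is_local above; note that an ftype other than 'genome' or 'annotation' falls off the end and returns None despite the bool annotation:

config = {'genome_dir': '/data/genomes'}  # hypothetical layout: /data/genomes/<assembly>/<assembly>.fa
print(is_local('GRCh38', 'genome', config))      # True only if the .fa file exists locally
print(is_local('GRCh38', 'annotation', config))  # needs .fa, .annotation.gtf and .annotation.bed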
a81fcb65870b2d28996af4f9a3af86224019531d
|
Fed-X/screeps-starter-python-master
|
build.py
|
[
"MIT"
] |
Python
|
possible_pip_binary_paths
|
<not_specific>
|
def possible_pip_binary_paths(config):
"""
Finds all different places to look for a `pip` binary to run.
:type config: Configuration
"""
files = [
os.path.join(config.base_dir, 'env', 'bin', 'pip'),
os.path.join(config.base_dir, 'env', 'bin', 'pip.exe'),
os.path.join(config.base_dir, 'env', 'Scripts', 'pip.exe')
]
if not config.enter_env:
for path in [shutil.which('pip'), shutil.which('pip.exe')]:
if path is not None:
files.append(path)
return files
|
Finds all different places to look for a `pip` binary to run.
:type config: Configuration
|
Finds all different places to look for a `pip` binary to run.
|
[
"Finds",
"all",
"different",
"places",
"to",
"look",
"for",
"a",
"`",
"pip",
"`",
"binary",
"to",
"run",
"."
] |
def possible_pip_binary_paths(config):
files = [
os.path.join(config.base_dir, 'env', 'bin', 'pip'),
os.path.join(config.base_dir, 'env', 'bin', 'pip.exe'),
os.path.join(config.base_dir, 'env', 'Scripts', 'pip.exe')
]
if not config.enter_env:
for path in [shutil.which('pip'), shutil.which('pip.exe')]:
if path is not None:
files.append(path)
return files
|
[
"def",
"possible_pip_binary_paths",
"(",
"config",
")",
":",
"files",
"=",
"[",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"base_dir",
",",
"'env'",
",",
"'bin'",
",",
"'pip'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"base_dir",
",",
"'env'",
",",
"'bin'",
",",
"'pip.exe'",
")",
",",
"os",
".",
"path",
".",
"join",
"(",
"config",
".",
"base_dir",
",",
"'env'",
",",
"'Scripts'",
",",
"'pip.exe'",
")",
"]",
"if",
"not",
"config",
".",
"enter_env",
":",
"for",
"path",
"in",
"[",
"shutil",
".",
"which",
"(",
"'pip'",
")",
",",
"shutil",
".",
"which",
"(",
"'pip.exe'",
")",
"]",
":",
"if",
"path",
"is",
"not",
"None",
":",
"files",
".",
"append",
"(",
"path",
")",
"return",
"files"
] |
Finds all different places to look for a `pip` binary to run.
|
[
"Finds",
"all",
"different",
"places",
"to",
"look",
"for",
"a",
"`",
"pip",
"`",
"binary",
"to",
"run",
"."
] |
[
"\"\"\"\n Finds all different places to look for a `pip` binary to run.\n\n :type config: Configuration\n \"\"\""
] |
[
{
"param": "config",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "config",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import shutil
import os
def possible_pip_binary_paths(config):
files = [
os.path.join(config.base_dir, 'env', 'bin', 'pip'),
os.path.join(config.base_dir, 'env', 'bin', 'pip.exe'),
os.path.join(config.base_dir, 'env', 'Scripts', 'pip.exe')
]
if not config.enter_env:
for path in [shutil.which('pip'), shutil.which('pip.exe')]:
if path is not None:
files.append(path)
return files
| 1,409 | 6 |
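A hypothetical config stand-in for possible_pip_binary_paths above; only base_dir and enter_env are read:

from types import SimpleNamespace

config = SimpleNamespace(base_dir='/tmp/project', enter_env=False)
for candidate in possible_pip_binary_paths(config):
    print(candidate)  # three env-relative paths, plus any pip found on PATH since enter_env is False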
03a5a849c4550767a5be0dff227651edc7bf79f1
|
syhw/contextual_word_segmentation
|
mark_scripts/topicsandcollocations/tb.py
|
[
"MIT"
] |
Python
|
is_phrasal
|
<not_specific>
|
def is_phrasal(subtree):
"""True if this treebank subtree is not a terminal or a preterminal node."""
return isinstance(subtree, list) and \
(len(subtree) == 1 or isinstance(subtree[1], list))
|
True if this treebank subtree is not a terminal or a preterminal node.
|
True if this treebank subtree is not a terminal or a preterminal node.
|
[
"True",
"if",
"this",
"treebank",
"subtree",
"is",
"not",
"a",
"terminal",
"or",
"a",
"preterminal",
"node",
"."
] |
def is_phrasal(subtree):
return isinstance(subtree, list) and \
(len(subtree) == 1 or isinstance(subtree[1], list))
|
[
"def",
"is_phrasal",
"(",
"subtree",
")",
":",
"return",
"isinstance",
"(",
"subtree",
",",
"list",
")",
"and",
"(",
"len",
"(",
"subtree",
")",
"==",
"1",
"or",
"isinstance",
"(",
"subtree",
"[",
"1",
"]",
",",
"list",
")",
")"
] |
True if this treebank subtree is not a terminal or a preterminal node.
|
[
"True",
"if",
"this",
"treebank",
"subtree",
"is",
"not",
"a",
"terminal",
"or",
"a",
"preterminal",
"node",
"."
] |
[
"\"\"\"True if this treebank subtree is not a terminal or a preterminal node.\"\"\""
] |
[
{
"param": "subtree",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "subtree",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_phrasal(subtree):
return isinstance(subtree, list) and \
(len(subtree) == 1 or isinstance(subtree[1], list))
| 1,410 | 498 |
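Spot checks for is_phrasal above, using the nested-list treebank encoding the predicate expects:

print(is_phrasal(['NP', ['DT', 'the'], ['NN', 'dog']]))  # True, phrasal node
print(is_phrasal(['NN', 'dog']))                         # False, preterminal
print(is_phrasal('dog'))                                 # False, terminal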
6abc38d63517475e251118f4d517210da9b58e5c
|
Nico-Curti/DNetPRO
|
setup.py
|
[
"MIT"
] |
Python
|
read_description
|
<not_specific>
|
def read_description (readme_filename):
'''
Description package from filename
Parameters
----------
readme_filename : str
filename with readme information (e.g README.md)
Returns
-------
description : str
str with description
'''
  try:
    with open(readme_filename, 'r') as fp:
      description = '\n'
      description += fp.read()
    return description
  except Exception:
    return ''
|
Description package from filename
Parameters
----------
readme_filename : str
filename with readme information (e.g README.md)
Returns
-------
description : str
str with description
|
Description package from filename
Parameters
readme_filename : str
filename with readme information
Returns
description : str
str with description
|
[
"Description",
"package",
"from",
"filename",
"Parameters",
"readme_filename",
":",
"str",
"filename",
"with",
"readme",
"information",
"Returns",
"description",
":",
"str",
"str",
"with",
"description"
] |
def read_description (readme_filename):
  try:
    with open(readme_filename, 'r') as fp:
      description = '\n'
      description += fp.read()
    return description
  except Exception:
    return ''
|
[
"def",
"read_description",
"(",
"readme_filename",
")",
":",
"try",
":",
"with",
"open",
"(",
"readme_filename",
",",
"'r'",
")",
"as",
"fp",
":",
"description",
"=",
"'\\n'",
"description",
"+=",
"fp",
".",
"read",
"(",
")",
"return",
"description",
"except",
"Exception",
":",
"return",
"''"
] |
Description package from filename
Parameters
|
[
"Description",
"package",
"from",
"filename",
"Parameters"
] |
[
"'''\n Description package from filename\n\n Parameters\n ----------\n readme_filename : str\n filename with readme information (e.g README.md)\n\n Returns\n -------\n description : str\n str with description\n '''"
] |
[
{
"param": "readme_filename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "readme_filename",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def read_description (readme_filename):
  try:
    with open(readme_filename, 'r') as fp:
      description = '\n'
      description += fp.read()
    return description
  except Exception:
    return ''
| 1,411 | 389 |
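A quick check for read_description above, with the missing success-path return restored in this record's code:

with open('README_demo.md', 'w') as fp:
    fp.write('# Demo')
print(repr(read_description('README_demo.md')))  # '\n# Demo'
print(repr(read_description('missing.md')))      # '' since any open/read failure is swallowed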
f1201c77eb98f8ab3338ef2e28f887f61c466539
|
elliottd/imagination
|
nmt/utils.py
|
[
"BSD-3-Clause"
] |
Python
|
zipp
| null |
def zipp(params, theano_params):
"""
Push parameters to Theano shared variables
:param params:
:param theano_params:
:return:
"""
for kk, vv in params.items():
theano_params[kk].set_value(vv)
|
Push parameters to Theano shared variables
:param params:
:param theano_params:
:return:
|
Push parameters to Theano shared variables
|
[
"Push",
"parameters",
"to",
"Theano",
"shared",
"variables"
] |
def zipp(params, theano_params):
for kk, vv in params.items():
theano_params[kk].set_value(vv)
|
[
"def",
"zipp",
"(",
"params",
",",
"theano_params",
")",
":",
"for",
"kk",
",",
"vv",
"in",
"params",
".",
"items",
"(",
")",
":",
"theano_params",
"[",
"kk",
"]",
".",
"set_value",
"(",
"vv",
")"
] |
Push parameters to Theano shared variables
|
[
"Push",
"parameters",
"to",
"Theano",
"shared",
"variables"
] |
[
"\"\"\"\n Push parameters to Theano shared variables\n\n :param params:\n :param theano_params:\n :return:\n \"\"\""
] |
[
{
"param": "params",
"type": null
},
{
"param": "theano_params",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "params",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
},
{
"identifier": "theano_params",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def zipp(params, theano_params):
for kk, vv in params.items():
theano_params[kk].set_value(vv)
| 1,412 | 443 |
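A shape-only sketch for zipp above; a stub with set_value stands in for a real Theano shared variable:

class SharedStub:
    def __init__(self):
        self.value = None
    def set_value(self, value):
        self.value = value

theano_params = {'W': SharedStub()}
zipp({'W': [[0.1, 0.2]]}, theano_params)
print(theano_params['W'].value)  # [[0.1, 0.2]]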
7a46bbec52297b8fa896d0a0633925038d3635c3
|
bala-95/koalas
|
databricks/koalas/series.py
|
[
"Apache-2.0"
] |
Python
|
unpack_scalar
|
<not_specific>
|
def unpack_scalar(sdf):
"""
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
"""
l = sdf.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
|
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
|
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
|
[
"Takes",
"a",
"dataframe",
"that",
"is",
"supposed",
"to",
"contain",
"a",
"single",
"row",
"with",
"a",
"single",
"scalar",
"value",
"and",
"returns",
"this",
"value",
"."
] |
def unpack_scalar(sdf):
l = sdf.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
|
[
"def",
"unpack_scalar",
"(",
"sdf",
")",
":",
"l",
"=",
"sdf",
".",
"head",
"(",
"2",
")",
"assert",
"len",
"(",
"l",
")",
"==",
"1",
",",
"(",
"sdf",
",",
"l",
")",
"row",
"=",
"l",
"[",
"0",
"]",
"l2",
"=",
"list",
"(",
"row",
".",
"asDict",
"(",
")",
".",
"values",
"(",
")",
")",
"assert",
"len",
"(",
"l2",
")",
"==",
"1",
",",
"(",
"row",
",",
"l2",
")",
"return",
"l2",
"[",
"0",
"]"
] |
Takes a dataframe that is supposed to contain a single row with a single scalar value,
and returns this value.
|
[
"Takes",
"a",
"dataframe",
"that",
"is",
"supposed",
"to",
"contain",
"a",
"single",
"row",
"with",
"a",
"single",
"scalar",
"value",
"and",
"returns",
"this",
"value",
"."
] |
[
"\"\"\"\n Takes a dataframe that is supposed to contain a single row with a single scalar value,\n and returns this value.\n \"\"\""
] |
[
{
"param": "sdf",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "sdf",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def unpack_scalar(sdf):
l = sdf.head(2)
assert len(l) == 1, (sdf, l)
row = l[0]
l2 = list(row.asDict().values())
assert len(l2) == 1, (row, l2)
return l2[0]
| 1,413 | 390 |
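A hypothetical stand-in for unpack_scalar above; head(2) and asDict() are mimicked so no Spark session is needed:

class RowStub:
    def __init__(self, d):
        self._d = d
    def asDict(self):
        return dict(self._d)

class FrameStub:
    def head(self, n):
        return [RowStub({'count': 42})]

print(unpack_scalar(FrameStub()))  # 42; both asserts demand exactly one row and one column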
092ee5784aaf895c5200d72e81aa0c7a27076816
|
py-graphit/py-graphit
|
graphit/graph_io/io_gml_format.py
|
[
"Apache-2.0"
] |
Python
|
build_edges
| null |
def build_edges(graph, record):
"""
Add edges to graphit Graph based on GML edge records
:param graph: graphit Graph to add edges to
:type graph: :graphit:Graph
:param record: intermediate GML record hierarchy containing edges
:type record: :Record:
:return: Graph with edges added
:rtype: :graphit:Graph
"""
if record.name == 'edge':
source = None
target = None
for attr in record.attr:
if attr[0] == 'source':
source = attr[1]
elif attr[0] == 'target':
target = attr[1]
if source is not None and target is not None:
graph.add_edge(source, target, **record.to_dict({}))
else:
logging.error("GML import, skipping edge without 'source' and/or 'target'")
for child_record in record:
build_edges(graph, child_record)
|
Add edges to graphit Graph based on GML edge records
:param graph: graphit Graph to add edges to
:type graph: :graphit:Graph
:param record: intermediate GML record hierarchy containing edges
:type record: :Record:
:return: Graph with edges added
:rtype: :graphit:Graph
|
Add edges to graphit Graph based on GML edge records
|
[
"Add",
"edges",
"to",
"graphit",
"Graph",
"based",
"on",
"GML",
"edge",
"records"
] |
def build_edges(graph, record):
if record.name == 'edge':
source = None
target = None
for attr in record.attr:
if attr[0] == 'source':
source = attr[1]
elif attr[0] == 'target':
target = attr[1]
if source is not None and target is not None:
graph.add_edge(source, target, **record.to_dict({}))
else:
logging.error("GML import, skipping edge without 'source' and/or 'target'")
for child_record in record:
build_edges(graph, child_record)
|
[
"def",
"build_edges",
"(",
"graph",
",",
"record",
")",
":",
"if",
"record",
".",
"name",
"==",
"'edge'",
":",
"source",
"=",
"None",
"target",
"=",
"None",
"for",
"attr",
"in",
"record",
".",
"attr",
":",
"if",
"attr",
"[",
"0",
"]",
"==",
"'source'",
":",
"source",
"=",
"attr",
"[",
"1",
"]",
"elif",
"attr",
"[",
"0",
"]",
"==",
"'target'",
":",
"target",
"=",
"attr",
"[",
"1",
"]",
"if",
"source",
"is",
"not",
"None",
"and",
"target",
"is",
"not",
"None",
":",
"graph",
".",
"add_edge",
"(",
"source",
",",
"target",
",",
"**",
"record",
".",
"to_dict",
"(",
"{",
"}",
")",
")",
"else",
":",
"logging",
".",
"error",
"(",
"\"GML import, skipping edge without 'source' and/or 'target'\"",
")",
"for",
"child_record",
"in",
"record",
":",
"build_edges",
"(",
"graph",
",",
"child_record",
")"
] |
Add edges to graphit Graph based on GML edge records
|
[
"Add",
"edges",
"to",
"graphit",
"Graph",
"based",
"on",
"GML",
"edge",
"records"
] |
[
"\"\"\"\n Add edges to graphit Graph based on GML edge records\n\n :param graph: graphit Graph to add edges to\n :type graph: :graphit:Graph\n :param record: intermediate GML record hierarchy containing edges\n :type record: :Record:\n\n :return: Graph with edges added\n :rtype: :graphit:Graph\n \"\"\""
] |
[
{
"param": "graph",
"type": null
},
{
"param": "record",
"type": null
}
] |
{
"returns": [
{
"docstring": "Graph with edges added",
"docstring_tokens": [
"Graph",
"with",
"edges",
"added"
],
"type": ":graphit:Graph"
}
],
"raises": [],
"params": [
{
"identifier": "graph",
"type": null,
"docstring": "graphit Graph to add edges to",
"docstring_tokens": [
"graphit",
"Graph",
"to",
"add",
"edges",
"to"
],
"default": null,
"is_optional": null
},
{
"identifier": "record",
"type": null,
"docstring": "intermediate GML record hierarchy containing edges",
"docstring_tokens": [
"intermediate",
"GML",
"record",
"hierarchy",
"containing",
"edges"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import logging
def build_edges(graph, record):
if record.name == 'edge':
source = None
target = None
for attr in record.attr:
if attr[0] == 'source':
source = attr[1]
elif attr[0] == 'target':
target = attr[1]
if source is not None and target is not None:
graph.add_edge(source, target, **record.to_dict({}))
else:
logging.error("GML import, skipping edge without 'source' and/or 'target'")
for child_record in record:
build_edges(graph, child_record)
| 1,414 | 860 |
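A usage sketch for `build_edges`. `_RecordStub` and `_GraphStub` are hypothetical stand-ins; in particular the real graphit `Record.to_dict` may filter attributes differently, whereas this stub simply copies all of them.

class _RecordStub:
    # Just enough of the GML Record interface: name, attr pairs,
    # to_dict, and iteration over child records.
    def __init__(self, name, attr=(), children=()):
        self.name = name
        self.attr = list(attr)
        self._children = list(children)
    def to_dict(self, d):
        d.update(self.attr)
        return d
    def __iter__(self):
        return iter(self._children)

class _GraphStub:
    def __init__(self):
        self.edges = []
    def add_edge(self, source, target, **attrs):
        self.edges.append((source, target, attrs))

root = _RecordStub("graph", children=[
    _RecordStub("edge", attr=[("source", 1), ("target", 2)]),
    _RecordStub("edge", attr=[("source", 1)]),   # logged and skipped: no target
])
g = _GraphStub()
build_edges(g, root)
assert g.edges == [(1, 2, {"source": 1, "target": 2})]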
1dd40146b0001b1b7118df9a1abddab0a43dc13b
|
zivid/python-samples
|
source/applications/advanced/hand_eye_calibration/verify_hand_eye_with_visualization.py
|
[
"BSD-3-Clause"
] |
Python
|
_path_list_creator
|
<not_specific>
|
def _path_list_creator(path, file_prefix_name, number_of_digits_zfill, file_suffix_name):
"""Creates a list of paths where the files have a predefined prefix,
an incremental number and a predefined suffix on their name,
respectively. Eg.: img01.zdf
Args:
path: a path that leads to the files directory
file_prefix_name: a string that comes before the number
number_of_digits_zfill: a number of digits in the number
file_suffix_name: a string that comes after the number
Returns:
list_of_paths: list of appended paths
"""
num = 1
list_of_paths = []
while True:
file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}"
list_of_paths.append(file_path)
next_file_path = path / f"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}"
if not next_file_path.exists():
return list_of_paths
num = num + 1
|
Creates a list of paths where the files have a predefined prefix,
an incremental number and a predefined suffix on their name,
respectively. Eg.: img01.zdf
Args:
path: a path that leads to the files directory
file_prefix_name: a string that comes before the number
number_of_digits_zfill: a number of digits in the number
file_suffix_name: a string that comes after the number
Returns:
list_of_paths: list of appended paths
|
Creates a list of paths where the files have a predefined prefix,
an incremental number and a predefined suffix on their name,
respectively.
|
[
"Creates",
"a",
"list",
"of",
"paths",
"where",
"the",
"files",
"have",
"a",
"predefined",
"prefix",
"an",
"incremental",
"number",
"and",
"a",
"predefined",
"suffix",
"on",
"their",
"name",
"respectively",
"."
] |
def _path_list_creator(path, file_prefix_name, number_of_digits_zfill, file_suffix_name):
num = 1
list_of_paths = []
while True:
file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}"
list_of_paths.append(file_path)
next_file_path = path / f"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}"
if not next_file_path.exists():
return list_of_paths
num = num + 1
|
[
"def",
"_path_list_creator",
"(",
"path",
",",
"file_prefix_name",
",",
"number_of_digits_zfill",
",",
"file_suffix_name",
")",
":",
"num",
"=",
"1",
"list_of_paths",
"=",
"[",
"]",
"while",
"True",
":",
"file_path",
"=",
"path",
"/",
"f\"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}\"",
"list_of_paths",
".",
"append",
"(",
"file_path",
")",
"next_file_path",
"=",
"path",
"/",
"f\"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}\"",
"if",
"not",
"next_file_path",
".",
"exists",
"(",
")",
":",
"return",
"list_of_paths",
"num",
"=",
"num",
"+",
"1"
] |
Creates a list of paths where the files have a predefined prefix,
an incremental number and a predefined suffix on their name,
respectively.
|
[
"Creates",
"a",
"list",
"of",
"paths",
"where",
"the",
"files",
"have",
"a",
"predefined",
"prefix",
"an",
"incremental",
"number",
"and",
"a",
"predefined",
"suffix",
"on",
"their",
"name",
"respectively",
"."
] |
[
"\"\"\"Creates a list of paths where the files have a predefined prefix,\n an incremental number and a predefined suffix on their name,\n respectively. Eg.: img01.zdf\n\n Args:\n path: a path that leads to the files directory\n file_prefix_name: a string that comes before the number\n number_of_digits_zfill: a number of digits in the number\n file_suffix_name: a string that comes after the number\n\n Returns:\n list_of_paths: list of appended paths\n\n \"\"\""
] |
[
{
"param": "path",
"type": null
},
{
"param": "file_prefix_name",
"type": null
},
{
"param": "number_of_digits_zfill",
"type": null
},
{
"param": "file_suffix_name",
"type": null
}
] |
{
"returns": [
{
"docstring": "list of appended paths",
"docstring_tokens": [
"list",
"of",
"appended",
"paths"
],
"type": "list_of_paths"
}
],
"raises": [],
"params": [
{
"identifier": "path",
"type": null,
"docstring": "a path that leads to the files directory",
"docstring_tokens": [
"a",
"path",
"that",
"leads",
"to",
"the",
"files",
"directory"
],
"default": null,
"is_optional": null
},
{
"identifier": "file_prefix_name",
"type": null,
"docstring": "a string that comes before the number",
"docstring_tokens": [
"a",
"string",
"that",
"comes",
"before",
"the",
"number"
],
"default": null,
"is_optional": null
},
{
"identifier": "number_of_digits_zfill",
"type": null,
"docstring": "a number of digits in the number",
"docstring_tokens": [
"a",
"number",
"of",
"digits",
"in",
"the",
"number"
],
"default": null,
"is_optional": null
},
{
"identifier": "file_suffix_name",
"type": null,
"docstring": "a string that comes after the number",
"docstring_tokens": [
"a",
"string",
"that",
"comes",
"after",
"the",
"number"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _path_list_creator(path, file_prefix_name, number_of_digits_zfill, file_suffix_name):
num = 1
list_of_paths = []
while True:
file_path = path / f"{file_prefix_name}{str(num).zfill(number_of_digits_zfill)}{file_suffix_name}"
list_of_paths.append(file_path)
next_file_path = path / f"{file_prefix_name}{str(num+1).zfill(number_of_digits_zfill)}{file_suffix_name}"
if not next_file_path.exists():
return list_of_paths
num = num + 1
| 1,415 | 621 |
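A runnable sketch for `_path_list_creator` using a temporary directory (file names are illustrative):

import pathlib
import tempfile

tmp = pathlib.Path(tempfile.mkdtemp())
for n in (1, 2, 3):
    (tmp / f"img{n:02d}.zdf").touch()   # creates img01.zdf .. img03.zdf

paths = _path_list_creator(tmp, "img", 2, ".zdf")
assert [p.name for p in paths] == ["img01.zdf", "img02.zdf", "img03.zdf"]
# Note: only the *next* file is checked for existence, so the path for the
# first index is returned even if img01.zdf itself is missing.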
3ca07b7c06085378c58ca5656531bba595b1322e
|
JarronL/pyNRC
|
pynrc/simul/apt.py
|
[
"MIT"
] |
Python
|
_get_entry
|
<not_specific>
|
def _get_entry(dict, entry_number):
"""Return a numbered entry from a dictionary that corresponds to the observataion_list.yaml.
Parameters
----------
dict
entry_number
Returns
-------
"""
entry_key = 'EntryNumber{}'.format(entry_number)
for key, observation in dict.items():
if entry_key in observation.keys():
return observation[entry_key]
|
Return a numbered entry from a dictionary that corresponds to the observataion_list.yaml.
Parameters
----------
dict
entry_number
Returns
-------
|
Return a numbered entry from a dictionary that corresponds to the observataion_list.yaml.
Parameters
Returns
|
[
"Return",
"a",
"numbered",
"entry",
"from",
"a",
"dictionary",
"that",
"corresponds",
"to",
"the",
"observataion_list",
".",
"yaml",
".",
"Parameters",
"Returns"
] |
def _get_entry(dict, entry_number):
entry_key = 'EntryNumber{}'.format(entry_number)
for key, observation in dict.items():
if entry_key in observation.keys():
return observation[entry_key]
|
[
"def",
"_get_entry",
"(",
"dict",
",",
"entry_number",
")",
":",
"entry_key",
"=",
"'EntryNumber{}'",
".",
"format",
"(",
"entry_number",
")",
"for",
"key",
",",
"observation",
"in",
"dict",
".",
"items",
"(",
")",
":",
"if",
"entry_key",
"in",
"observation",
".",
"keys",
"(",
")",
":",
"return",
"observation",
"[",
"entry_key",
"]"
] |
Return a numbered entry from a dictionary that corresponds to the observataion_list.yaml.
|
[
"Return",
"a",
"numbered",
"entry",
"from",
"a",
"dictionary",
"that",
"corresponds",
"to",
"the",
"observataion_list",
".",
"yaml",
"."
] |
[
"\"\"\"Return a numbered entry from a dictionary that corresponds to the observataion_list.yaml.\n\n Parameters\n ----------\n dict\n entry_number\n\n Returns\n -------\n\n \"\"\""
] |
[
{
"param": "dict",
"type": null
},
{
"param": "entry_number",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "dict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "entry_number",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _get_entry(dict, entry_number):
entry_key = 'EntryNumber{}'.format(entry_number)
for key, observation in dict.items():
if entry_key in observation.keys():
return observation[entry_key]
| 1,416 | 714 |
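A usage sketch for `_get_entry` (the observation dictionary below is illustrative, not from the source):

observations = {
    "Observation1": {"EntryNumber1": {"filter": "F200W"}},
    "Observation2": {"EntryNumber2": {"filter": "F444W"}},
}
assert _get_entry(observations, 2) == {"filter": "F444W"}
assert _get_entry(observations, 3) is None   # no match: falls through, returns None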
70917a8a2daead722d3ab507de4d2c762eaf159b
|
gonzalo-munillag/Exponential_Randomized_Response
|
differential-privacy-library-main/diffprivlib/accountant.py
|
[
"MIT"
] |
Python
|
__total_delta_safe
|
<not_specific>
|
def __total_delta_safe(spent_budget, slack):
"""
Calculate total delta spend of `spent_budget`, with special consideration for floating point arithmetic.
Should yield greater precision, especially for a large number of budget spends with very small delta.
Parameters
----------
spent_budget: list of tuples of the form (epsilon, delta)
List of budget spends, for which the total delta spend is to be calculated.
slack: float
Delta slack parameter for composition of spends.
Returns
-------
float
Total delta spend.
"""
delta_spend = [slack]
for _, delta in spent_budget:
delta_spend.append(delta)
delta_spend.sort()
# (1 - a) * (1 - b) = 1 - (a + b - a * b)
prod = 0
for delta in delta_spend:
prod += delta - prod * delta
return prod
|
Calculate total delta spend of `spent_budget`, with special consideration for floating point arithmetic.
Should yield greater precision, especially for a large number of budget spends with very small delta.
Parameters
----------
spent_budget: list of tuples of the form (epsilon, delta)
List of budget spends, for which the total delta spend is to be calculated.
slack: float
Delta slack parameter for composition of spends.
Returns
-------
float
Total delta spend.
|
Calculate total delta spend of `spent_budget`, with special consideration for floating point arithmetic.
Should yield greater precision, especially for a large number of budget spends with very small delta.
Parameters
list of tuples of the form (epsilon, delta)
List of budget spends, for which the total delta spend is to be calculated.
float
Delta slack parameter for composition of spends.
Returns
float
Total delta spend.
|
[
"Calculate",
"total",
"delta",
"spend",
"of",
"`",
"spent_budget",
"`",
"with",
"special",
"consideration",
"for",
"floating",
"point",
"arithmetic",
".",
"Should",
"yield",
"greater",
"precision",
"especially",
"for",
"a",
"large",
"number",
"of",
"budget",
"spends",
"with",
"very",
"small",
"delta",
".",
"Parameters",
"list",
"of",
"tuples",
"of",
"the",
"form",
"(",
"epsilon",
"delta",
")",
"List",
"of",
"budget",
"spends",
"for",
"which",
"the",
"total",
"delta",
"spend",
"is",
"to",
"be",
"calculated",
".",
"float",
"Delta",
"slack",
"parameter",
"for",
"composition",
"of",
"spends",
".",
"Returns",
"float",
"Total",
"delta",
"spend",
"."
] |
def __total_delta_safe(spent_budget, slack):
delta_spend = [slack]
for _, delta in spent_budget:
delta_spend.append(delta)
delta_spend.sort()
prod = 0
for delta in delta_spend:
prod += delta - prod * delta
return prod
|
[
"def",
"__total_delta_safe",
"(",
"spent_budget",
",",
"slack",
")",
":",
"delta_spend",
"=",
"[",
"slack",
"]",
"for",
"_",
",",
"delta",
"in",
"spent_budget",
":",
"delta_spend",
".",
"append",
"(",
"delta",
")",
"delta_spend",
".",
"sort",
"(",
")",
"prod",
"=",
"0",
"for",
"delta",
"in",
"delta_spend",
":",
"prod",
"+=",
"delta",
"-",
"prod",
"*",
"delta",
"return",
"prod"
] |
Calculate total delta spend of `spent_budget`, with special consideration for floating point arithmetic.
|
[
"Calculate",
"total",
"delta",
"spend",
"of",
"`",
"spent_budget",
"`",
"with",
"special",
"consideration",
"for",
"floating",
"point",
"arithmetic",
"."
] |
[
"\"\"\"\n Calculate total delta spend of `spent_budget`, with special consideration for floating point arithmetic.\n Should yield greater precision, especially for a large number of budget spends with very small delta.\n\n Parameters\n ----------\n spent_budget: list of tuples of the form (epsilon, delta)\n List of budget spends, for which the total delta spend is to be calculated.\n\n slack: float\n Delta slack parameter for composition of spends.\n\n Returns\n -------\n float\n Total delta spend.\n\n \"\"\"",
"# (1 - a) * (1 - b) = 1 - (a + b - a * b)"
] |
[
{
"param": "spent_budget",
"type": null
},
{
"param": "slack",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "spent_budget",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "slack",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def __total_delta_safe(spent_budget, slack):
delta_spend = [slack]
for _, delta in spent_budget:
delta_spend.append(delta)
delta_spend.sort()
prod = 0
for delta in delta_spend:
prod += delta - prod * delta
return prod
| 1,417 | 867 |
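A worked example for `__total_delta_safe`, calling the module-level definition above directly (in the library itself this appears to live on the accountant class). The loop maintains the invariant prod = 1 - Π(1 - δ), so the result equals 1 - (1 - slack)·Π(1 - δᵢ) without computing the near-1 product first.

spent = [(0.1, 1e-10)] * 3
total = __total_delta_safe(spent, 0.0)
# Equivalent to 1 - (1 - 1e-10)**3, but accumulated incrementally so the
# tiny deltas are not lost to cancellation against 1.0.
assert abs(total - 3e-10) < 1e-19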
bcad74fa95f6b3f544b074c9a5495e11bac5138d
|
patkub/advent-of-code-2021
|
day01/day01.py
|
[
"BSD-3-Clause"
] |
Python
|
count_increased
|
<not_specific>
|
def count_increased(values):
"""
Count how many measurements increased from previous value
:param values: List of values
:returns: Number of measurements that increased from previous value
"""
# how many measurements increased
increased = 0
# first value
previous = values[0]
# rest of the values
for current in values[1:]:
if current > previous:
increased += 1
previous = current
return increased
|
Count how many measurements increased from previous value
:param values: List of values
:returns: Number of measurements that increased from previous value
|
Count how many measurements increased from previous value
|
[
"Count",
"how",
"many",
"measurements",
"increased",
"from",
"previous",
"value"
] |
def count_increased(values):
increased = 0
previous = values[0]
for current in values[1:]:
if current > previous:
increased += 1
previous = current
return increased
|
[
"def",
"count_increased",
"(",
"values",
")",
":",
"increased",
"=",
"0",
"previous",
"=",
"values",
"[",
"0",
"]",
"for",
"current",
"in",
"values",
"[",
"1",
":",
"]",
":",
"if",
"current",
">",
"previous",
":",
"increased",
"+=",
"1",
"previous",
"=",
"current",
"return",
"increased"
] |
Count how many measurements increased from previous value
|
[
"Count",
"how",
"many",
"measurements",
"increased",
"from",
"previous",
"value"
] |
[
"\"\"\"\n Count how many measurements increased from previous value\n\n :param values: List of values\n :returns: Number of measurements that increased from previous value\n \"\"\"",
"# how many measurements increased",
"# first value",
"# rest of the values"
] |
[
{
"param": "values",
"type": null
}
] |
{
"returns": [
{
"docstring": "Number of measurements that increased from previous value",
"docstring_tokens": [
"Number",
"of",
"measurements",
"that",
"increased",
"from",
"previous",
"value"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "values",
"type": null,
"docstring": "List of values",
"docstring_tokens": [
"List",
"of",
"values"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def count_increased(values):
increased = 0
previous = values[0]
for current in values[1:]:
if current > previous:
increased += 1
previous = current
return increased
| 1,418 | 166 |
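A quick check for `count_increased` on the Advent of Code 2021 day 1 sample input:

depths = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
assert count_increased(depths) == 7   # seven measurements larger than their predecessor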
03ef9f7a7846df269543838f8bf8ee5102e5da23
|
MatthieuMichon/advent-of-code
|
2020/day-17/process.py
|
[
"MIT"
] |
Python
|
list_indexes
|
list
|
def list_indexes(map_: dict[tuple, any], axis: int) -> list:
"""
List the indexes of a given axis in a mapping
:param map_: mapping of a property (activation) per grid position
:param axis: selected grid axis
:return: set of indexes across the given axis
"""
axis_count: int = len(next(iter(map_.keys())))
if axis >= axis_count:
return [0]
indexes = set(position[axis] for position in map_.keys())
index_list = sorted(indexes)
return index_list
|
List the indexes of a given axis in a mapping
:param map_: mapping of a property (activation) per grid position
:param axis: selected grid axis
:return: set of indexes across the given axis
|
List the indexes of a given axis in a mapping
|
[
"List",
"the",
"indexes",
"of",
"a",
"given",
"axis",
"in",
"a",
"mapping"
] |
def list_indexes(map_: dict[tuple, any], axis: int) -> list:
axis_count: int = len(next(iter(map_.keys())))
if axis >= axis_count:
return [0]
indexes = set(position[axis] for position in map_.keys())
index_list = sorted(indexes)
return index_list
|
[
"def",
"list_indexes",
"(",
"map_",
":",
"dict",
"[",
"tuple",
",",
"any",
"]",
",",
"axis",
":",
"int",
")",
"->",
"list",
":",
"axis_count",
":",
"int",
"=",
"len",
"(",
"next",
"(",
"iter",
"(",
"map_",
".",
"keys",
"(",
")",
")",
")",
")",
"if",
"axis",
">=",
"axis_count",
":",
"return",
"[",
"0",
"]",
"indexes",
"=",
"set",
"(",
"position",
"[",
"axis",
"]",
"for",
"position",
"in",
"map_",
".",
"keys",
"(",
")",
")",
"index_list",
"=",
"sorted",
"(",
"indexes",
")",
"return",
"index_list"
] |
List the indexes of a given axis in a mapping
|
[
"List",
"the",
"indexes",
"of",
"a",
"given",
"axis",
"in",
"a",
"mapping"
] |
[
"\"\"\"\n List the indexes of a given axis in a mapping\n\n :param map_: mapping of a property (activation) per grid position\n :param axis: selected grid axis\n :return: set of indexes across the given axis\n \"\"\""
] |
[
{
"param": "map_",
"type": "dict[tuple, any]"
},
{
"param": "axis",
"type": "int"
}
] |
{
"returns": [
{
"docstring": "set of indexes across the given axis",
"docstring_tokens": [
"set",
"of",
"indexes",
"across",
"the",
"given",
"axis"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "map_",
"type": "dict[tuple, any]",
"docstring": "mapping of a property (activation) per grid position",
"docstring_tokens": [
"mapping",
"of",
"a",
"property",
"(",
"activation",
")",
"per",
"grid",
"position"
],
"default": null,
"is_optional": null
},
{
"identifier": "axis",
"type": "int",
"docstring": "selected grid axis",
"docstring_tokens": [
"selected",
"grid",
"axis"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def list_indexes(map_: dict[tuple, any], axis: int) -> list:
axis_count: int = len(next(iter(map_.keys())))
if axis >= axis_count:
return [0]
indexes = set(position[axis] for position in map_.keys())
index_list = sorted(indexes)
return index_list
| 1,419 | 977 |
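A usage sketch for `list_indexes` on a small illustrative grid mapping:

grid = {(0, 0, 1): True, (2, 0, 1): True, (1, 5, 1): False}
assert list_indexes(grid, axis=0) == [0, 1, 2]   # sorted, deduplicated
assert list_indexes(grid, axis=1) == [0, 5]
assert list_indexes(grid, axis=3) == [0]          # axis beyond the tuple length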
f7dcce0b9cbace42ef49366065a99f8ef8b6f014
|
adampkehoe/ticdat
|
ticdat/sqlitetd.py
|
[
"BSD-2-Clause"
] |
Python
|
_fix_str
|
<not_specific>
|
def _fix_str(x):
"""
can't fix all the strings. won't work right if some jerk wants to insert '' or some other multiple
of consecutive of '. need bound parameters for that, which precludes storing things in readable sql
"""
rtn = []
for i,c in enumerate(x):
preceeding = x[i-1] if x else ""
following = x[i+1] if i < len(x)-1 else ""
if c != "'" or "'" in (preceeding, following):
rtn.append(c)
else:
rtn.append("''")
return "".join(rtn)
|
can't fix all the strings. won't work right if some jerk wants to insert '' or some other multiple
of consecutive of '. need bound parameters for that, which precludes storing things in readable sql
|
can't fix all the strings. won't work right if some jerk wants to insert '' or some other multiple
of consecutive of '. need bound parameters for that, which precludes storing things in readable sql
|
[
"can",
"'",
"t",
"fix",
"all",
"the",
"strings",
".",
"won",
"'",
"t",
"work",
"right",
"if",
"some",
"jerk",
"wants",
"to",
"insert",
"'",
"'",
"or",
"some",
"other",
"multiple",
"of",
"consecutive",
"of",
"'",
".",
"need",
"bound",
"parameters",
"for",
"that",
"which",
"precludes",
"storing",
"things",
"in",
"readable",
"sql"
] |
def _fix_str(x):
rtn = []
for i,c in enumerate(x):
preceeding = x[i-1] if x else ""
following = x[i+1] if i < len(x)-1 else ""
if c != "'" or "'" in (preceeding, following):
rtn.append(c)
else:
rtn.append("''")
return "".join(rtn)
|
[
"def",
"_fix_str",
"(",
"x",
")",
":",
"rtn",
"=",
"[",
"]",
"for",
"i",
",",
"c",
"in",
"enumerate",
"(",
"x",
")",
":",
"preceeding",
"=",
"x",
"[",
"i",
"-",
"1",
"]",
"if",
"x",
"else",
"\"\"",
"following",
"=",
"x",
"[",
"i",
"+",
"1",
"]",
"if",
"i",
"<",
"len",
"(",
"x",
")",
"-",
"1",
"else",
"\"\"",
"if",
"c",
"!=",
"\"'\"",
"or",
"\"'\"",
"in",
"(",
"preceeding",
",",
"following",
")",
":",
"rtn",
".",
"append",
"(",
"c",
")",
"else",
":",
"rtn",
".",
"append",
"(",
"\"''\"",
")",
"return",
"\"\"",
".",
"join",
"(",
"rtn",
")"
] |
can't fix all the strings.
|
[
"can",
"'",
"t",
"fix",
"all",
"the",
"strings",
"."
] |
[
"\"\"\"\n can't fix all the strings. won't work right if some jerk wants to insert '' or some other multiple\n of consecutive of '. need bound parameters for that, which precludes storing things in readable sql\n \"\"\""
] |
[
{
"param": "x",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "x",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _fix_str(x):
rtn = []
for i,c in enumerate(x):
preceeding = x[i-1] if x else ""
following = x[i+1] if i < len(x)-1 else ""
if c != "'" or "'" in (preceeding, following):
rtn.append(c)
else:
rtn.append("''")
return "".join(rtn)
| 1,420 | 187 |
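A quick check of `_fix_str`. The guard `if x` in `preceeding = x[i-1] if x else ""` reads like a slip for `if i`: when i == 0, `x[-1]` wraps around to the last character, so a string that both starts and ends with a quote keeps its leading quote unescaped, as the third case shows.

assert _fix_str("it's") == "it''s"     # a lone quote is doubled for SQL
assert _fix_str("a''b") == "a''b"      # already-doubled quotes are left alone
assert _fix_str("'ab'") == "'ab''"     # wrap-around: the leading quote is not doubled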
fcc4458922af76b7afd9e61f2ca5921d56dc125b
|
aitorres/caupo
|
caupo/preprocessing.py
|
[
"MIT"
] |
Python
|
should_remove
|
<not_specific>
|
def should_remove(token):
"""Determines whether a token should be removed"""
if token.startswith("http"):
return True
if token.startswith("@"):
return True
if token.startswith("#"):
return True
return False
|
Determines whether a token should be removed
|
Determines whether a token should be removed
|
[
"Determines",
"whether",
"a",
"token",
"should",
"be",
"removed"
] |
def should_remove(token):
if token.startswith("http"):
return True
if token.startswith("@"):
return True
if token.startswith("#"):
return True
return False
|
[
"def",
"should_remove",
"(",
"token",
")",
":",
"if",
"token",
".",
"startswith",
"(",
"\"http\"",
")",
":",
"return",
"True",
"if",
"token",
".",
"startswith",
"(",
"\"@\"",
")",
":",
"return",
"True",
"if",
"token",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"return",
"True",
"return",
"False"
] |
Determines whether a token should be removed
|
[
"Determines",
"whether",
"a",
"token",
"should",
"be",
"removed"
] |
[
"\"\"\"Determines whether a token should be removed\"\"\""
] |
[
{
"param": "token",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "token",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def should_remove(token):
if token.startswith("http"):
return True
if token.startswith("@"):
return True
if token.startswith("#"):
return True
return False
| 1,421 | 916 |
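A quick check for `should_remove` (token values are illustrative):

assert should_remove("https://example.com/page")         # URLs
assert should_remove("@usuario") and should_remove("#etiqueta")  # mentions, hashtags
assert not should_remove("palabra")                       # ordinary tokens are kept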
adf1e5e4799ec737f081bdd072168ebfa97341e7
|
Rreuben/password
|
run.py
|
[
"MIT"
] |
Python
|
generate_password
|
<not_specific>
|
def generate_password():
'''
Case function for the password generator
'''
print('\n')
print('How many digits would you like your password to have? (From 9 to 15)')
num = input()
def generate(passwrd):
'''
The password generator
'''
password = str('')
for x in range(passwrd):
x = random.randint(0, 94)
password += string.printable[x]
return password
if num == '9':
print('\n')
print('Your new password is ' + generate(9))
elif num == '10':
print('\n')
print('Your new password is ' + generate(10))
elif num == '11':
print('\n')
print('Your new password is ' + generate(11))
elif num == '12':
print('\n')
print('Your new password is ' + generate(12))
elif num == '13':
print('\n')
print('Your new password is ' + generate(13))
elif num == '14':
print('\n')
print('Your new password is ' + generate(14))
elif num == '15':
print('\n')
print('Your new password is ' + generate(15))
else:
print('\n')
print('Please stick to the given parameters for now. Thanks :)')
print('\n')
|
Case function for the password generator
|
Case function for the password generator
|
[
"Case",
"function",
"for",
"the",
"password",
"generator"
] |
def generate_password():
print('\n')
print('How many digits would you like your password to have? (From 9 to 15)')
num = input()
def generate(passwrd):
password = str('')
for x in range(passwrd):
x = random.randint(0, 94)
password += string.printable[x]
return password
if num == '9':
print('\n')
print('Your new password is ' + generate(9))
elif num == '10':
print('\n')
print('Your new password is ' + generate(10))
elif num == '11':
print('\n')
print('Your new password is ' + generate(11))
elif num == '12':
print('\n')
print('Your new password is ' + generate(12))
elif num == '13':
print('\n')
print('Your new password is ' + generate(13))
elif num == '14':
print('\n')
print('Your new password is ' + generate(14))
elif num == '15':
print('\n')
print('Your new password is ' + generate(15))
else:
print('\n')
print('Please stick to the given parameters for now. Thanks :)')
print('\n')
|
[
"def",
"generate_password",
"(",
")",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'How many digits would you like your password to have? (From 9 to 15)'",
")",
"num",
"=",
"input",
"(",
")",
"def",
"generate",
"(",
"passwrd",
")",
":",
"'''\n The password generator\n '''",
"password",
"=",
"str",
"(",
"''",
")",
"for",
"x",
"in",
"range",
"(",
"passwrd",
")",
":",
"x",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"94",
")",
"password",
"+=",
"string",
".",
"printable",
"[",
"x",
"]",
"return",
"password",
"if",
"num",
"==",
"'9'",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Your new password is '",
"+",
"generate",
"(",
"9",
")",
")",
"elif",
"num",
"==",
"'10'",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Your new password is '",
"+",
"generate",
"(",
"10",
")",
")",
"elif",
"num",
"==",
"'11'",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Your new password is '",
"+",
"generate",
"(",
"11",
")",
")",
"elif",
"num",
"==",
"'12'",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Your new password is '",
"+",
"generate",
"(",
"12",
")",
")",
"elif",
"num",
"==",
"'13'",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Your new password is '",
"+",
"generate",
"(",
"13",
")",
")",
"elif",
"num",
"==",
"'14'",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Your new password is '",
"+",
"generate",
"(",
"14",
")",
")",
"elif",
"num",
"==",
"'15'",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Your new password is '",
"+",
"generate",
"(",
"15",
")",
")",
"else",
":",
"print",
"(",
"'\\n'",
")",
"print",
"(",
"'Please stick to the given parameters for now. Thanks :)'",
")",
"print",
"(",
"'\\n'",
")"
] |
Case function for the password generator
|
[
"Case",
"function",
"for",
"the",
"password",
"generator"
] |
[
"'''\n Case function for the password generator\n '''",
"'''\n The password generator\n '''"
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import string
import random
def generate_password():
print('\n')
print('How many digits would you like your password to have? (From 9 to 15)')
num = input()
def generate(passwrd):
password = str('')
for x in range(passwrd):
x = random.randint(0, 94)
password += string.printable[x]
return password
if num == '9':
print('\n')
print('Your new password is ' + generate(9))
elif num == '10':
print('\n')
print('Your new password is ' + generate(10))
elif num == '11':
print('\n')
print('Your new password is ' + generate(11))
elif num == '12':
print('\n')
print('Your new password is ' + generate(12))
elif num == '13':
print('\n')
print('Your new password is ' + generate(13))
elif num == '14':
print('\n')
print('Your new password is ' + generate(14))
elif num == '15':
print('\n')
print('Your new password is ' + generate(15))
else:
print('\n')
print('Please stick to the given parameters for now. Thanks :)')
print('\n')
| 1,422 | 489 |
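The nine-way ladder in `generate_password` repeats one pattern per digit count; below is a behaviour-preserving sketch of the same logic (`generate_password_compact` is an illustrative name, not from the source). For real passwords the standard-library `secrets` module would be the usual choice over `random`; note also that `randint(0, 94)` reaches only part of `string.printable`.

import random
import string

def generate_password_compact():
    print('\nHow many digits would you like your password to have? (From 9 to 15)')
    num = input()
    if num.isdigit() and 9 <= int(num) <= 15:
        # Same character pool and draw as the original ladder.
        password = ''.join(string.printable[random.randint(0, 94)]
                           for _ in range(int(num)))
        print('\nYour new password is ' + password)
    else:
        print('\nPlease stick to the given parameters for now. Thanks :)\n')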
990211d465e756af35649c700b3b320a3e0e2761
|
Pantalaymon/coreferee
|
coreferee/rules.py
|
[
"Apache-2.0"
] |
Python
|
is_token_in_one_of_phrases
|
bool
|
def is_token_in_one_of_phrases(token: Token, phrases: List[str]) -> bool:
"""Checks whether *token* is part of a phrase that is listed in *phrases*."""
doc = token.doc
token_text = token.text.lower()
for phrase in phrases:
phrase_words = phrase.lower().split()
if token_text not in phrase_words:
continue
possible_index = phrase_words.index(token_text)
start_index = max(0, token.i - possible_index)
end_index = token.i + len(phrase_words) - possible_index
if phrase.lower() == " ".join(
[t.text.lower() for t in doc[start_index:end_index]]
):
return True
return False
|
Checks whether *token* is part of a phrase that is listed in *phrases*.
|
Checks whether *token* is part of a phrase that is listed in *phrases*.
|
[
"Checks",
"whether",
"*",
"token",
"*",
"is",
"part",
"of",
"a",
"phrase",
"that",
"is",
"listed",
"in",
"*",
"phrases",
"*",
"."
] |
def is_token_in_one_of_phrases(token: Token, phrases: List[str]) -> bool:
doc = token.doc
token_text = token.text.lower()
for phrase in phrases:
phrase_words = phrase.lower().split()
if token_text not in phrase_words:
continue
possible_index = phrase_words.index(token_text)
start_index = max(0, token.i - possible_index)
end_index = token.i + len(phrase_words) - possible_index
if phrase.lower() == " ".join(
[t.text.lower() for t in doc[start_index:end_index]]
):
return True
return False
|
[
"def",
"is_token_in_one_of_phrases",
"(",
"token",
":",
"Token",
",",
"phrases",
":",
"List",
"[",
"str",
"]",
")",
"->",
"bool",
":",
"doc",
"=",
"token",
".",
"doc",
"token_text",
"=",
"token",
".",
"text",
".",
"lower",
"(",
")",
"for",
"phrase",
"in",
"phrases",
":",
"phrase_words",
"=",
"phrase",
".",
"lower",
"(",
")",
".",
"split",
"(",
")",
"if",
"token_text",
"not",
"in",
"phrase_words",
":",
"continue",
"possible_index",
"=",
"phrase_words",
".",
"index",
"(",
"token_text",
")",
"start_index",
"=",
"max",
"(",
"0",
",",
"token",
".",
"i",
"-",
"possible_index",
")",
"end_index",
"=",
"token",
".",
"i",
"+",
"len",
"(",
"phrase_words",
")",
"-",
"possible_index",
"if",
"phrase",
".",
"lower",
"(",
")",
"==",
"\" \"",
".",
"join",
"(",
"[",
"t",
".",
"text",
".",
"lower",
"(",
")",
"for",
"t",
"in",
"doc",
"[",
"start_index",
":",
"end_index",
"]",
"]",
")",
":",
"return",
"True",
"return",
"False"
] |
Checks whether *token* is part of a phrase that is listed in *phrases*.
|
[
"Checks",
"whether",
"*",
"token",
"*",
"is",
"part",
"of",
"a",
"phrase",
"that",
"is",
"listed",
"in",
"*",
"phrases",
"*",
"."
] |
[
"\"\"\"Checks whether *token* is part of a phrase that is listed in *phrases*.\"\"\""
] |
[
{
"param": "token",
"type": "Token"
},
{
"param": "phrases",
"type": "List[str]"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "token",
"type": "Token",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "phrases",
"type": "List[str]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
from typing import List

from spacy.tokens import Token

def is_token_in_one_of_phrases(token: Token, phrases: List[str]) -> bool:
doc = token.doc
token_text = token.text.lower()
for phrase in phrases:
phrase_words = phrase.lower().split()
if token_text not in phrase_words:
continue
possible_index = phrase_words.index(token_text)
start_index = max(0, token.i - possible_index)
end_index = token.i + len(phrase_words) - possible_index
if phrase.lower() == " ".join(
[t.text.lower() for t in doc[start_index:end_index]]
):
return True
return False
| 1,423 | 309 |
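A usage sketch for `is_token_in_one_of_phrases`, assuming spaCy and the small English model are installed (the sentence and model name are illustrative):

import spacy

nlp = spacy.load("en_core_web_sm")   # assumes en_core_web_sm has been downloaded
doc = nlp("She moved to the United States of America last year.")
token = doc[5]                       # "States"
assert is_token_in_one_of_phrases(token, ["United States of America"])
assert not is_token_in_one_of_phrases(token, ["United Kingdom"])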
0ee2611258f0e8d23be6d5e398242618329ae031
|
mascot6699/py-kin-base
|
kin_base/purecrc16.py
|
[
"Apache-2.0"
] |
Python
|
_crc16
|
<not_specific>
|
def _crc16(data, crc, table):
"""table for caclulating CRC (list of 256 integers)
:param bytes data: Data for calculating CRC.
:param int crc: Initial value.
:param list table: Table for caclulating CRC (list of 256 integers)
:return: calculated value of CRC
"""
bytes_to_int = lambda x: ord(x) if sys.version_info.major == 2 else x
for byte in data:
crc = ((crc << 8) & 0xff00) ^ table[((crc >> 8) & 0xff) ^ bytes_to_int(byte)]
return crc & 0xffff
|
table for caclulating CRC (list of 256 integers)
:param bytes data: Data for calculating CRC.
:param int crc: Initial value.
:param list table: Table for caclulating CRC (list of 256 integers)
:return: calculated value of CRC
|
table for caclulating CRC (list of 256 integers)
|
[
"table",
"for",
"caclulating",
"CRC",
"(",
"list",
"of",
"256",
"integers",
")"
] |
def _crc16(data, crc, table):
bytes_to_int = lambda x: ord(x) if sys.version_info.major == 2 else x
for byte in data:
crc = ((crc << 8) & 0xff00) ^ table[((crc >> 8) & 0xff) ^ bytes_to_int(byte)]
return crc & 0xffff
|
[
"def",
"_crc16",
"(",
"data",
",",
"crc",
",",
"table",
")",
":",
"bytes_to_int",
"=",
"lambda",
"x",
":",
"ord",
"(",
"x",
")",
"if",
"sys",
".",
"version_info",
".",
"major",
"==",
"2",
"else",
"x",
"for",
"byte",
"in",
"data",
":",
"crc",
"=",
"(",
"(",
"crc",
"<<",
"8",
")",
"&",
"0xff00",
")",
"^",
"table",
"[",
"(",
"(",
"crc",
">>",
"8",
")",
"&",
"0xff",
")",
"^",
"bytes_to_int",
"(",
"byte",
")",
"]",
"return",
"crc",
"&",
"0xffff"
] |
table for caclulating CRC (list of 256 integers)
|
[
"table",
"for",
"caclulating",
"CRC",
"(",
"list",
"of",
"256",
"integers",
")"
] |
[
"\"\"\"table for caclulating CRC (list of 256 integers)\n :param bytes data: Data for calculating CRC.\n :param int crc: Initial value.\n :param list table: Table for caclulating CRC (list of 256 integers)\n :return: calculated value of CRC\n \"\"\""
] |
[
{
"param": "data",
"type": null
},
{
"param": "crc",
"type": null
},
{
"param": "table",
"type": null
}
] |
{
"returns": [
{
"docstring": "calculated value of CRC",
"docstring_tokens": [
"calculated",
"value",
"of",
"CRC"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "data",
"type": null,
"docstring": "Data for calculating CRC.",
"docstring_tokens": [
"Data",
"for",
"calculating",
"CRC",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "crc",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "table",
"type": null,
"docstring": "Table for caclulating CRC (list of 256 integers)",
"docstring_tokens": [
"Table",
"for",
"caclulating",
"CRC",
"(",
"list",
"of",
"256",
"integers",
")"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
import sys
def _crc16(data, crc, table):
bytes_to_int = lambda x: ord(x) if sys.version_info.major == 2 else x
for byte in data:
crc = ((crc << 8) & 0xff00) ^ table[((crc >> 8) & 0xff) ^ bytes_to_int(byte)]
return crc & 0xffff
| 1,424 | 255 |
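A sketch of building a compatible lookup table for `_crc16`. The CCITT/XModem polynomial 0x1021 is an assumption here (the table shipped with the library is not shown in this record); with it and an initial value of 0, the function reproduces the standard CRC-16/XMODEM check value.

def _make_table(poly=0x1021):
    # MSB-first table-driven CRC16: one entry per possible byte value.
    table = []
    for byte in range(256):
        crc = byte << 8
        for _ in range(8):
            crc = ((crc << 1) ^ poly) if (crc & 0x8000) else (crc << 1)
        table.append(crc & 0xffff)
    return table

assert _crc16(b"123456789", 0, _make_table()) == 0x31C3  # CRC-16/XMODEM check value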
d6c6e1f13f1641c41b8d3f9ee9d4ead207ee4ce1
|
johnyf/contract_maker
|
contracts_pinfo.py
|
[
"BSD-3-Clause"
] |
Python
|
communication_schedule
|
<not_specific>
|
def communication_schedule(aut):
"""Return assignment to indexed mask vars, as `dict`."""
phases = aut.phases
values = dict()
for phase, j in phases.items():
comm = aut.comm_arch(phase)
for player, vrs in comm.items():
for var, visible in vrs.items():
s = '{player}_mask_{var}_{j}'.format(
player=player, var=var, j=j)
values[s] = 1 - visible
return values
|
Return assignment to indexed mask vars, as `dict`.
|
Return assignment to indexed mask vars, as `dict`.
|
[
"Return",
"assignment",
"to",
"indexed",
"mask",
"vars",
"as",
"`",
"dict",
"`",
"."
] |
def communication_schedule(aut):
phases = aut.phases
values = dict()
for phase, j in phases.items():
comm = aut.comm_arch(phase)
for player, vrs in comm.items():
for var, visible in vrs.items():
s = '{player}_mask_{var}_{j}'.format(
player=player, var=var, j=j)
values[s] = 1 - visible
return values
|
[
"def",
"communication_schedule",
"(",
"aut",
")",
":",
"phases",
"=",
"aut",
".",
"phases",
"values",
"=",
"dict",
"(",
")",
"for",
"phase",
",",
"j",
"in",
"phases",
".",
"items",
"(",
")",
":",
"comm",
"=",
"aut",
".",
"comm_arch",
"(",
"phase",
")",
"for",
"player",
",",
"vrs",
"in",
"comm",
".",
"items",
"(",
")",
":",
"for",
"var",
",",
"visible",
"in",
"vrs",
".",
"items",
"(",
")",
":",
"s",
"=",
"'{player}_mask_{var}_{j}'",
".",
"format",
"(",
"player",
"=",
"player",
",",
"var",
"=",
"var",
",",
"j",
"=",
"j",
")",
"values",
"[",
"s",
"]",
"=",
"1",
"-",
"visible",
"return",
"values"
] |
Return assignment to indexed mask vars, as `dict`.
|
[
"Return",
"assignment",
"to",
"indexed",
"mask",
"vars",
"as",
"`",
"dict",
"`",
"."
] |
[
"\"\"\"Return assignment to indexed mask vars, as `dict`.\"\"\""
] |
[
{
"param": "aut",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "aut",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def communication_schedule(aut):
phases = aut.phases
values = dict()
for phase, j in phases.items():
comm = aut.comm_arch(phase)
for player, vrs in comm.items():
for var, visible in vrs.items():
s = '{player}_mask_{var}_{j}'.format(
player=player, var=var, j=j)
values[s] = 1 - visible
return values
| 1,425 | 94 |
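A usage sketch for `communication_schedule`. `_AutStub` is a hypothetical stand-in exposing just the `phases` mapping and `comm_arch` method the function reads; note the mask is the complement of visibility (mask = 1 - visible).

class _AutStub:
    phases = {"p0": 0, "p1": 1}
    def comm_arch(self, phase):
        # One player with one variable, visible only in phase "p1".
        return {"alice": {"x": 1 if phase == "p1" else 0}}

values = communication_schedule(_AutStub())
assert values == {"alice_mask_x_0": 1, "alice_mask_x_1": 0}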
6fcac46b779f3687957062a1824a8e84a3f9bb98
|
AeroNotix/django-timetracker
|
tracker/management/commands/holiday_chart.py
|
[
"BSD-4-Clause"
] |
Python
|
gendates
|
<not_specific>
|
def gendates(year):
'''Generates the dates for a given year.'''
m = {}
d = datetime.date(year, 1, 1)
td = datetime.timedelta(days=1)
while d.year == year:
m[d] = 0
d += td
return m
|
Generates the dates for a given year.
|
Generates the dates for a given year.
|
[
"Generates",
"the",
"dates",
"for",
"a",
"given",
"year",
"."
] |
def gendates(year):
m = {}
d = datetime.date(year, 1, 1)
td = datetime.timedelta(days=1)
while d.year == year:
m[d] = 0
d += td
return m
|
[
"def",
"gendates",
"(",
"year",
")",
":",
"m",
"=",
"{",
"}",
"d",
"=",
"datetime",
".",
"date",
"(",
"year",
",",
"1",
",",
"1",
")",
"td",
"=",
"datetime",
".",
"timedelta",
"(",
"days",
"=",
"1",
")",
"while",
"d",
".",
"year",
"==",
"year",
":",
"m",
"[",
"d",
"]",
"=",
"0",
"d",
"+=",
"td",
"return",
"m"
] |
Generates the dates for a given year.
|
[
"Generates",
"the",
"dates",
"for",
"a",
"given",
"year",
"."
] |
[
"'''Generates the dates for a given year.'''"
] |
[
{
"param": "year",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "year",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import datetime
def gendates(year):
m = {}
d = datetime.date(year, 1, 1)
td = datetime.timedelta(days=1)
while d.year == year:
m[d] = 0
d += td
return m
| 1,426 | 165 |
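A quick check for `gendates`:

import datetime

dates = gendates(2020)
assert len(dates) == 366                       # 2020 is a leap year
assert all(count == 0 for count in dates.values())
assert datetime.date(2020, 12, 31) in dates    # covers the whole year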
2eb1b7b9b287460179e0513407411d04f5866817
|
MaxChanger/pytorch-cifar
|
main_ResNet50.py
|
[
"MIT"
] |
Python
|
adjust_learning_rate
| null |
def adjust_learning_rate(optimizer, epoch, T_0=10, eta_max=0.1, eta_min=0.):
"""Sets the learning rate to the initial LR decayed by 10 every 50 epochs"""
# lr = args.lr * (0.1 ** (epoch // 5))
if epoch < 50 :
lr = 0.001
elif epoch < 100 :
lr = 0.0005
elif epoch < 150 :
lr = 0.0003
elif epoch < 200 :
lr = 0.0001
elif epoch < 230 :
lr = 0.00001
else :
lr = 0.000001
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print('\nlr:====>',lr)
|
Sets the learning rate to the initial LR decayed by 10 every 50 epochs
|
Sets the learning rate to the initial LR decayed by 10 every 50 epochs
|
[
"Sets",
"the",
"learning",
"rate",
"to",
"the",
"initial",
"LR",
"decayed",
"by",
"10",
"every",
"50",
"epochs"
] |
def adjust_learning_rate(optimizer, epoch, T_0=10, eta_max=0.1, eta_min=0.):
if epoch < 50 :
lr = 0.001
elif epoch < 100 :
lr = 0.0005
elif epoch < 150 :
lr = 0.0003
elif epoch < 200 :
lr = 0.0001
elif epoch < 230 :
lr = 0.00001
else :
lr = 0.000001
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print('\nlr:====>',lr)
|
[
"def",
"adjust_learning_rate",
"(",
"optimizer",
",",
"epoch",
",",
"T_0",
"=",
"10",
",",
"eta_max",
"=",
"0.1",
",",
"eta_min",
"=",
"0.",
")",
":",
"if",
"epoch",
"<",
"50",
":",
"lr",
"=",
"0.001",
"elif",
"epoch",
"<",
"100",
":",
"lr",
"=",
"0.0005",
"elif",
"epoch",
"<",
"150",
":",
"lr",
"=",
"0.0003",
"elif",
"epoch",
"<",
"200",
":",
"lr",
"=",
"0.0001",
"elif",
"epoch",
"<",
"230",
":",
"lr",
"=",
"0.00001",
"else",
":",
"lr",
"=",
"0.000001",
"for",
"param_group",
"in",
"optimizer",
".",
"param_groups",
":",
"param_group",
"[",
"'lr'",
"]",
"=",
"lr",
"print",
"(",
"'\\nlr:====>'",
",",
"lr",
")"
] |
Sets the learning rate to the initial LR decayed by 10 every 50 epochs
|
[
"Sets",
"the",
"learning",
"rate",
"to",
"the",
"initial",
"LR",
"decayed",
"by",
"10",
"every",
"50",
"epochs"
] |
[
"\"\"\"Sets the learning rate to the initial LR decayed by 10 every 50 epochs\"\"\"",
"# lr = args.lr * (0.1 ** (epoch // 5))"
] |
[
{
"param": "optimizer",
"type": null
},
{
"param": "epoch",
"type": null
},
{
"param": "T_0",
"type": null
},
{
"param": "eta_max",
"type": null
},
{
"param": "eta_min",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "optimizer",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "epoch",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "T_0",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "eta_max",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "eta_min",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def adjust_learning_rate(optimizer, epoch, T_0=10, eta_max=0.1, eta_min=0.):
if epoch < 50 :
lr = 0.001
elif epoch < 100 :
lr = 0.0005
elif epoch < 150 :
lr = 0.0003
elif epoch < 200 :
lr = 0.0001
elif epoch < 230 :
lr = 0.00001
else :
lr = 0.000001
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print('\nlr:====>',lr)
| 1,427 | 847 |
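A usage sketch for `adjust_learning_rate`. `_OptStub` is a hypothetical stand-in exposing the `param_groups` list of dicts that PyTorch optimizers provide; the unused `T_0`/`eta_max`/`eta_min` parameters are leftovers from the commented-out schedule.

class _OptStub:
    param_groups = [{"lr": 0.1}, {"lr": 0.1}]

opt = _OptStub()
adjust_learning_rate(opt, epoch=120)           # 100 <= epoch < 150 bucket
assert all(g["lr"] == 0.0003 for g in opt.param_groups)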
b7399a22f2fa0bca0464e33969429c0c878fc1f8
|
ConnectedSystems/sa-comparison
|
camp_process/data_interface.py
|
[
"0BSD"
] |
Python
|
ensure_str_type
|
<not_specific>
|
def ensure_str_type(df):
"""Convert object elements in dataframe to string."""
is_object = df.dtypes == object
df.loc[:, is_object] = df.loc[:, is_object].astype(str)
return df
|
Convert object elements in dataframe to string.
|
Convert object elements in dataframe to string.
|
[
"Convert",
"object",
"elements",
"in",
"dataframe",
"to",
"string",
"."
] |
def ensure_str_type(df):
is_object = df.dtypes == object
df.loc[:, is_object] = df.loc[:, is_object].astype(str)
return df
|
[
"def",
"ensure_str_type",
"(",
"df",
")",
":",
"is_object",
"=",
"df",
".",
"dtypes",
"==",
"object",
"df",
".",
"loc",
"[",
":",
",",
"is_object",
"]",
"=",
"df",
".",
"loc",
"[",
":",
",",
"is_object",
"]",
".",
"astype",
"(",
"str",
")",
"return",
"df"
] |
Convert object elements in dataframe to string.
|
[
"Convert",
"object",
"elements",
"in",
"dataframe",
"to",
"string",
"."
] |
[
"\"\"\"Convert object elements in dataframe to string.\"\"\""
] |
[
{
"param": "df",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def ensure_str_type(df):
is_object = df.dtypes == object
df.loc[:, is_object] = df.loc[:, is_object].astype(str)
return df
| 1,428 | 1,016 |
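A usage sketch for `ensure_str_type` (column names and values are illustrative):

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [{"k": 1}, None]})
out = ensure_str_type(df)
assert out["b"].tolist() == ["{'k': 1}", "None"]   # object column stringified in place
assert out["a"].dtype != object                    # numeric column left untouched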
46e67281a372a29882b9a7ac92e9584882047889
|
spicyj/vim-awesome
|
db/github_repos.py
|
[
"MIT"
] |
Python
|
ensure_blacklisted_repos
| null |
def ensure_blacklisted_repos(cls):
"""Make sure all blacklisted GitHub repos have an entry in the DB
marking them as such.
"""
for owner_repo in cls._BLACKLISTED_GITHUB_REPOS:
owner, repo_name = owner_repo.split('/')
cls.upsert_with_owner_repo({
'owner': owner,
'repo_name': repo_name,
'is_blacklisted': True,
})
|
Make sure all blacklisted GitHub repos have an entry in the DB
marking them as such.
|
Make sure all blacklisted GitHub repos have an entry in the DB
marking them as such.
|
[
"Make",
"sure",
"all",
"blacklisted",
"GitHub",
"repos",
"have",
"an",
"entry",
"in",
"the",
"DB",
"marking",
"them",
"as",
"such",
"."
] |
def ensure_blacklisted_repos(cls):
for owner_repo in cls._BLACKLISTED_GITHUB_REPOS:
owner, repo_name = owner_repo.split('/')
cls.upsert_with_owner_repo({
'owner': owner,
'repo_name': repo_name,
'is_blacklisted': True,
})
|
[
"def",
"ensure_blacklisted_repos",
"(",
"cls",
")",
":",
"for",
"owner_repo",
"in",
"cls",
".",
"_BLACKLISTED_GITHUB_REPOS",
":",
"owner",
",",
"repo_name",
"=",
"owner_repo",
".",
"split",
"(",
"'/'",
")",
"cls",
".",
"upsert_with_owner_repo",
"(",
"{",
"'owner'",
":",
"owner",
",",
"'repo_name'",
":",
"repo_name",
",",
"'is_blacklisted'",
":",
"True",
",",
"}",
")"
] |
Make sure all blacklisted GitHub repos have an entry in the DB
marking them as such.
|
[
"Make",
"sure",
"all",
"blacklisted",
"GitHub",
"repos",
"have",
"an",
"entry",
"in",
"the",
"DB",
"marking",
"them",
"as",
"such",
"."
] |
[
"\"\"\"Make sure all blacklisted GitHub repos have an entry in the DB\n marking them as such.\n \"\"\""
] |
[
{
"param": "cls",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def ensure_blacklisted_repos(cls):
for owner_repo in cls._BLACKLISTED_GITHUB_REPOS:
owner, repo_name = owner_repo.split('/')
cls.upsert_with_owner_repo({
'owner': owner,
'repo_name': repo_name,
'is_blacklisted': True,
})
| 1,429 | 19 |
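A usage sketch for `ensure_blacklisted_repos`. In the source this is presumably a classmethod; here the module-level definition is called with a hypothetical stand-in class passed explicitly as `cls`.

class _ReposStub:
    _BLACKLISTED_GITHUB_REPOS = ["foo/bar"]
    upserts = []
    @classmethod
    def upsert_with_owner_repo(cls, doc):
        cls.upserts.append(doc)

ensure_blacklisted_repos(_ReposStub)
assert _ReposStub.upserts == [
    {"owner": "foo", "repo_name": "bar", "is_blacklisted": True}
]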
d0bf796e0de92c0ebecd599d5c5e40a879c0f6ef
|
CherWeiYuan/GeneScanner
|
genescanner/genescanner.py
|
[
"MIT"
] |
Python
|
RemoveArtefacts
|
<not_specific>
|
def RemoveArtefacts(df, remove):
"""
Create df for output to user
Parameters
----------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
remove : TYPE List
DESCRIPTION. Flattened list of index to remove from df
Returns
-------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet with dirty peaks
removed.
"""
return df.drop(labels = remove, axis = 0, inplace = False)
|
Create df for output to user
Parameters
----------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet.
remove : TYPE List
DESCRIPTION. Flattened list of index to remove from df
Returns
-------
df : TYPE Pandas dataframe
DESCRIPTION. Dataframe of cleaned GeneScan datasheet with dirty peaks
removed.
|
Create df for output to user
|
[
"Create",
"df",
"for",
"output",
"to",
"user"
] |
def RemoveArtefacts(df, remove):
return df.drop(labels = remove, axis = 0, inplace = False)
|
[
"def",
"RemoveArtefacts",
"(",
"df",
",",
"remove",
")",
":",
"return",
"df",
".",
"drop",
"(",
"labels",
"=",
"remove",
",",
"axis",
"=",
"0",
",",
"inplace",
"=",
"False",
")"
] |
Create df for output to user
|
[
"Create",
"df",
"for",
"output",
"to",
"user"
] |
[
"\"\"\"\r\n Create df for output to user\r\n \r\n Parameters\r\n ----------\r\n df : TYPE Pandas dataframe\r\n DESCRIPTION. Dataframe of cleaned GeneScan datasheet.\r\n remove : TYPE List\r\n DESCRIPTION. Flattened list of index to remove from df\r\n\r\n Returns\r\n -------\r\n df : TYPE Pandas dataframe\r\n DESCRIPTION. Dataframe of cleaned GeneScan datasheet with dirty peaks\r\n removed.\r\n \"\"\""
] |
[
{
"param": "df",
"type": null
},
{
"param": "remove",
"type": null
}
] |
{
"returns": [
{
"docstring": "DESCRIPTION. Dataframe of cleaned GeneScan datasheet with dirty peaks\nremoved.",
"docstring_tokens": [
"DESCRIPTION",
".",
"Dataframe",
"of",
"cleaned",
"GeneScan",
"datasheet",
"with",
"dirty",
"peaks",
"removed",
"."
],
"type": "TYPE Pandas dataframe\r"
}
],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": "DESCRIPTION. Dataframe of cleaned GeneScan datasheet.",
"docstring_tokens": [
"DESCRIPTION",
".",
"Dataframe",
"of",
"cleaned",
"GeneScan",
"datasheet",
"."
],
"default": null,
"is_optional": false
},
{
"identifier": "remove",
"type": null,
"docstring": "DESCRIPTION. Flattened list of index to remove from df",
"docstring_tokens": [
"DESCRIPTION",
".",
"Flattened",
"list",
"of",
"index",
"to",
"remove",
"from",
"df"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def RemoveArtefacts(df, remove):
return df.drop(labels = remove, axis = 0, inplace = False)
| 1,430 | 1,022 |
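A quick check for `RemoveArtefacts` (the peak table below is illustrative):

import pandas as pd

peaks = pd.DataFrame({"Size": [100.1, 100.4, 250.0]})
cleaned = RemoveArtefacts(peaks, remove=[1])
assert list(cleaned.index) == [0, 2]   # row with index 1 dropped
assert len(peaks) == 3                 # inplace=False leaves the input frame as-is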
246475656c05d7ae773737f5363d50977df23974
|
petoperico/Learning_Python
|
Exercise06_Alphabetical_substring.py
|
[
"MIT"
] |
Python
|
all_substrings
|
<not_specific>
|
def all_substrings(string):
"""
Function that gets all the substrings in a row.
string: string
return: list
"""
string = string.lower()
substrings_list = []
substrings_list.append(string[0])
for character in range(len(string)):
if (
ord(string[character]) >= 97 and
ord(string[character]) <= 122
):
if (
character > 0 and
(ord(string[character - 1]) == ord(string[character]) - 1) and
(ord(substrings_list[-1]) != ord(string[character]) - 1)
):
substrings_list.append(string[character - 1])
substrings_list.append(string[character])
elif (
character > 0 and
(ord(string[character - 1]) == ord(string[character]) - 1)
):
substrings_list.append(string[character])
if len(substrings_list) < 2:
substrings_list.append(string[-1])
return substrings_list
|
Function that gets all the substrings in a row.
string: string
return: list
|
Function that gets all the substrings in a row.
string: string
list
|
[
"Function",
"that",
"gets",
"all",
"the",
"substrings",
"in",
"a",
"row",
".",
"string",
":",
"string",
"list"
] |
def all_substrings(string):
string = string.lower()
substrings_list = []
substrings_list.append(string[0])
for character in range(len(string)):
if (
ord(string[character]) >= 97 and
ord(string[character]) <= 122
):
if (
character > 0 and
(ord(string[character - 1]) == ord(string[character]) - 1) and
(ord(substrings_list[-1]) != ord(string[character]) - 1)
):
substrings_list.append(string[character - 1])
substrings_list.append(string[character])
elif (
character > 0 and
(ord(string[character - 1]) == ord(string[character]) - 1)
):
substrings_list.append(string[character])
if len(substrings_list) < 2:
substrings_list.append(string[-1])
return substrings_list
|
[
"def",
"all_substrings",
"(",
"string",
")",
":",
"string",
"=",
"string",
".",
"lower",
"(",
")",
"substrings_list",
"=",
"[",
"]",
"substrings_list",
".",
"append",
"(",
"string",
"[",
"0",
"]",
")",
"for",
"character",
"in",
"range",
"(",
"len",
"(",
"string",
")",
")",
":",
"if",
"(",
"ord",
"(",
"string",
"[",
"character",
"]",
")",
">=",
"97",
"and",
"ord",
"(",
"string",
"[",
"character",
"]",
")",
"<=",
"122",
")",
":",
"if",
"(",
"character",
">",
"0",
"and",
"(",
"ord",
"(",
"string",
"[",
"character",
"-",
"1",
"]",
")",
"==",
"ord",
"(",
"string",
"[",
"character",
"]",
")",
"-",
"1",
")",
"and",
"(",
"ord",
"(",
"substrings_list",
"[",
"-",
"1",
"]",
")",
"!=",
"ord",
"(",
"string",
"[",
"character",
"]",
")",
"-",
"1",
")",
")",
":",
"substrings_list",
".",
"append",
"(",
"string",
"[",
"character",
"-",
"1",
"]",
")",
"substrings_list",
".",
"append",
"(",
"string",
"[",
"character",
"]",
")",
"elif",
"(",
"character",
">",
"0",
"and",
"(",
"ord",
"(",
"string",
"[",
"character",
"-",
"1",
"]",
")",
"==",
"ord",
"(",
"string",
"[",
"character",
"]",
")",
"-",
"1",
")",
")",
":",
"substrings_list",
".",
"append",
"(",
"string",
"[",
"character",
"]",
")",
"if",
"len",
"(",
"substrings_list",
")",
"<",
"2",
":",
"substrings_list",
".",
"append",
"(",
"string",
"[",
"-",
"1",
"]",
")",
"return",
"substrings_list"
] |
Function that gets all the substrings in a row.
|
[
"Function",
"that",
"gets",
"all",
"the",
"substrings",
"in",
"a",
"row",
"."
] |
[
"\"\"\"\n Function that gets all the substrings in a row.\n\n string: string\n\n return: list\n \"\"\""
] |
[
{
"param": "string",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "string",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def all_substrings(string):
string = string.lower()
substrings_list = []
substrings_list.append(string[0])
for character in range(len(string)):
if (
ord(string[character]) >= 97 and
ord(string[character]) <= 122
):
if (
character > 0 and
(ord(string[character - 1]) == ord(string[character]) - 1) and
(ord(substrings_list[-1]) != ord(string[character]) - 1)
):
substrings_list.append(string[character - 1])
substrings_list.append(string[character])
elif (
character > 0 and
(ord(string[character - 1]) == ord(string[character]) - 1)
):
substrings_list.append(string[character])
if len(substrings_list) < 2:
substrings_list.append(string[-1])
return substrings_list
| 1,431 | 1,005 |
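Quick checks for `all_substrings` showing its run-collection behaviour:

assert all_substrings("abc") == ["a", "b", "c"]
assert all_substrings("azcde") == ["a", "c", "d", "e"]
# The first character is always seeded into the result, even when it belongs
# to no alphabetical run (the stray "a" in the second case).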
41d7c4782a8a2079bc2f2c64d83abe5ffdfcad22
|
rsmith-nl/lamprop
|
lp/parser.py
|
[
"BSD-2-Clause"
] |
Python
|
_extended
|
<not_specific>
|
def _extended(original):
"""
Create the extension to the `original` list to make the laminate symmetric.
The position of the comments is taken into account.
"""
if sum(1 for la in original if isinstance(la, str)) == 0:
return original[::-1]
layers = copy.deepcopy(original)
if not isinstance(layers[-1], str):
layers.append("__")
if not isinstance(layers[0], str):
layers.insert(0, "unknown")
idx = [n for n, v in enumerate(layers) if isinstance(v, str)]
pairs = list(zip(idx[:-1], idx[1:]))[::-1]
extension = []
for s, e in pairs:
if layers[s] == "__":
extension += layers[s + 1 : e][::-1] # noqa
else:
extension += [layers[s]] + layers[s + 1 : e][::-1] # noqa
return extension
|
Create the extension to the `original` list to make the laminate symmetric.
The position of the comments is taken into account.
|
Create the extension to the `original` list to make the laminate symmetric.
The position of the comments is taken into account.
|
[
"Create",
"the",
"extension",
"to",
"the",
"`",
"original",
"`",
"list",
"to",
"make",
"the",
"laminate",
"symmetric",
".",
"The",
"position",
"of",
"the",
"comments",
"is",
"taken",
"into",
"account",
"."
] |
def _extended(original):
if sum(1 for la in original if isinstance(la, str)) == 0:
return original[::-1]
layers = copy.deepcopy(original)
if not isinstance(layers[-1], str):
layers.append("__")
if not isinstance(layers[0], str):
layers.insert(0, "unknown")
idx = [n for n, v in enumerate(layers) if isinstance(v, str)]
pairs = list(zip(idx[:-1], idx[1:]))[::-1]
extension = []
for s, e in pairs:
if layers[s] == "__":
extension += layers[s + 1 : e][::-1]
else:
extension += [layers[s]] + layers[s + 1 : e][::-1]
return extension
|
[
"def",
"_extended",
"(",
"original",
")",
":",
"if",
"sum",
"(",
"1",
"for",
"la",
"in",
"original",
"if",
"isinstance",
"(",
"la",
",",
"str",
")",
")",
"==",
"0",
":",
"return",
"original",
"[",
":",
":",
"-",
"1",
"]",
"layers",
"=",
"copy",
".",
"deepcopy",
"(",
"original",
")",
"if",
"not",
"isinstance",
"(",
"layers",
"[",
"-",
"1",
"]",
",",
"str",
")",
":",
"layers",
".",
"append",
"(",
"\"__\"",
")",
"if",
"not",
"isinstance",
"(",
"layers",
"[",
"0",
"]",
",",
"str",
")",
":",
"layers",
".",
"insert",
"(",
"0",
",",
"\"unknown\"",
")",
"idx",
"=",
"[",
"n",
"for",
"n",
",",
"v",
"in",
"enumerate",
"(",
"layers",
")",
"if",
"isinstance",
"(",
"v",
",",
"str",
")",
"]",
"pairs",
"=",
"list",
"(",
"zip",
"(",
"idx",
"[",
":",
"-",
"1",
"]",
",",
"idx",
"[",
"1",
":",
"]",
")",
")",
"[",
":",
":",
"-",
"1",
"]",
"extension",
"=",
"[",
"]",
"for",
"s",
",",
"e",
"in",
"pairs",
":",
"if",
"layers",
"[",
"s",
"]",
"==",
"\"__\"",
":",
"extension",
"+=",
"layers",
"[",
"s",
"+",
"1",
":",
"e",
"]",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"extension",
"+=",
"[",
"layers",
"[",
"s",
"]",
"]",
"+",
"layers",
"[",
"s",
"+",
"1",
":",
"e",
"]",
"[",
":",
":",
"-",
"1",
"]",
"return",
"extension"
] |
Create the extension to the `original` list to make the laminate symmetric.
|
[
"Create",
"the",
"extension",
"to",
"the",
"`",
"original",
"`",
"list",
"to",
"make",
"the",
"laminate",
"symmetric",
"."
] |
[
"\"\"\"\n Create the extension to the `original` list to make the laminate symmetric.\n The position of the comments is taken into account.\n \"\"\"",
"# noqa",
"# noqa"
] |
[
{
"param": "original",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "original",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import copy
def _extended(original):
if sum(1 for la in original if isinstance(la, str)) == 0:
return original[::-1]
layers = copy.deepcopy(original)
if not isinstance(layers[-1], str):
layers.append("__")
if not isinstance(layers[0], str):
layers.insert(0, "unknown")
idx = [n for n, v in enumerate(layers) if isinstance(v, str)]
pairs = list(zip(idx[:-1], idx[1:]))[::-1]
extension = []
for s, e in pairs:
if layers[s] == "__":
extension += layers[s + 1 : e][::-1]
else:
extension += [layers[s]] + layers[s + 1 : e][::-1]
return extension
| 1,432 | 32 |
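A minimal usage sketch for `_extended` above, with a hypothetical laminate list in which strings are comments and ints are layers:

    import copy  # _extended uses copy.deepcopy internally
    layers = ["t=1", 0, 1, "t=2", 2]
    print(_extended(layers))  # ['t=2', 2, 't=1', 1, 0] -- mirrored half, comments kept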
a69d1b0c796c3d164dd50bf9012df3e9eea847bb
|
tivaliy/iot-device-simulator
|
cloud_iot_mqtt.py
|
[
"MIT"
] |
Python
|
create_from_client_id
|
<not_specific>
|
def create_from_client_id(cls, client_id):
"""
Creates a Device from client_id string.
:param client_id: client_id as a string.
"""
return cls(*client_id.split("/")[1::2])
|
Creates a Device from client_id string.
:param client_id: client_id as a string.
|
Creates a Device from client_id string.
|
[
"Creates",
"a",
"Device",
"from",
"client_id",
"string",
"."
] |
def create_from_client_id(cls, client_id):
return cls(*client_id.split("/")[1::2])
|
[
"def",
"create_from_client_id",
"(",
"cls",
",",
"client_id",
")",
":",
"return",
"cls",
"(",
"*",
"client_id",
".",
"split",
"(",
"\"/\"",
")",
"[",
"1",
":",
":",
"2",
"]",
")"
] |
Creates a Device from client_id string.
|
[
"Creates",
"a",
"Device",
"from",
"client_id",
"string",
"."
] |
[
"\"\"\"\n Creates a Device from client_id string.\n\n :param client_id: client_id as a string.\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "client_id",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "client_id",
"type": null,
"docstring": "client_id as a string.",
"docstring_tokens": [
"client_id",
"as",
"a",
"string",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def create_from_client_id(cls, client_id):
return cls(*client_id.split("/")[1::2])
| 1,433 | 212 |
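A minimal usage sketch; `Device` here is a hypothetical stand-in for the class this factory method belongs to:

    from collections import namedtuple
    Device = namedtuple("Device", "project location registry device")  # hypothetical
    cid = "projects/p1/locations/us/registries/r1/devices/d1"
    print(create_from_client_id(Device, cid))
    # Device(project='p1', location='us', registry='r1', device='d1')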
c402a8e3e43342a54fd74444f6080ab99455a9e2
|
daisukixci/macos_security
|
scripts/generate_guidance.py
|
[
"CC-BY-4.0"
] |
Python
|
is_asciidoctor_pdf_installed
|
<not_specific>
|
def is_asciidoctor_pdf_installed():
"""Checks to see if the ruby gem for asciidoctor-pdf is installed
"""
#cmd = "gem list asciidoctor-pdf -i"
cmd = "which asciidoctor-pdf"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
return output.decode("utf-8")
|
Checks to see if the ruby gem for asciidoctor-pdf is installed
|
Checks to see if the ruby gem for asciidoctor-pdf is installed
|
[
"Checks",
"to",
"see",
"if",
"the",
"ruby",
"gem",
"for",
"asciidoctor",
"-",
"pdf",
"is",
"installed"
] |
def is_asciidoctor_pdf_installed():
cmd = "which asciidoctor-pdf"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
return output.decode("utf-8")
|
[
"def",
"is_asciidoctor_pdf_installed",
"(",
")",
":",
"cmd",
"=",
"\"which asciidoctor-pdf\"",
"process",
"=",
"subprocess",
".",
"Popen",
"(",
"cmd",
".",
"split",
"(",
")",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
")",
"output",
",",
"error",
"=",
"process",
".",
"communicate",
"(",
")",
"return",
"output",
".",
"decode",
"(",
"\"utf-8\"",
")"
] |
Checks to see if the ruby gem for asciidoctor-pdf is installed
|
[
"Checks",
"to",
"see",
"if",
"the",
"ruby",
"gem",
"for",
"asciidoctor",
"-",
"pdf",
"is",
"installed"
] |
[
"\"\"\"Checks to see if the ruby gem for asciidoctor-pdf is installed\n \"\"\"",
"#cmd = \"gem list asciidoctor-pdf -i\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import subprocess
def is_asciidoctor_pdf_installed():
cmd = "which asciidoctor-pdf"
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
return output.decode("utf-8")
| 1,434 | 390 |
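A usage sketch; the result depends on the host system, since the function returns the stdout of `which` (an empty string when the tool is absent):

    import subprocess  # required by the function
    if is_asciidoctor_pdf_installed().strip():
        print("asciidoctor-pdf found on PATH")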
058c8c0ef496aad3a4583a68a9d2ebfe4dea54fb
|
itinghuang/iting-projects
|
stancode_projects/Name searching system/babynames.py
|
[
"MIT"
] |
Python
|
add_data_for_name
| null |
def add_data_for_name(name_data, year, rank, name):
"""
Adds the given year and rank to the associated name in the name_data dict.
Input:
name_data (dict): dict holding baby name data
year (str): the year of the data entry to add
rank (str): the rank of the data entry to add
name (str): the name of the data entry to add
Output:
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any values.
"""
if name in name_data:
if year in name_data[name]:
ranking = name_data[name][year] # the original ranking in the dict
if ranking > rank:
name_data[name][year] = rank #change the rank to the new one
else:
name_data[name][year] = ranking
else:
name_data[name][year] = rank
else:
name_data[name] = {year: rank}
|
Adds the given year and rank to the associated name in the name_data dict.
Input:
name_data (dict): dict holding baby name data
year (str): the year of the data entry to add
rank (str): the rank of the data entry to add
name (str): the name of the data entry to add
Output:
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any values.
|
Adds the given year and rank to the associated name in the name_data dict.
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any values.
|
[
"Adds",
"the",
"given",
"year",
"and",
"rank",
"to",
"the",
"associated",
"name",
"in",
"the",
"name_data",
"dict",
".",
"This",
"function",
"modifies",
"the",
"name_data",
"dict",
"to",
"store",
"the",
"provided",
"name",
"year",
"and",
"rank",
".",
"This",
"function",
"does",
"not",
"return",
"any",
"values",
"."
] |
def add_data_for_name(name_data, year, rank, name):
if name in name_data:
if year in name_data[name]:
ranking = name_data[name][year]
if ranking > rank:
name_data[name][year] = rank
else:
name_data[name][year] = ranking
else:
name_data[name][year] = rank
else:
name_data[name] = {year: rank}
|
[
"def",
"add_data_for_name",
"(",
"name_data",
",",
"year",
",",
"rank",
",",
"name",
")",
":",
"if",
"name",
"in",
"name_data",
":",
"if",
"year",
"in",
"name_data",
"[",
"name",
"]",
":",
"ranking",
"=",
"name_data",
"[",
"name",
"]",
"[",
"year",
"]",
"if",
"ranking",
">",
"rank",
":",
"name_data",
"[",
"name",
"]",
"[",
"year",
"]",
"=",
"rank",
"else",
":",
"name_data",
"[",
"name",
"]",
"[",
"year",
"]",
"=",
"ranking",
"else",
":",
"name_data",
"[",
"name",
"]",
"[",
"year",
"]",
"=",
"rank",
"else",
":",
"name_data",
"[",
"name",
"]",
"=",
"{",
"year",
":",
"rank",
"}"
] |
Adds the given year and rank to the associated name in the name_data dict.
|
[
"Adds",
"the",
"given",
"year",
"and",
"rank",
"to",
"the",
"associated",
"name",
"in",
"the",
"name_data",
"dict",
"."
] |
[
"\"\"\"\n Adds the given year and rank to the associated name in the name_data dict.\n\n Input:\n name_data (dict): dict holding baby name data\n year (str): the year of the data entry to add\n rank (str): the rank of the data entry to add\n name (str): the name of the data entry to add\n\n Output:\n This function modifies the name_data dict to store the provided\n name, year, and rank. This function does not return any values.\n\n \"\"\"",
"# the original ranking in the dict",
"#change the rank to the new one"
] |
[
{
"param": "name_data",
"type": null
},
{
"param": "year",
"type": null
},
{
"param": "rank",
"type": null
},
{
"param": "name",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "name_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "year",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "rank",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "name",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_data_for_name(name_data, year, rank, name):
if name in name_data:
if year in name_data[name]:
ranking = name_data[name][year]
if ranking > rank:
name_data[name][year] = rank
else:
name_data[name][year] = ranking
else:
name_data[name][year] = rank
else:
name_data[name] = {year: rank}
| 1,435 | 547 |
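A minimal usage sketch; with the documented str arguments the rank comparison is lexicographic, so the ranks below are chosen to compare as intended:

    name_data = {}
    add_data_for_name(name_data, "2000", "9", "Ada")
    add_data_for_name(name_data, "2000", "5", "Ada")  # '9' > '5', so the rank is replaced
    print(name_data)  # {'Ada': {'2000': '5'}}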
0e700759145a0cf3461a1b6af354e241b3a21651
|
kbyando/cinema-stein_unpack
|
sub20_to_binary.py
|
[
"Apache-2.0"
] |
Python
|
packl
|
<not_specific>
|
def packl(lnum, padmultiple=1):
"""Packs the lnum (which must be convertable to a long) into a
byte string 0 padded to a multiple of padmultiple bytes in size. 0
means no padding whatsoever, so that packing 0 result in an empty
string. The resulting byte string is the big-endian two's
complement representation of the passed in long."""
if lnum == 0:
return b'\0' * padmultiple
elif lnum < 0:
raise ValueError("Can only convert non-negative numbers.")
s = hex(lnum)[2:]
s = s.rstrip('L')
if len(s) & 1:
s = '0' + s
s = binascii.unhexlify(s)
if (padmultiple != 1) and (padmultiple != 0):
filled_so_far = len(s) % padmultiple
if filled_so_far != 0:
s = b'\0' * (padmultiple - filled_so_far) + s
return s
|
Packs the lnum (which must be convertable to a long) into a
byte string 0 padded to a multiple of padmultiple bytes in size. 0
means no padding whatsoever, so that packing 0 result in an empty
string. The resulting byte string is the big-endian two's
complement representation of the passed in long.
|
Packs the lnum (which must be convertable to a long) into a
byte string 0 padded to a multiple of padmultiple bytes in size. 0
means no padding whatsoever, so that packing 0 result in an empty
string. The resulting byte string is the big-endian two's
complement representation of the passed in long.
|
[
"Packs",
"the",
"lnum",
"(",
"which",
"must",
"be",
"convertable",
"to",
"a",
"long",
")",
"into",
"a",
"byte",
"string",
"0",
"padded",
"to",
"a",
"multiple",
"of",
"padmultiple",
"bytes",
"in",
"size",
".",
"0",
"means",
"no",
"padding",
"whatsoever",
"so",
"that",
"packing",
"0",
"result",
"in",
"an",
"empty",
"string",
".",
"The",
"resulting",
"byte",
"string",
"is",
"the",
"big",
"-",
"endian",
"two",
"'",
"s",
"complement",
"representation",
"of",
"the",
"passed",
"in",
"long",
"."
] |
def packl(lnum, padmultiple=1):
if lnum == 0:
return b'\0' * padmultiple
elif lnum < 0:
raise ValueError("Can only convert non-negative numbers.")
s = hex(lnum)[2:]
s = s.rstrip('L')
if len(s) & 1:
s = '0' + s
s = binascii.unhexlify(s)
if (padmultiple != 1) and (padmultiple != 0):
filled_so_far = len(s) % padmultiple
if filled_so_far != 0:
s = b'\0' * (padmultiple - filled_so_far) + s
return s
|
[
"def",
"packl",
"(",
"lnum",
",",
"padmultiple",
"=",
"1",
")",
":",
"if",
"lnum",
"==",
"0",
":",
"return",
"b'\\0'",
"*",
"padmultiple",
"elif",
"lnum",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Can only convert non-negative numbers.\"",
")",
"s",
"=",
"hex",
"(",
"lnum",
")",
"[",
"2",
":",
"]",
"s",
"=",
"s",
".",
"rstrip",
"(",
"'L'",
")",
"if",
"len",
"(",
"s",
")",
"&",
"1",
":",
"s",
"=",
"'0'",
"+",
"s",
"s",
"=",
"binascii",
".",
"unhexlify",
"(",
"s",
")",
"if",
"(",
"padmultiple",
"!=",
"1",
")",
"and",
"(",
"padmultiple",
"!=",
"0",
")",
":",
"filled_so_far",
"=",
"len",
"(",
"s",
")",
"%",
"padmultiple",
"if",
"filled_so_far",
"!=",
"0",
":",
"s",
"=",
"b'\\0'",
"*",
"(",
"padmultiple",
"-",
"filled_so_far",
")",
"+",
"s",
"return",
"s"
] |
Packs the lnum (which must be convertable to a long) into a
byte string 0 padded to a multiple of padmultiple bytes in size.
|
[
"Packs",
"the",
"lnum",
"(",
"which",
"must",
"be",
"convertable",
"to",
"a",
"long",
")",
"into",
"a",
"byte",
"string",
"0",
"padded",
"to",
"a",
"multiple",
"of",
"padmultiple",
"bytes",
"in",
"size",
"."
] |
[
"\"\"\"Packs the lnum (which must be convertable to a long) into a\n byte string 0 padded to a multiple of padmultiple bytes in size. 0\n means no padding whatsoever, so that packing 0 result in an empty\n string. The resulting byte string is the big-endian two's\n complement representation of the passed in long.\"\"\""
] |
[
{
"param": "lnum",
"type": null
},
{
"param": "padmultiple",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "lnum",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "padmultiple",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import binascii
def packl(lnum, padmultiple=1):
if lnum == 0:
return b'\0' * padmultiple
elif lnum < 0:
raise ValueError("Can only convert non-negative numbers.")
s = hex(lnum)[2:]
s = s.rstrip('L')
if len(s) & 1:
s = '0' + s
s = binascii.unhexlify(s)
if (padmultiple != 1) and (padmultiple != 0):
filled_so_far = len(s) % padmultiple
if filled_so_far != 0:
s = b'\0' * (padmultiple - filled_so_far) + s
return s
| 1,436 | 174 |
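A minimal usage sketch (the values are hypothetical):

    import binascii  # required by packl
    print(packl(0x1234, padmultiple=4))  # b'\x00\x00\x124'  (0x34 renders as '4')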
f1a34002382984fc5a8bc03e202c3965754db40f
|
itsayellow/finddup
|
src/finddup/finddup.py
|
[
"MIT"
] |
Python
|
recurse_subtree
|
<not_specific>
|
def recurse_subtree(name, subtree, dir_dict, fileblocks):
"""Recurse subtree of filetree, at each dir saving dir data id, size.
Directories are handled after files, because the ID string for a dir
is based on the dir/file IDs hierarchically contained in that dir.
Recursion causes lowest leaf dirs to be ID'ed first
Every collection of dir ID string components are alphabetized to ensure
the same order for the same set of file IDs.
Saves dir IDs into dir_dict. Saves dir size in blocks into fileblocks.
Example:
dir A contains: dir B, file C (ID: 345)
dir B contains: file D (ID: 401), file E (ID: 405)
ID string for dir B: [401,405]
ID string for dir A: [[401,405],345]
Args:
name: name of filepath of this directory
subtree: dict in filetree of this directory
dir_dict: READ/WRITE key: hier_id_str, item: list of dir paths
with this ID string
fileblocks: READ/WRITE dict with key: filepath, item: size in blocks
Returns:
hier_id_str: string based only on fileids of files/dirs inside
dir, specifying all fileids of files/dirs inside this dir
hierarchically down to lowest levels
"""
itemlist = []
dir_blocks = 0
for key in subtree.keys():
# key is name of dir/file inside of this dir
if isinstance(subtree[key], dict):
item = recurse_subtree(
os.path.join(name, key), subtree[key], dir_dict, fileblocks
)
else:
item = str(subtree[key])
dir_blocks += fileblocks[os.path.join(name, key)]
itemlist.append(item)
# put file blocks back into fileblocks db
fileblocks[name] = dir_blocks
# if any one item is "-1" (unknown file) then this whole directory is "-1"
# in this way we mark every subdir above unknown file as unknown
if "-1" in itemlist:
hier_id_str = "-1"
else:
itemlist.sort()
hier_id_str = "[" + ",".join(itemlist) + "]"
dir_dict.setdefault(hier_id_str, []).append(name)
return hier_id_str
|
Recurse subtree of filetree, at each dir saving dir data id, size.
Directories are handled after files, because the ID string for a dir
is based on the dir/file IDs hierarchically contained in that dir.
Recursion causes lowest leaf dirs to be ID'ed first
Every collection of dir ID string components are alphabetized to ensure
the same order for the same set of file IDs.
Saves dir IDs into dir_dict. Saves dir size in blocks into fileblocks.
Example:
dir A contains: dir B, file C (ID: 345)
dir B contains: file D (ID: 401), file E (ID: 405)
ID string for dir B: [401,405]
ID string for dir A: [[401,405],345]
Args:
name: name of filepath of this directory
subtree: dict in filetree of this directory
dir_dict: READ/WRITE key: hier_id_str, item: list of dir paths
with this ID string
fileblocks: READ/WRITE dict with key: filepath, item: size in blocks
Returns:
hier_id_str: string based only on fileids of files/dirs inside
dir, specifying all fileids of files/dirs inside this dir
hierarchically down to lowest levels
|
Recurse subtree of filetree, at each dir saving dir data id, size.
Directories are handled after files, because the ID string for a dir
is based on the dir/file IDs hierarchically contained in that dir.
Recursion causes lowest leaf dirs to be ID'ed first
Every collection of dir ID string components are alphabetized to ensure
the same order for the same set of file IDs.
|
[
"Recurse",
"subtree",
"of",
"filetree",
"at",
"each",
"dir",
"saving",
"dir",
"data",
"id",
"size",
".",
"Directories",
"are",
"handled",
"after",
"files",
"because",
"the",
"ID",
"string",
"for",
"a",
"dir",
"is",
"based",
"on",
"the",
"dir",
"/",
"file",
"IDs",
"hierarchically",
"contained",
"in",
"that",
"dir",
".",
"Recursion",
"causes",
"lowest",
"leaf",
"dirs",
"to",
"be",
"ID",
"'",
"ed",
"first",
"Every",
"collection",
"of",
"dir",
"ID",
"string",
"components",
"are",
"alphabetized",
"to",
"ensure",
"the",
"same",
"order",
"for",
"the",
"same",
"set",
"of",
"file",
"IDs",
"."
] |
def recurse_subtree(name, subtree, dir_dict, fileblocks):
itemlist = []
dir_blocks = 0
for key in subtree.keys():
if isinstance(subtree[key], dict):
item = recurse_subtree(
os.path.join(name, key), subtree[key], dir_dict, fileblocks
)
else:
item = str(subtree[key])
dir_blocks += fileblocks[os.path.join(name, key)]
itemlist.append(item)
fileblocks[name] = dir_blocks
if "-1" in itemlist:
hier_id_str = "-1"
else:
itemlist.sort()
hier_id_str = "[" + ",".join(itemlist) + "]"
dir_dict.setdefault(hier_id_str, []).append(name)
return hier_id_str
|
[
"def",
"recurse_subtree",
"(",
"name",
",",
"subtree",
",",
"dir_dict",
",",
"fileblocks",
")",
":",
"itemlist",
"=",
"[",
"]",
"dir_blocks",
"=",
"0",
"for",
"key",
"in",
"subtree",
".",
"keys",
"(",
")",
":",
"if",
"isinstance",
"(",
"subtree",
"[",
"key",
"]",
",",
"dict",
")",
":",
"item",
"=",
"recurse_subtree",
"(",
"os",
".",
"path",
".",
"join",
"(",
"name",
",",
"key",
")",
",",
"subtree",
"[",
"key",
"]",
",",
"dir_dict",
",",
"fileblocks",
")",
"else",
":",
"item",
"=",
"str",
"(",
"subtree",
"[",
"key",
"]",
")",
"dir_blocks",
"+=",
"fileblocks",
"[",
"os",
".",
"path",
".",
"join",
"(",
"name",
",",
"key",
")",
"]",
"itemlist",
".",
"append",
"(",
"item",
")",
"fileblocks",
"[",
"name",
"]",
"=",
"dir_blocks",
"if",
"\"-1\"",
"in",
"itemlist",
":",
"hier_id_str",
"=",
"\"-1\"",
"else",
":",
"itemlist",
".",
"sort",
"(",
")",
"hier_id_str",
"=",
"\"[\"",
"+",
"\",\"",
".",
"join",
"(",
"itemlist",
")",
"+",
"\"]\"",
"dir_dict",
".",
"setdefault",
"(",
"hier_id_str",
",",
"[",
"]",
")",
".",
"append",
"(",
"name",
")",
"return",
"hier_id_str"
] |
Recurse subtree of filetree, at each dir saving dir data id, size.
|
[
"Recurse",
"subtree",
"of",
"filetree",
"at",
"each",
"dir",
"saving",
"dir",
"data",
"id",
"size",
"."
] |
[
"\"\"\"Recurse subtree of filetree, at each dir saving dir data id, size.\n\n Directories are handled after files, because the ID string for a dir\n is based on the dir/file IDs hierarchically contained in that dir.\n\n Recursion causes lowest leaf dirs to be ID'ed first\n\n Every collection of dir ID string components are alphabetized to ensure\n the same order for the same set of file IDs.\n\n Saves dir IDs into dir_dict. Saves dir size in blocks into fileblocks.\n Example:\n dir A contains: dir B, file C (ID: 345)\n dir B contains: file D (ID: 401), file E (ID: 405)\n ID string for dir B: [401,405]\n ID string for dir A: [[401,405],345]\n\n Args:\n name: name of filepath of this directory\n subtree: dict in filetree of this directory\n dir_dict: READ/WRITE key: hier_id_str, item: list of dir paths\n with this ID string\n fileblocks: READ/WRITE dict with key: filepath, item: size in blocks\n\n Returns:\n hier_id_str: string based only on fileids of files/dirs inside\n dir, specifying all fileids of files/dirs inside this dir\n hierarchically down to lowest levels\n \"\"\"",
"# key is name of dir/file inside of this dir",
"# put file blocks back into fileblocks db",
"# if any one item is \"-1\" (unknown file) then this whole directory is \"-1\"",
"# in this way we mark every subdir above unknown file as unknown"
] |
[
{
"param": "name",
"type": null
},
{
"param": "subtree",
"type": null
},
{
"param": "dir_dict",
"type": null
},
{
"param": "fileblocks",
"type": null
}
] |
{
"returns": [
{
"docstring": "string based only on fileids of files/dirs inside\ndir, specifying all fileids of files/dirs inside this dir\nhierarchically down to lowest levels",
"docstring_tokens": [
"string",
"based",
"only",
"on",
"fileids",
"of",
"files",
"/",
"dirs",
"inside",
"dir",
"specifying",
"all",
"fileids",
"of",
"files",
"/",
"dirs",
"inside",
"this",
"dir",
"hierarchically",
"down",
"to",
"lowest",
"levels"
],
"type": "hier_id_str"
}
],
"raises": [],
"params": [
{
"identifier": "name",
"type": null,
"docstring": "name of filepath of this directory",
"docstring_tokens": [
"name",
"of",
"filepath",
"of",
"this",
"directory"
],
"default": null,
"is_optional": null
},
{
"identifier": "subtree",
"type": null,
"docstring": "dict in filetree of this directory",
"docstring_tokens": [
"dict",
"in",
"filetree",
"of",
"this",
"directory"
],
"default": null,
"is_optional": null
},
{
"identifier": "dir_dict",
"type": null,
"docstring": "READ/WRITE key: hier_id_str, item: list of dir paths\nwith this ID string",
"docstring_tokens": [
"READ",
"/",
"WRITE",
"key",
":",
"hier_id_str",
"item",
":",
"list",
"of",
"dir",
"paths",
"with",
"this",
"ID",
"string"
],
"default": null,
"is_optional": null
},
{
"identifier": "fileblocks",
"type": null,
"docstring": "READ/WRITE dict with key: filepath, item: size in blocks",
"docstring_tokens": [
"READ",
"/",
"WRITE",
"dict",
"with",
"key",
":",
"filepath",
"item",
":",
"size",
"in",
"blocks"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": [
{
"identifier": "examples",
"docstring": null,
"docstring_tokens": [
"None"
]
}
]
}
|
import os
def recurse_subtree(name, subtree, dir_dict, fileblocks):
itemlist = []
dir_blocks = 0
for key in subtree.keys():
if isinstance(subtree[key], dict):
item = recurse_subtree(
os.path.join(name, key), subtree[key], dir_dict, fileblocks
)
else:
item = str(subtree[key])
dir_blocks += fileblocks[os.path.join(name, key)]
itemlist.append(item)
fileblocks[name] = dir_blocks
if "-1" in itemlist:
hier_id_str = "-1"
else:
itemlist.sort()
hier_id_str = "[" + ",".join(itemlist) + "]"
dir_dict.setdefault(hier_id_str, []).append(name)
return hier_id_str
| 1,437 | 539 |
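A usage sketch with a hypothetical filetree; leaf values are file ids and `fileblocks` is pre-seeded with per-file sizes. Because id components are sorted as strings, '345' precedes '[401,405]' in dir A's id string:

    import os  # required by recurse_subtree
    tree = {"B": {"D": 401, "E": 405}, "C": 345}
    blocks = {os.path.join("A", "B", "D"): 1,
              os.path.join("A", "B", "E"): 2,
              os.path.join("A", "C"): 4}
    dirs = {}
    print(recurse_subtree("A", tree, dirs, blocks))  # [345,[401,405]]
    print(blocks["A"])                               # 7 -- total blocks under A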
12526ebaf0c8b3d2de484dfdd0ed40c0e26556e2
|
kdbalabanov/fastapi-crypto-market-data-rest-api
|
app/api/apiutils.py
|
[
"MIT"
] |
Python
|
add_pct_change
| null |
def add_pct_change(df: pandas.DataFrame, column_name: str):
"""
Add a percentage change column to a pandas.DataFrame given a column name to base the computation on
:param df: The pandas.DataFrame that is to be modified
:param column_name: Column name to base the computation of % change on
"""
df['% change'] = df[column_name].pct_change() * 100
df.fillna(value=0.00, inplace=True)
|
Add a percentage change column to a pandas.DataFrame given a column name to base the computation on
:param df: The pandas.DataFrame that is to be modified
:param column_name: Column name to base the computation of % change on
|
Add a percentage change column to a pandas.DataFrame given a column name to base the computation on
|
[
"Add",
"a",
"percentage",
"change",
"column",
"to",
"a",
"pandas",
".",
"DataFrame",
"given",
"a",
"column",
"name",
"to",
"base",
"the",
"computation",
"on"
] |
def add_pct_change(df: pandas.DataFrame, column_name: str):
df['% change'] = df[column_name].pct_change() * 100
df.fillna(value=0.00, inplace=True)
|
[
"def",
"add_pct_change",
"(",
"df",
":",
"pandas",
".",
"DataFrame",
",",
"column_name",
":",
"str",
")",
":",
"df",
"[",
"'% change'",
"]",
"=",
"df",
"[",
"column_name",
"]",
".",
"pct_change",
"(",
")",
"*",
"100",
"df",
".",
"fillna",
"(",
"value",
"=",
"0.00",
",",
"inplace",
"=",
"True",
")"
] |
Add a percentage change column to a pandas.DataFrame given a column name to base the computation on
|
[
"Add",
"a",
"percentage",
"change",
"column",
"to",
"a",
"pandas",
".",
"DataFrame",
"given",
"a",
"column",
"name",
"to",
"base",
"the",
"computation",
"on"
] |
[
"\"\"\"\n Add a percentage change column to a pandas.DataFrame given a column name to base the computation on\n\n :param df: The pandas.DataFrame that is to be modified\n :param column_name: Column name to base the computation of % change on\n \"\"\""
] |
[
{
"param": "df",
"type": "pandas.DataFrame"
},
{
"param": "column_name",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "df",
"type": "pandas.DataFrame",
"docstring": "The pandas.DataFrame that is to be modified",
"docstring_tokens": [
"The",
"pandas",
".",
"DataFrame",
"that",
"is",
"to",
"be",
"modified"
],
"default": null,
"is_optional": null
},
{
"identifier": "column_name",
"type": "str",
"docstring": "Column name to base the computation of % change on",
"docstring_tokens": [
"Column",
"name",
"to",
"base",
"the",
"computation",
"of",
"%",
"change",
"on"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def add_pct_change(df: pandas.DataFrame, column_name: str):
df['% change'] = df[column_name].pct_change() * 100
df.fillna(value=0.00, inplace=True)
| 1,438 | 206 |
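A minimal usage sketch:

    import pandas
    df = pandas.DataFrame({"close": [100.0, 110.0, 99.0]})
    add_pct_change(df, "close")
    print(df["% change"].tolist())  # [0.0, 10.0, -10.0] -- first NaN filled with 0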
31f5ce19dfc1674ad9e172cdb406b6fe68adc28c
|
biolab/orange3-datasets
|
orangecontrib/datasets/api_wrapper.py
|
[
"MIT"
] |
Python
|
_get_iso_date
|
<not_specific>
|
def _get_iso_date(date_str):
"""Convert wbd date string into iso format date string.
Convert date strings such as "2005", "2002Q3" and "1999M7" into iso
formatted date strings "yyyy-mm-dd"
"""
try:
if "Q" in date_str:
year, quarter = date_str.split("Q")
return datetime.date(int(year), (int(quarter) * 3) - 2, 1)
elif "M" in date_str:
year, month = date_str.split("M")
return datetime.date(int(year), int(month), 1)
else:
return datetime.date(int(date_str), 1, 1).isoformat()
except ValueError:
# some dates contain invalid date strings such as
# "Last Known Value" or "1988-2000" and possible some more. See:
# http://api.worldbank.org/countries/PRY/indicators/
# per_lm_ac.avt_q4_urb?date=1960%3A2016&format=json
# &per_page=10000
# http://api.worldbank.org/countries/all/indicators/
# DB_mw_19apprentice?format=json&mrv=10&gapfill=y
return datetime.date.today().isoformat()
|
Convert wbd date string into iso format date string.
Convert date strings such as "2005", "2002Q3" and "1999M7" into iso
formatted date strings "yyyy-mm-dd"
|
Convert wbd date string into iso format date string.
|
[
"Convert",
"wbd",
"date",
"string",
"into",
"iso",
"format",
"date",
"string",
"."
] |
def _get_iso_date(date_str):
try:
if "Q" in date_str:
year, quarter = date_str.split("Q")
return datetime.date(int(year), (int(quarter) * 3) - 2, 1)
elif "M" in date_str:
year, month = date_str.split("M")
return datetime.date(int(year), int(month), 1)
else:
return datetime.date(int(date_str), 1, 1).isoformat()
except ValueError:
return datetime.date.today().isoformat()
|
[
"def",
"_get_iso_date",
"(",
"date_str",
")",
":",
"try",
":",
"if",
"\"Q\"",
"in",
"date_str",
":",
"year",
",",
"quarter",
"=",
"date_str",
".",
"split",
"(",
"\"Q\"",
")",
"return",
"datetime",
".",
"date",
"(",
"int",
"(",
"year",
")",
",",
"(",
"int",
"(",
"quarter",
")",
"*",
"3",
")",
"-",
"2",
",",
"1",
")",
"elif",
"\"M\"",
"in",
"date_str",
":",
"year",
",",
"month",
"=",
"date_str",
".",
"split",
"(",
"\"M\"",
")",
"return",
"datetime",
".",
"date",
"(",
"int",
"(",
"year",
")",
",",
"int",
"(",
"month",
")",
",",
"1",
")",
"else",
":",
"return",
"datetime",
".",
"date",
"(",
"int",
"(",
"date_str",
")",
",",
"1",
",",
"1",
")",
".",
"isoformat",
"(",
")",
"except",
"ValueError",
":",
"return",
"datetime",
".",
"date",
".",
"today",
"(",
")",
".",
"isoformat",
"(",
")"
] |
Convert wbd date string into iso format date string.
|
[
"Convert",
"wbd",
"date",
"string",
"into",
"iso",
"format",
"date",
"string",
"."
] |
[
"\"\"\"Convert wbd date string into iso format date string.\n\n Convert date strings such as \"2005\", \"2002Q3\" and \"1999M7\" into iso\n formatted date strings \"yyyy-mm-dd\"\n \"\"\"",
"# some dates contain invalid date strings such as",
"# \"Last Known Value\" or \"1988-2000\" and possible some more. See:",
"# http://api.worldbank.org/countries/PRY/indicators/",
"# per_lm_ac.avt_q4_urb?date=1960%3A2016&format=json",
"# &per_page=10000",
"# http://api.worldbank.org/countries/all/indicators/",
"# DB_mw_19apprentice?format=json&mrv=10&gapfill=y"
] |
[
{
"param": "date_str",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "date_str",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import datetime
def _get_iso_date(date_str):
try:
if "Q" in date_str:
year, quarter = date_str.split("Q")
return datetime.date(int(year), (int(quarter) * 3) - 2, 1)
elif "M" in date_str:
year, month = date_str.split("M")
return datetime.date(int(year), int(month), 1)
else:
return datetime.date(int(date_str), 1, 1).isoformat()
except ValueError:
return datetime.date.today().isoformat()
| 1,439 | 557 |
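A usage sketch; note the asymmetry in the code above: the yearly branch returns an isoformat string, while the Q/M branches return datetime.date objects:

    import datetime  # required by _get_iso_date
    print(_get_iso_date("2005"))    # '2005-01-01'
    print(_get_iso_date("2002Q3"))  # datetime.date(2002, 7, 1)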
ef64c9f2a30dd4243fb4ac875a130001d5ef42a8
|
cossatot/halfspace
|
halfspace/projections.py
|
[
"BSD-3-Clause"
] |
Python
|
first_tensor_invariant
|
<not_specific>
|
def first_tensor_invariant(A):
"""
Calculates the first tensor invariant of a symmetric 3x3 matrix.
Returns a scalar.
"""
return A[0,0] + A[1,1] + A[2,2]
|
Calculates the first tensor invariant of a symmetric 3x3 matrix.
Returns a scalar.
|
Calculates the first tensor invariant of a symmetric 3x3 matrix.
Returns a scalar.
|
[
"Calculates",
"the",
"first",
"tensor",
"invariant",
"of",
"a",
"symmetric",
"3x3",
"matrix",
".",
"Returns",
"a",
"scalar",
"."
] |
def first_tensor_invariant(A):
return A[0,0] + A[1,1] + A[2,2]
|
[
"def",
"first_tensor_invariant",
"(",
"A",
")",
":",
"return",
"A",
"[",
"0",
",",
"0",
"]",
"+",
"A",
"[",
"1",
",",
"1",
"]",
"+",
"A",
"[",
"2",
",",
"2",
"]"
] |
Calculates the first tensor invariant of a symmetric 3x3 matrix.
|
[
"Calculates",
"the",
"first",
"tensor",
"invariant",
"of",
"a",
"symmetric",
"3x3",
"matrix",
"."
] |
[
"\"\"\"\n Calculates the first tensor invariant of a symmetric 3x3 matrix.\n\n Returns a scalar.\n \"\"\""
] |
[
{
"param": "A",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "A",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def first_tensor_invariant(A):
return A[0,0] + A[1,1] + A[2,2]
| 1,440 | 449 |
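A minimal usage sketch; the `A[i, j]` indexing suggests a NumPy array, so that is assumed here:

    import numpy as np
    A = np.diag([1.0, 2.0, 3.0])
    print(first_tensor_invariant(A))  # 6.0 -- the trace of A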
0a3845708a2f673a61b329be3525c6a6cb0e6529
|
AlexanderKalistratov/hpat
|
tests_perf/runner.py
|
[
"BSD-2-Clause"
] |
Python
|
dump_results
| null |
def dump_results(results, file_path):
"""Dump benchmarking results to json-file"""
file_path.parent.mkdir(parents=True, exist_ok=True)
with file_path.open('w', encoding='utf-8') as fd:
json.dump(results, fd)
|
Dump benchmarking results to json-file
|
Dump benchmarking results to json-file
|
[
"Dump",
"benchmarking",
"results",
"to",
"json",
"-",
"file"
] |
def dump_results(results, file_path):
file_path.parent.mkdir(parents=True, exist_ok=True)
with file_path.open('w', encoding='utf-8') as fd:
json.dump(results, fd)
|
[
"def",
"dump_results",
"(",
"results",
",",
"file_path",
")",
":",
"file_path",
".",
"parent",
".",
"mkdir",
"(",
"parents",
"=",
"True",
",",
"exist_ok",
"=",
"True",
")",
"with",
"file_path",
".",
"open",
"(",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"fd",
":",
"json",
".",
"dump",
"(",
"results",
",",
"fd",
")"
] |
Dump benchmarking results to json-file
|
[
"Dump",
"benchmarking",
"results",
"to",
"json",
"-",
"file"
] |
[
"\"\"\"Dump benchmarking results to json-file\"\"\""
] |
[
{
"param": "results",
"type": null
},
{
"param": "file_path",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "results",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "file_path",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import json
def dump_results(results, file_path):
file_path.parent.mkdir(parents=True, exist_ok=True)
with file_path.open('w', encoding='utf-8') as fd:
json.dump(results, fd)
| 1,443 | 681 |
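A minimal usage sketch; the `.parent.mkdir` / `.open` calls imply `file_path` is a pathlib.Path:

    import json               # required by dump_results
    from pathlib import Path
    dump_results({"bench": 1.23}, Path("out") / "results.json")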
5d4bf3d15ab31b56451d19b69a70e9ce0c44079b
|
SmirkyGraphs/ri-state-payroll
|
src/pipeline.py
|
[
"MIT"
] |
Python
|
unique_name_key
|
<not_specific>
|
def unique_name_key(df):
"""
without any sort of "employee id" or key to lookup, the best we have is
labeling people by first-m-last name. This works fine for just the GO,
however when used with all departments you run into some duplicates.
"""
df['uid'] = df['first'] + df['m'] + df['last']
return df
|
without any sort of "employee id" or key to lookup, the best we have is
labeling people by first-m-last name. This works fine for just the GO,
however when used with all departments you run into some duplicates.
|
without any sort of "employee id" or key to lookup, the best we have is
labeling people by first-m-last name. This works fine for just the GO,
however when used with all departments you run into some duplicates.
|
[
"without",
"any",
"sort",
"of",
"\"",
"employee",
"id",
"\"",
"or",
"key",
"to",
"lookup",
"the",
"best",
"we",
"have",
"is",
"labeling",
"people",
"by",
"first",
"-",
"m",
"-",
"last",
"name",
".",
"This",
"works",
"fine",
"for",
"just",
"the",
"GO",
"however",
"when",
"used",
"with",
"all",
"departments",
"you",
"run",
"into",
"some",
"duplicates",
"."
] |
def unique_name_key(df):
df['uid'] = df['first'] + df['m'] + df['last']
return df
|
[
"def",
"unique_name_key",
"(",
"df",
")",
":",
"df",
"[",
"'uid'",
"]",
"=",
"df",
"[",
"'first'",
"]",
"+",
"df",
"[",
"'m'",
"]",
"+",
"df",
"[",
"'last'",
"]",
"return",
"df"
] |
without any sort of "employee id" or key to lookup, the best we have is
labeling people by first-m-last name.
|
[
"without",
"any",
"sort",
"of",
"\"",
"employee",
"id",
"\"",
"or",
"key",
"to",
"lookup",
"the",
"best",
"we",
"have",
"is",
"labeling",
"people",
"by",
"first",
"-",
"m",
"-",
"last",
"name",
"."
] |
[
"\"\"\"\n without any sort of \"employee id\" or key to lookup, the best we have is\n labeling people by first-m-last name. This works fine for just the GO, \n however when used with all departments you run into some duplicates. \n \"\"\""
] |
[
{
"param": "df",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "df",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def unique_name_key(df):
df['uid'] = df['first'] + df['m'] + df['last']
return df
| 1,444 | 387 |
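A minimal usage sketch, assuming a pandas DataFrame with first/m/last columns:

    import pandas as pd
    df = pd.DataFrame({"first": ["Ann"], "m": ["B"], "last": ["Cole"]})
    print(unique_name_key(df)["uid"].iloc[0])  # 'AnnBCole'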
6a06d361c8bf10c3a5e44bced1559353a9f620c8
|
Summer-Ronin/Python-Fundamentals-for-Django
|
Level_One/Exercises/a_simple_game.py
|
[
"CNRI-Python"
] |
Python
|
check_guess
|
<not_specific>
|
def check_guess(comp_numb, user_guess):
"""
Compare user guess with computer gen number
Input: computer gen number, user guess number
Output: a statement
"""
if comp_numb == user_guess:
return 'BREAK'
|
Compare user guess with computer gen number
Input: computer gen number, user guess number
Output: a statement
|
Compare user guess with computer gen number
Input: computer gen number, user guess number
Output: a statement
|
[
"Compare",
"user",
"guess",
"with",
"computer",
"gen",
"number",
"Input",
":",
"computer",
"gen",
"number",
"user",
"guess",
"number",
"Output",
":",
"a",
"statement"
] |
def check_guess(comp_numb, user_guess):
if comp_numb == user_guess:
return 'BREAK'
|
[
"def",
"check_guess",
"(",
"comp_numb",
",",
"user_guess",
")",
":",
"if",
"comp_numb",
"==",
"user_guess",
":",
"return",
"'BREAK'"
] |
Compare user guess with computer gen number
Input: computer gen number, user guess number
Output: a statement
|
[
"Compare",
"user",
"guess",
"with",
"computer",
"gen",
"number",
"Input",
":",
"computer",
"gen",
"number",
"user",
"guess",
"number",
"Output",
":",
"a",
"statement"
] |
[
"\"\"\"\n Compare user guess with computer gen number\n Input: computer gen number, user guess number\n Output: a statement\n \"\"\""
] |
[
{
"param": "comp_numb",
"type": null
},
{
"param": "user_guess",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "comp_numb",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "user_guess",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def check_guess(comp_numb, user_guess):
if comp_numb == user_guess:
return 'BREAK'
| 1,445 | 572 |
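A minimal usage sketch; there is no explicit return on a mismatch, so the function yields None in that case:

    print(check_guess(7, 7))  # 'BREAK'
    print(check_guess(7, 3))  # None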
df1f4094a1033deeda7cee5614cfc10704ef79e7
|
zaber-paul/base
|
cloudmesh_base/debug.py
|
[
"Apache-2.0"
] |
Python
|
WHERE
|
<not_specific>
|
def WHERE(back=0):
"""
Prints information about where this function is called, including filename and line number as well as function.
:param back: the context of the call, typically you can omit
"""
frame = sys._getframe(back + 1)
return "{0} {1} {2}()".format(os.path.basename(frame.f_code.co_filename),
frame.f_lineno,
frame.f_code.co_name)
|
Prints information about where this function is called, including filename and line number as well as function.
:param back: the context of the call, typically you can omit
|
Prints information about where this function is called, including filename and line number as well as function.
|
[
"Prints",
"information",
"about",
"where",
"this",
"function",
"is",
"called",
"including",
"filename",
"and",
"line",
"number",
"as",
"well",
"as",
"function",
"."
] |
def WHERE(back=0):
frame = sys._getframe(back + 1)
return "{0} {1} {2}()".format(os.path.basename(frame.f_code.co_filename),
frame.f_lineno,
frame.f_code.co_name)
|
[
"def",
"WHERE",
"(",
"back",
"=",
"0",
")",
":",
"frame",
"=",
"sys",
".",
"_getframe",
"(",
"back",
"+",
"1",
")",
"return",
"\"{0} {1} {2}()\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"basename",
"(",
"frame",
".",
"f_code",
".",
"co_filename",
")",
",",
"frame",
".",
"f_lineno",
",",
"frame",
".",
"f_code",
".",
"co_name",
")"
] |
Prints information about where this function is called, including filename and line number as well as function.
|
[
"Prints",
"information",
"about",
"where",
"this",
"function",
"is",
"called",
"including",
"filename",
"and",
"line",
"number",
"as",
"well",
"as",
"function",
"."
] |
[
"\"\"\"\n Prints information about where this function is called, including filename and line number as well as function.\n \n :param back: the context of the call, typically you can omit\n \"\"\""
] |
[
{
"param": "back",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "back",
"type": null,
"docstring": "the context of the call, typically you can omit",
"docstring_tokens": [
"the",
"context",
"of",
"the",
"call",
"typically",
"you",
"can",
"omit"
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import sys
import os
def WHERE(back=0):
frame = sys._getframe(back + 1)
return "{0} {1} {2}()".format(os.path.basename(frame.f_code.co_filename),
frame.f_lineno,
frame.f_code.co_name)
| 1,446 | 653 |
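A usage sketch; the exact output depends on the file name and line number at the call site:

    import sys, os  # required by WHERE
    def caller():
        return WHERE()
    print(caller())  # e.g. 'example.py 3 caller()'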
e6489c597dabfb375cc876712048bc632b416a90
|
calm-cookie/multiwoz-to-hindi
|
hindi.py
|
[
"MIT"
] |
Python
|
write_to_file
| null |
def write_to_file(json_data, path_to_file):
'''
Write the contents to a file
'''
f = open(path_to_file, 'w')
json.dump(json_data, f, indent=2)
f.close()
|
Write the contents to a file
|
Write the contents to a file
|
[
"Write",
"the",
"contents",
"to",
"a",
"file"
] |
def write_to_file(json_data, path_to_file):
f = open(path_to_file, 'w')
json.dump(json_data, f, indent=2)
f.close()
|
[
"def",
"write_to_file",
"(",
"json_data",
",",
"path_to_file",
")",
":",
"f",
"=",
"open",
"(",
"path_to_file",
",",
"'w'",
")",
"json",
".",
"dump",
"(",
"json_data",
",",
"f",
",",
"indent",
"=",
"2",
")",
"f",
".",
"close",
"(",
")"
] |
Write the contents to a file
|
[
"Write",
"the",
"contents",
"to",
"a",
"file"
] |
[
"'''\r\n Write the contents to a file\r\n '''"
] |
[
{
"param": "json_data",
"type": null
},
{
"param": "path_to_file",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "json_data",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "path_to_file",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import json
def write_to_file(json_data, path_to_file):
f = open(path_to_file, 'w')
json.dump(json_data, f, indent=2)
f.close()
| 1,447 | 157 |
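A minimal usage sketch (the output file name is hypothetical):

    import json  # required by write_to_file
    write_to_file({"dialogue": []}, "out.json")  # writes indented JSON to out.json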
1c2b96c9e94e311f4a24da718b549b59fea95823
|
ChrisKeefe/Network-Analysis-Made-Simple
|
nams/solutions/io.py
|
[
"MIT"
] |
Python
|
filter_graph
|
<not_specific>
|
def filter_graph(G, minimum_num_trips):
"""
Filter the graph such that
only edges that have minimum_num_trips or more
are present.
"""
G_filtered = G.copy()
for u, v, d in G.edges(data=True):
if d["num_trips"] < minimum_num_trips:
G_filtered.remove_edge(u, v)
return G_filtered
|
Filter the graph such that
only edges that have minimum_num_trips or more
are present.
|
Filter the graph such that
only edges that have minimum_num_trips or more
are present.
|
[
"Filter",
"the",
"graph",
"such",
"that",
"only",
"edges",
"that",
"have",
"minimum_num_trips",
"or",
"more",
"are",
"present",
"."
] |
def filter_graph(G, minimum_num_trips):
G_filtered = G.copy()
for u, v, d in G.edges(data=True):
if d["num_trips"] < minimum_num_trips:
G_filtered.remove_edge(u, v)
return G_filtered
|
[
"def",
"filter_graph",
"(",
"G",
",",
"minimum_num_trips",
")",
":",
"G_filtered",
"=",
"G",
".",
"copy",
"(",
")",
"for",
"u",
",",
"v",
",",
"d",
"in",
"G",
".",
"edges",
"(",
"data",
"=",
"True",
")",
":",
"if",
"d",
"[",
"\"num_trips\"",
"]",
"<",
"minimum_num_trips",
":",
"G_filtered",
".",
"remove_edge",
"(",
"u",
",",
"v",
")",
"return",
"G_filtered"
] |
Filter the graph such that
only edges that have minimum_num_trips or more
are present.
|
[
"Filter",
"the",
"graph",
"such",
"that",
"only",
"edges",
"that",
"have",
"minimum_num_trips",
"or",
"more",
"are",
"present",
"."
] |
[
"\"\"\"\n Filter the graph such that\n only edges that have minimum_num_trips or more\n are present.\n \"\"\""
] |
[
{
"param": "G",
"type": null
},
{
"param": "minimum_num_trips",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "G",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "minimum_num_trips",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def filter_graph(G, minimum_num_trips):
G_filtered = G.copy()
for u, v, d in G.edges(data=True):
if d["num_trips"] < minimum_num_trips:
G_filtered.remove_edge(u, v)
return G_filtered
| 1,449 | 714 |
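A minimal usage sketch; the `G.edges(data=True)` / `remove_edge` API suggests networkx, so that is assumed here:

    import networkx as nx
    G = nx.Graph()
    G.add_edge("a", "b", num_trips=5)
    G.add_edge("b", "c", num_trips=1)
    H = filter_graph(G, minimum_num_trips=3)
    print(list(H.edges()))  # [('a', 'b')]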
bd8dbe363744352999da127c76f099a0a6c9f67d
|
alehpineda/bitesofpy
|
191/bmi1.py
|
[
"MIT"
] |
Python
|
calc_bmi
|
<not_specific>
|
def calc_bmi(weight, length):
"""Provided/DONE:
Calc BMI give a weight in kg and length in cm, return the BMI
rounded on 2 decimals"""
bmi = float(weight) / ((float(length) / 100) ** 2)
return round(bmi, 2)
|
Provided/DONE:
Calc BMI give a weight in kg and length in cm, return the BMI
rounded on 2 decimals
|
Provided/DONE:
Calc BMI give a weight in kg and length in cm, return the BMI
rounded on 2 decimals
|
[
"Provided",
"/",
"DONE",
":",
"Calc",
"BMI",
"give",
"a",
"weight",
"in",
"kg",
"and",
"length",
"in",
"cm",
"return",
"the",
"BMI",
"rounded",
"on",
"2",
"decimals"
] |
def calc_bmi(weight, length):
bmi = float(weight) / ((float(length) / 100) ** 2)
return round(bmi, 2)
|
[
"def",
"calc_bmi",
"(",
"weight",
",",
"length",
")",
":",
"bmi",
"=",
"float",
"(",
"weight",
")",
"/",
"(",
"(",
"float",
"(",
"length",
")",
"/",
"100",
")",
"**",
"2",
")",
"return",
"round",
"(",
"bmi",
",",
"2",
")"
] |
Provided/DONE:
Calc BMI give a weight in kg and length in cm, return the BMI
rounded on 2 decimals
|
[
"Provided",
"/",
"DONE",
":",
"Calc",
"BMI",
"give",
"a",
"weight",
"in",
"kg",
"and",
"length",
"in",
"cm",
"return",
"the",
"BMI",
"rounded",
"on",
"2",
"decimals"
] |
[
"\"\"\"Provided/DONE:\n Calc BMI give a weight in kg and length in cm, return the BMI\n rounded on 2 decimals\"\"\""
] |
[
{
"param": "weight",
"type": null
},
{
"param": "length",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "weight",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "length",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def calc_bmi(weight, length):
bmi = float(weight) / ((float(length) / 100) ** 2)
return round(bmi, 2)
| 1,450 | 743 |
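A minimal usage sketch (70 kg, 175 cm):

    print(calc_bmi(70, 175))  # 22.86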
525a9d500efa159c54c1104c7cf25cd60afd63b8
|
nprezant/traveling-salesman
|
mlga/evolution.py
|
[
"MIT"
] |
Python
|
mutate_child
| null |
def mutate_child(child, chance):
'''Mutates each gene in child with a chance of chance
Requires that the gene has a mutation and copy method.'''
if random.random() < chance:
gene_index = random.choice(range(len(child.genes)))
new_gene = child.genes[gene_index].copy()
new_gene.mutate()
child.genes[gene_index] = new_gene
child.clear_fitness()
|
Mutates each gene in child with a chance of chance
Requires that the gene has a mutation and copy method.
|
Mutates each gene in child with a chance of chance
Requires that the gene has a mutation and copy method.
|
[
"Mutates",
"each",
"gene",
"in",
"child",
"with",
"a",
"chance",
"of",
"chance",
"Requires",
"that",
"the",
"gene",
"has",
"a",
"mutation",
"and",
"copy",
"method",
"."
] |
def mutate_child(child, chance):
if random.random() < chance:
gene_index = random.choice(range(len(child.genes)))
new_gene = child.genes[gene_index].copy()
new_gene.mutate()
child.genes[gene_index] = new_gene
child.clear_fitness()
|
[
"def",
"mutate_child",
"(",
"child",
",",
"chance",
")",
":",
"if",
"random",
".",
"random",
"(",
")",
"<",
"chance",
":",
"gene_index",
"=",
"random",
".",
"choice",
"(",
"range",
"(",
"len",
"(",
"child",
".",
"genes",
")",
")",
")",
"new_gene",
"=",
"child",
".",
"genes",
"[",
"gene_index",
"]",
".",
"copy",
"(",
")",
"new_gene",
".",
"mutate",
"(",
")",
"child",
".",
"genes",
"[",
"gene_index",
"]",
"=",
"new_gene",
"child",
".",
"clear_fitness",
"(",
")"
] |
Mutates each gene in child with a chance of chance
Requires that the gene has a mutation and copy method.
|
[
"Mutates",
"each",
"gene",
"in",
"child",
"with",
"a",
"chance",
"of",
"chance",
"Requires",
"that",
"the",
"gene",
"has",
"a",
"mutation",
"and",
"copy",
"method",
"."
] |
[
"'''Mutates each gene in child with a chance of chance\n Requires that the gene has a mutation and copy method.'''"
] |
[
{
"param": "child",
"type": null
},
{
"param": "chance",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "child",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "chance",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import random
def mutate_child(child, chance):
if random.random() < chance:
gene_index = random.choice(range(len(child.genes)))
new_gene = child.genes[gene_index].copy()
new_gene.mutate()
child.genes[gene_index] = new_gene
child.clear_fitness()
| 1,451 | 934 |
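A usage sketch with hypothetical Gene/Child classes supplying the interface the function requires (a `genes` list plus `copy`, `mutate`, and `clear_fitness` methods); chance=1.0 forces the mutation branch:

    import random  # required by mutate_child

    class Gene:  # hypothetical gene
        def __init__(self, v): self.v = v
        def copy(self): return Gene(self.v)
        def mutate(self): self.v = -self.v

    class Child:  # hypothetical individual
        def __init__(self, genes): self.genes = genes
        def clear_fitness(self): self.fitness = None

    c = Child([Gene(1), Gene(2)])
    mutate_child(c, chance=1.0)
    print([g.v for g in c.genes])  # exactly one gene has flipped sign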
25e380e1c347826ebee26a20f1b1880ced72e1ff
|
isuruf/pythran
|
pythran/tests/euler/euler36.py
|
[
"BSD-3-Clause"
] |
Python
|
solve
|
<not_specific>
|
def solve():
'''
The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
(Please note that the palindromic number, in either base, may not include leading zeros.)
'''
def ispalindrome(n, base):
digits = []
reverse = []
while n > 0:
d = str(n % base)
digits.append(d)
reverse.insert(0, d)
n = n // base
return digits == reverse
return sum(n for n in range(1, 1000000)
if ispalindrome(n, 10) and ispalindrome(n, 2))
|
The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
(Please note that the palindromic number, in either base, may not include leading zeros.)
|
The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.
(Please note that the palindromic number, in either base, may not include leading zeros.)
|
[
"The",
"decimal",
"number",
"585",
"=",
"10010010012",
"(",
"binary",
")",
"is",
"palindromic",
"in",
"both",
"bases",
".",
"Find",
"the",
"sum",
"of",
"all",
"numbers",
"less",
"than",
"one",
"million",
"which",
"are",
"palindromic",
"in",
"base",
"10",
"and",
"base",
"2",
".",
"(",
"Please",
"note",
"that",
"the",
"palindromic",
"number",
"in",
"either",
"base",
"may",
"not",
"include",
"leading",
"zeros",
".",
")"
] |
def solve():
def ispalindrome(n, base):
digits = []
reverse = []
while n > 0:
d = str(n % base)
digits.append(d)
reverse.insert(0, d)
n = n // base
return digits == reverse
return sum(n for n in range(1, 1000000)
if ispalindrome(n, 10) and ispalindrome(n, 2))
|
[
"def",
"solve",
"(",
")",
":",
"def",
"ispalindrome",
"(",
"n",
",",
"base",
")",
":",
"digits",
"=",
"[",
"]",
"reverse",
"=",
"[",
"]",
"while",
"n",
">",
"0",
":",
"d",
"=",
"str",
"(",
"n",
"%",
"base",
")",
"digits",
".",
"append",
"(",
"d",
")",
"reverse",
".",
"insert",
"(",
"0",
",",
"d",
")",
"n",
"=",
"n",
"//",
"base",
"return",
"digits",
"==",
"reverse",
"return",
"sum",
"(",
"n",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"1000000",
")",
"if",
"ispalindrome",
"(",
"n",
",",
"10",
")",
"and",
"ispalindrome",
"(",
"n",
",",
"2",
")",
")"
] |
The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.
|
[
"The",
"decimal",
"number",
"585",
"=",
"10010010012",
"(",
"binary",
")",
"is",
"palindromic",
"in",
"both",
"bases",
"."
] |
[
"'''\n The decimal number, 585 = 10010010012 (binary), is palindromic in both bases.\n\n Find the sum of all numbers, less than one million, which are palindromic in base 10 and base 2.\n\n (Please note that the palindromic number, in either base, may not include leading zeros.)\n '''"
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
def solve():
def ispalindrome(n, base):
digits = []
reverse = []
while n > 0:
d = str(n % base)
digits.append(d)
reverse.insert(0, d)
n = n // base
return digits == reverse
return sum(n for n in range(1, 1000000)
if ispalindrome(n, 10) and ispalindrome(n, 2))
| 1,452 | 697 |
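A minimal usage sketch; the call iterates up to one million, so it is slow but deterministic:

    print(solve())  # 872187 -- the published Project Euler 36 answer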
b0738d318a50951c12716d8081ebc1e62c84f2e1
|
vyahello/upgrade-python-kata
|
kata/06/element_equals_index.py
|
[
"MIT"
] |
Python
|
index_search
|
int
|
def index_search(array: Sequence[int], low: int, high: int) -> int:
"""Searches index of an array by given low and high values."""
if low > high:
return -1
median: int = math.floor(high + low / 2)
if array[median] == median:
left_value: int = index_search(array, low, median - 1)
if left_value != -1 and left_value < median:
return left_value
return median
if array[median] > median:
return index_search(array, low, median - 1)
return index_search(array, median + 1, high)
|
Searches index of an array by given low and high values.
|
Searches index of an array by given low and high values.
|
[
"Searches",
"index",
"of",
"an",
"array",
"by",
"given",
"low",
"and",
"high",
"values",
"."
] |
def index_search(array: Sequence[int], low: int, high: int) -> int:
if low > high:
return -1
median: int = math.floor(high + low / 2)
if array[median] == median:
left_value: int = index_search(array, low, median - 1)
if left_value != -1 and left_value < median:
return left_value
return median
if array[median] > median:
return index_search(array, low, median - 1)
return index_search(array, median + 1, high)
|
[
"def",
"index_search",
"(",
"array",
":",
"Sequence",
"[",
"int",
"]",
",",
"low",
":",
"int",
",",
"high",
":",
"int",
")",
"->",
"int",
":",
"if",
"low",
">",
"high",
":",
"return",
"-",
"1",
"median",
":",
"int",
"=",
"math",
".",
"floor",
"(",
"high",
"+",
"low",
"/",
"2",
")",
"if",
"array",
"[",
"median",
"]",
"==",
"median",
":",
"left_value",
":",
"int",
"=",
"index_search",
"(",
"array",
",",
"low",
",",
"median",
"-",
"1",
")",
"if",
"left_value",
"!=",
"-",
"1",
"and",
"left_value",
"<",
"median",
":",
"return",
"left_value",
"return",
"median",
"if",
"array",
"[",
"median",
"]",
">",
"median",
":",
"return",
"index_search",
"(",
"array",
",",
"low",
",",
"median",
"-",
"1",
")",
"return",
"index_search",
"(",
"array",
",",
"median",
"+",
"1",
",",
"high",
")"
] |
Searches index of an array by given low and high values.
|
[
"Searches",
"index",
"of",
"an",
"array",
"by",
"given",
"low",
"and",
"high",
"values",
"."
] |
[
"\"\"\"Searches index of an array by given low and high values.\"\"\""
] |
[
{
"param": "array",
"type": "Sequence[int]"
},
{
"param": "low",
"type": "int"
},
{
"param": "high",
"type": "int"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "array",
"type": "Sequence[int]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "low",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "high",
"type": "int",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import math
def index_search(array: Sequence[int], low: int, high: int) -> int:
if low > high:
return -1
median: int = math.floor(high + low / 2)
if array[median] == median:
left_value: int = index_search(array, low, median - 1)
if left_value != -1 and left_value < median:
return left_value
return median
if array[median] > median:
return index_search(array, low, median - 1)
return index_search(array, median + 1, high)
| 1,453 | 509 |
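A hedged note on the entry above: `math.floor(high + low / 2)` parses as `high + (low / 2)` under Python precedence, so the midpoint drifts toward `high` and the recursion can index past the end of the array. A conventional fixed-point search (an index i with array[i] == i over a sorted array) parenthesises the midpoint, as in this sketch with illustrative names:

from typing import Sequence

def index_search_fixed(array: Sequence[int], low: int, high: int) -> int:
    # Binary search for a fixed point: an index i with array[i] == i.
    if low > high:
        return -1
    median = (low + high) // 2          # parenthesised midpoint
    if array[median] == median:
        left = index_search_fixed(array, low, median - 1)
        return left if left != -1 else median   # prefer the leftmost match
    if array[median] > median:
        return index_search_fixed(array, low, median - 1)
    return index_search_fixed(array, median + 1, high)

assert index_search_fixed([-3, 0, 2, 5, 9], 0, 4) == 2   # array[2] == 2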
33619321ea9902085acc85a81148cd61606f912c
|
arghya05/EPAi
|
12. Packages/calculator/util.py
|
[
"MIT"
] |
Python
|
f_string_print
|
'Function'
|
def f_string_print(fn:'Function')->'Function':
"""
This is a closure to print the result of the function which is
passed as an argument .
"""
def inner(*args,**kwargs):
output = fn(*args,**kwargs)
print(f'Function: {fn.__name__},Arguments:{args} and Output:{output}')
return output
return inner
|
This is a closure to print the result of the function which is
passed as an argument .
|
This is a closure to print the result of the function which is
passed as an argument .
|
[
"This",
"is",
"a",
"closure",
"to",
"print",
"the",
"result",
"of",
"the",
"function",
"which",
"is",
"passed",
"as",
"an",
"argument",
"."
] |
def f_string_print(fn:'Function')->'Function':
def inner(*args,**kwargs):
output = fn(*args,**kwargs)
print(f'Function: {fn.__name__},Arguments:{args} and Output:{output}')
return output
return inner
|
[
"def",
"f_string_print",
"(",
"fn",
":",
"'Function'",
")",
"->",
"'Function'",
":",
"def",
"inner",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
":",
"output",
"=",
"fn",
"(",
"*",
"args",
",",
"**",
"kwargs",
")",
"print",
"(",
"f'Function: {fn.__name__},Arguments:{args} and Output:{output}'",
")",
"return",
"output",
"return",
"inner"
] |
This is a closure to print the result of the function which is
passed as an argument .
|
[
"This",
"is",
"a",
"closure",
"to",
"print",
"the",
"result",
"of",
"the",
"function",
"which",
"is",
"passed",
"as",
"an",
"argument",
"."
] |
[
"\"\"\"\n This is a closure to print the result of the function which is \n passed as an argument .\n \"\"\""
] |
[
{
"param": "fn",
"type": "'Function'"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "fn",
"type": "'Function'",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def f_string_print(fn:'Function')->'Function':
def inner(*args,**kwargs):
output = fn(*args,**kwargs)
print(f'Function: {fn.__name__},Arguments:{args} and Output:{output}')
return output
return inner
| 1,454 | 611 |
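A short usage sketch for the closure entry above; the decorated function and its arguments are illustrative:

@f_string_print
def add(a, b):
    return a + b

assert add(2, 3) == 5   # also prints: Function: add,Arguments:(2, 3) and Output:5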
ffd7458bc01e8e46251baef3b245335dabb92c93
|
przemekkot/publishing-boy
|
publishing_boy/plugins.py
|
[
"MIT"
] |
Python
|
creation_date
|
<not_specific>
|
def creation_date(obj):
"""Extract date when the file was
created.
@return: 'date', date(YYYY-mm-dd HH:MM:SS)"""
_, _, abspath, _ = obj
return 'cdate', datetime.fromtimestamp(os.path.getctime(abspath))
|
Extract date when the file was
created.
@return: 'date', date(YYYY-mm-dd HH:MM:SS)
|
Extract date when the file was
created.
|
[
"Extract",
"date",
"when",
"the",
"file",
"was",
"created",
"."
] |
def creation_date(obj):
_, _, abspath, _ = obj
return 'cdate', datetime.fromtimestamp(os.path.getctime(abspath))
|
[
"def",
"creation_date",
"(",
"obj",
")",
":",
"_",
",",
"_",
",",
"abspath",
",",
"_",
"=",
"obj",
"return",
"'cdate'",
",",
"datetime",
".",
"fromtimestamp",
"(",
"os",
".",
"path",
".",
"getctime",
"(",
"abspath",
")",
")"
] |
Extract date when the file was
created.
|
[
"Extract",
"date",
"when",
"the",
"file",
"was",
"created",
"."
] |
[
"\"\"\"Extract date when the file was\n created.\n\n @return: 'date', date(YYYY-mm-dd HH:MM:SS)\"\"\""
] |
[
{
"param": "obj",
"type": null
}
] |
{
"returns": [
{
"docstring": "'date', date(YYYY-mm-dd HH:MM:SS)",
"docstring_tokens": [
"'",
"date",
"'",
"date",
"(",
"YYYY",
"-",
"mm",
"-",
"dd",
"HH",
":",
"MM",
":",
"SS",
")"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "obj",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
from datetime import datetime
def creation_date(obj):
_, _, abspath, _ = obj
return 'cdate', datetime.fromtimestamp(os.path.getctime(abspath))
| 1,455 | 941 |
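A usage sketch for the entry above; the 4-tuple is a placeholder shaped after the unpacking in the code, with only the path element actually used, and the function needs `from datetime import datetime` (not a bare `import datetime`) since it calls `datetime.fromtimestamp` directly:

import os

obj = (None, None, os.path.abspath(__file__), None)   # only the third element is read
key, created = creation_date(obj)
assert key == 'cdate'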
840219eb8acac19ff578f1bfd7c4044052083ebd
|
chebee7i/buhmm
|
buhmm/canonical.py
|
[
"MIT"
] |
Python
|
verify_orders
| null |
def verify_orders(machine, node_order, symbol_order):
"""
Helper function to verify node and symbol orders.
Users can call this before make_canonical(), if they want assurances.
This requires one pass through all the edges in the machine.
"""
nodes = set(machine.nodes())
nodes_ = set(node_order)
if nodes != nodes_:
raise Exception("Invalid node order.")
symbols = machine.alphabet()
symbols_ = set(symbol_order)
if symbols != symbols_:
raise Exception("Invalid symbol order.")
|
Helper function to verify node and symbol orders.
Users can call this before make_canonical(), if they want assurances.
This requires one pass through all the edges in the machine.
|
Helper function to verify node and symbol orders.
Users can call this before make_canonical(), if they want assurances.
This requires one pass through all the edges in the machine.
|
[
"Helper",
"function",
"to",
"verify",
"node",
"and",
"symbol",
"orders",
".",
"Users",
"can",
"call",
"this",
"before",
"make_canonical",
"()",
"if",
"they",
"want",
"assurances",
".",
"This",
"requires",
"one",
"pass",
"through",
"all",
"the",
"edges",
"in",
"the",
"machine",
"."
] |
def verify_orders(machine, node_order, symbol_order):
nodes = set(machine.nodes())
nodes_ = set(node_order)
if nodes != nodes_:
raise Exception("Invalid node order.")
symbols = machine.alphabet()
symbols_ = set(symbol_order)
if symbols != symbols_:
raise Exception("Invalid symbol order.")
|
[
"def",
"verify_orders",
"(",
"machine",
",",
"node_order",
",",
"symbol_order",
")",
":",
"nodes",
"=",
"set",
"(",
"machine",
".",
"nodes",
"(",
")",
")",
"nodes_",
"=",
"set",
"(",
"node_order",
")",
"if",
"nodes",
"!=",
"nodes_",
":",
"raise",
"Exception",
"(",
"\"Invalid node order.\"",
")",
"symbols",
"=",
"machine",
".",
"alphabet",
"(",
")",
"symbols_",
"=",
"set",
"(",
"symbol_order",
")",
"if",
"symbols",
"!=",
"symbols_",
":",
"raise",
"Exception",
"(",
"\"Invalid symbol order.\"",
")"
] |
Helper function to verify node and symbol orders.
|
[
"Helper",
"function",
"to",
"verify",
"node",
"and",
"symbol",
"orders",
"."
] |
[
"\"\"\"\n Helper function to verify node and symbol orders.\n\n Users can call this before make_canonical(), if they want assurances.\n\n This requires one pass through all the edges in the machine.\n\n \"\"\""
] |
[
{
"param": "machine",
"type": null
},
{
"param": "node_order",
"type": null
},
{
"param": "symbol_order",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "machine",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "node_order",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "symbol_order",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def verify_orders(machine, node_order, symbol_order):
nodes = set(machine.nodes())
nodes_ = set(node_order)
if nodes != nodes_:
raise Exception("Invalid node order.")
symbols = machine.alphabet()
symbols_ = set(symbol_order)
if symbols != symbols_:
raise Exception("Invalid symbol order.")
| 1,456 | 210 |
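A small sketch exercising the entry above with a stub machine; the stub's two methods are inferred from the calls in the code:

class StubMachine:
    # Minimal stand-in exposing the two methods verify_orders calls.
    def nodes(self):
        return ['A', 'B']
    def alphabet(self):
        return {0, 1}

verify_orders(StubMachine(), node_order=['B', 'A'], symbol_order=[0, 1])   # passes silently
# verify_orders(StubMachine(), ['A'], [0, 1]) would raise Exception("Invalid node order.")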
bfe8e792f27fd6a06fbb5978827fef372de6ceb1
|
dentearl/evolverSimControl
|
src/libSimControl.py
|
[
"MIT"
] |
Python
|
extractLeafsFromNewick
|
<not_specific>
|
def extractLeafsFromNewick(nt, leafDict):
"""Given a newick tree object, it returns a dict of
leaf objects. Operates recursively.
"""
if nt is None:
return None
nt.distance = 0.0
if nt.right is None and nt.left is None:
leafDict[nt.iD] = True
else:
extractLeafsFromNewick(nt.right, leafDict = leafDict)
extractLeafsFromNewick(nt.left , leafDict = leafDict)
|
Given a newick tree object, it returns a dict of
leaf objects. Operates recursively.
|
Given a newick tree object, it returns a dict of
leaf objects. Operates recursively.
|
[
"Given",
"a",
"newick",
"tree",
"object",
"it",
"returns",
"a",
"dict",
"of",
"leaf",
"objects",
".",
"Operates",
"recursively",
"."
] |
def extractLeafsFromNewick(nt, leafDict):
if nt is None:
return None
nt.distance = 0.0
if nt.right is None and nt.left is None:
leafDict[nt.iD] = True
else:
extractLeafsFromNewick(nt.right, leafDict = leafDict)
extractLeafsFromNewick(nt.left , leafDict = leafDict)
|
[
"def",
"extractLeafsFromNewick",
"(",
"nt",
",",
"leafDict",
")",
":",
"if",
"nt",
"is",
"None",
":",
"return",
"None",
"nt",
".",
"distance",
"=",
"0.0",
"if",
"nt",
".",
"right",
"is",
"None",
"and",
"nt",
".",
"left",
"is",
"None",
":",
"leafDict",
"[",
"nt",
".",
"iD",
"]",
"=",
"True",
"else",
":",
"extractLeafsFromNewick",
"(",
"nt",
".",
"right",
",",
"leafDict",
"=",
"leafDict",
")",
"extractLeafsFromNewick",
"(",
"nt",
".",
"left",
",",
"leafDict",
"=",
"leafDict",
")"
] |
Given a newick tree object, it returns a dict of
leaf objects.
|
[
"Given",
"a",
"newick",
"tree",
"object",
"it",
"returns",
"a",
"dict",
"of",
"leaf",
"objects",
"."
] |
[
"\"\"\"Given a newick tree object, it returns a dict of\n leaf objects. Operates recursively.\n \"\"\""
] |
[
{
"param": "nt",
"type": null
},
{
"param": "leafDict",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "nt",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "leafDict",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def extractLeafsFromNewick(nt, leafDict):
if nt is None:
return None
nt.distance = 0.0
if nt.right is None and nt.left is None:
leafDict[nt.iD] = True
else:
extractLeafsFromNewick(nt.right, leafDict = leafDict)
extractLeafsFromNewick(nt.left , leafDict = leafDict)
| 1,457 | 864 |
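A stub-based sketch for the leaf-extraction entry above; the node attributes (iD, left, right, distance) are taken from the code's own reads and writes:

class Node:
    # Minimal stand-in with the attributes the entry touches.
    def __init__(self, iD, left=None, right=None):
        self.iD, self.left, self.right = iD, left, right
        self.distance = 1.0

leaves = {}
extractLeafsFromNewick(Node('root', Node('L'), Node('R')), leaves)
assert leaves == {'L': True, 'R': True}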
4dadba87cb576798d26bfcadb757c964e505d04d
|
khushboobhatia01/st2
|
st2common/st2common/util/pack.py
|
[
"Apache-2.0"
] |
Python
|
normalize_pack_version
|
<not_specific>
|
def normalize_pack_version(version):
"""
Normalize old, pre StackStorm v2.1 non valid semver version string (e.g. 0.2) to a valid
semver version string (0.2.0).
:rtype: ``str``
"""
version = str(version)
version_seperator_count = version.count(".")
if version_seperator_count == 1:
version = version + ".0"
return version
|
Normalize old, pre StackStorm v2.1 non valid semver version string (e.g. 0.2) to a valid
semver version string (0.2.0).
:rtype: ``str``
|
Normalize old, pre StackStorm v2.1 non valid semver version string to a valid
semver version string (0.2.0).
|
[
"Normalize",
"old",
"pre",
"StackStorm",
"v2",
".",
"1",
"non",
"valid",
"semver",
"version",
"string",
"to",
"a",
"valid",
"semver",
"version",
"string",
"(",
"0",
".",
"2",
".",
"0",
")",
"."
] |
def normalize_pack_version(version):
version = str(version)
version_seperator_count = version.count(".")
if version_seperator_count == 1:
version = version + ".0"
return version
|
[
"def",
"normalize_pack_version",
"(",
"version",
")",
":",
"version",
"=",
"str",
"(",
"version",
")",
"version_seperator_count",
"=",
"version",
".",
"count",
"(",
"\".\"",
")",
"if",
"version_seperator_count",
"==",
"1",
":",
"version",
"=",
"version",
"+",
"\".0\"",
"return",
"version"
] |
Normalize old, pre StackStorm v2.1 non valid semver version string (e.g.
|
[
"Normalize",
"old",
"pre",
"StackStorm",
"v2",
".",
"1",
"non",
"valid",
"semver",
"version",
"string",
"(",
"e",
".",
"g",
"."
] |
[
"\"\"\"\n Normalize old, pre StackStorm v2.1 non valid semver version string (e.g. 0.2) to a valid\n semver version string (0.2.0).\n\n :rtype: ``str``\n \"\"\""
] |
[
{
"param": "version",
"type": null
}
] |
{
"returns": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": "``str``"
}
],
"raises": [],
"params": [
{
"identifier": "version",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def normalize_pack_version(version):
version = str(version)
version_seperator_count = version.count(".")
if version_seperator_count == 1:
version = version + ".0"
return version
| 1,458 | 132 |
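Quick behaviour checks for the version-normalisation entry above:

assert normalize_pack_version("0.2") == "0.2.0"     # single-separator version gets padded
assert normalize_pack_version("1.2.3") == "1.2.3"   # valid semver passes through untouched
assert normalize_pack_version(1.5) == "1.5.0"       # non-strings are coerced via str() first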
34194853f2cc3d1258831a60ccbb4642162fd54c
|
Venti-/willie
|
willie/module.py
|
[
"EFL-2.0"
] |
Python
|
intent
|
<not_specific>
|
def intent(*intent_list):
"""Make a callable trigger on a message with any of the given intents.
*Availability: 5.2.0+*
NOTE: Due to a bug, messages sent with the intent in the CTCP format
(rather than IRCv3 message tags) which do not include a message (for
example, a message like ``\\x01VERSION\\x01``) are not correctly parsed by
the bot, and as such do not get caught by this decorator. This erroneous
behavior is kept in 5.x for compatability, but will be rectified in 6.0.
Additionally in 5.x, a rule or command must be specified in addition to
this, even if it is just ``'.*``.
"""
def add_attribute(function):
if not hasattr(function, "intents"):
function.intents = []
function.intents.extend(intent_list)
return function
return add_attribute
|
Make a callable trigger on a message with any of the given intents.
*Availability: 5.2.0+*
NOTE: Due to a bug, messages sent with the intent in the CTCP format
(rather than IRCv3 message tags) which do not include a message (for
example, a message like ``\\x01VERSION\\x01``) are not correctly parsed by
the bot, and as such do not get caught by this decorator. This erroneous
behavior is kept in 5.x for compatability, but will be rectified in 6.0.
Additionally in 5.x, a rule or command must be specified in addition to
this, even if it is just ``'.*``.
|
Make a callable trigger on a message with any of the given intents.
Due to a bug, messages sent with the intent in the CTCP format
(rather than IRCv3 message tags) which do not include a message (for
example, a message like ``\\x01VERSION\\x01``) are not correctly parsed by
the bot, and as such do not get caught by this decorator. This erroneous
behavior is kept in 5.x for compatability, but will be rectified in 6.0.
Additionally in 5.x, a rule or command must be specified in addition to
this, even if it is just ``'.*``.
|
[
"Make",
"a",
"callable",
"trigger",
"on",
"a",
"message",
"with",
"any",
"of",
"the",
"given",
"intents",
".",
"Due",
"to",
"a",
"bug",
"messages",
"sent",
"with",
"the",
"intent",
"in",
"the",
"CTCP",
"format",
"(",
"rather",
"than",
"IRCv3",
"message",
"tags",
")",
"which",
"do",
"not",
"include",
"a",
"message",
"(",
"for",
"example",
"a",
"message",
"like",
"`",
"`",
"\\\\",
"x01VERSION",
"\\\\",
"x01",
"`",
"`",
")",
"are",
"not",
"correctly",
"parsed",
"by",
"the",
"bot",
"and",
"as",
"such",
"do",
"not",
"get",
"caught",
"by",
"this",
"decorator",
".",
"This",
"erroneous",
"behavior",
"is",
"kept",
"in",
"5",
".",
"x",
"for",
"compatability",
"but",
"will",
"be",
"rectified",
"in",
"6",
".",
"0",
".",
"Additionally",
"in",
"5",
".",
"x",
"a",
"rule",
"or",
"command",
"must",
"be",
"specified",
"in",
"addition",
"to",
"this",
"even",
"if",
"it",
"is",
"just",
"`",
"`",
"'",
".",
"*",
"`",
"`",
"."
] |
def intent(*intent_list):
def add_attribute(function):
if not hasattr(function, "intents"):
function.intents = []
function.intents.extend(intent_list)
return function
return add_attribute
|
[
"def",
"intent",
"(",
"*",
"intent_list",
")",
":",
"def",
"add_attribute",
"(",
"function",
")",
":",
"if",
"not",
"hasattr",
"(",
"function",
",",
"\"intents\"",
")",
":",
"function",
".",
"intents",
"=",
"[",
"]",
"function",
".",
"intents",
".",
"extend",
"(",
"intent_list",
")",
"return",
"function",
"return",
"add_attribute"
] |
Make a callable trigger on a message with any of the given intents.
|
[
"Make",
"a",
"callable",
"trigger",
"on",
"a",
"message",
"with",
"any",
"of",
"the",
"given",
"intents",
"."
] |
[
"\"\"\"Make a callable trigger on a message with any of the given intents.\n\n *Availability: 5.2.0+*\n\n NOTE: Due to a bug, messages sent with the intent in the CTCP format\n (rather than IRCv3 message tags) which do not include a message (for\n example, a message like ``\\\\x01VERSION\\\\x01``) are not correctly parsed by\n the bot, and as such do not get caught by this decorator. This erroneous\n behavior is kept in 5.x for compatability, but will be rectified in 6.0.\n\n Additionally in 5.x, a rule or command must be specified in addition to\n this, even if it is just ``'.*``.\n \"\"\""
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
def intent(*intent_list):
def add_attribute(function):
if not hasattr(function, "intents"):
function.intents = []
function.intents.extend(intent_list)
return function
return add_attribute
| 1,459 | 842 |
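A decorator sketch for the entry above; handler, bot and trigger are illustrative names in the usual callable shape for this module:

@intent('ACTION', 'VERSION')
def handler(bot, trigger):
    pass

assert handler.intents == ['ACTION', 'VERSION']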
7617bf031ef89687b762f54b863b961ace8d77e7
|
cmeb45/YouTubePlaylist
|
bin/prod_playlists.py
|
[
"MIT"
] |
Python
|
is_official_channel
|
<not_specific>
|
def is_official_channel(user, user_search, artist_variations):
"""Check if a video comes from an official channel by the artist/label
"""
if user_search is not None or user in artist_variations:
return True
else:
return False
|
Check if a video comes from an official channel by the artist/label
|
Check if a video comes from an official channel by the artist/label
|
[
"Check",
"if",
"a",
"video",
"comes",
"from",
"an",
"official",
"channel",
"by",
"the",
"artist",
"/",
"label"
] |
def is_official_channel(user, user_search, artist_variations):
if user_search is not None or user in artist_variations:
return True
else:
return False
|
[
"def",
"is_official_channel",
"(",
"user",
",",
"user_search",
",",
"artist_variations",
")",
":",
"if",
"user_search",
"is",
"not",
"None",
"or",
"user",
"in",
"artist_variations",
":",
"return",
"True",
"else",
":",
"return",
"False"
] |
Check if a video comes from an official channel by the artist/label
|
[
"Check",
"if",
"a",
"video",
"comes",
"from",
"an",
"official",
"channel",
"by",
"the",
"artist",
"/",
"label"
] |
[
"\"\"\"Check if a video comes from an official channel by the artist/label\n \"\"\""
] |
[
{
"param": "user",
"type": null
},
{
"param": "user_search",
"type": null
},
{
"param": "artist_variations",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "user",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "user_search",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "artist_variations",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def is_official_channel(user, user_search, artist_variations):
if user_search is not None or user in artist_variations:
return True
else:
return False
| 1,460 | 455 |
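Behaviour checks for the channel entry above; the names are placeholders:

official = {"artist", "artistvevo"}
assert is_official_channel("artistvevo", None, official)      # uploader name matches a variation
assert not is_official_channel("randomfan", None, official)   # no match and no search hit
assert is_official_channel("randomfan", "hit", official)      # any non-None search hit counts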
61ab41bcefe19f127a9df1f6e860314c8745612e
|
Panaedra/panaedra_py_platform_base
|
src/panaedra/msroot/msutil/logic/sc_mspysys.py
|
[
"MIT"
] |
Python
|
AllObjectsToTrash
|
<not_specific>
|
def AllObjectsToTrash(cls, cDataIP):
'''Called by bridge.
Does a 'del' on all class objects.
Prevents core dumps at the end of an ABL session.
For manual investigation: use DumpAllObjects()
'''
tObjects=gc.get_objects()
for o in tObjects:
if getattr(o, "__class__", None):
del o
return ''
|
Called by bridge.
Does a 'del' on all class objects.
Prevents core dumps at the end of an ABL session.
For manual investigation: use DumpAllObjects()
|
Called by bridge.
Does a 'del' on all class objects.
Prevents core dumps at the end of an ABL session.
For manual investigation: use DumpAllObjects()
|
[
"Called",
"by",
"bridge",
".",
"Does",
"a",
"'",
"del",
"'",
"on",
"all",
"class",
"objects",
".",
"Prevents",
"core",
"dumps",
"at",
"the",
"end",
"of",
"an",
"ABL",
"session",
".",
"For",
"manual",
"investigation",
":",
"use",
"DumpAllObjects",
"()"
] |
def AllObjectsToTrash(cls, cDataIP):
tObjects=gc.get_objects()
for o in tObjects:
if getattr(o, "__class__", None):
del o
return ''
|
[
"def",
"AllObjectsToTrash",
"(",
"cls",
",",
"cDataIP",
")",
":",
"tObjects",
"=",
"gc",
".",
"get_objects",
"(",
")",
"for",
"o",
"in",
"tObjects",
":",
"if",
"getattr",
"(",
"o",
",",
"\"__class__\"",
",",
"None",
")",
":",
"del",
"o",
"return",
"''"
] |
Called by bridge.
|
[
"Called",
"by",
"bridge",
"."
] |
[
"'''Called by bridge. \r\n Does a 'del' on all class objects. \r\n Prevents core dumps at the end of an ABL session. \r\n For manual investigation: use DumpAllObjects()\r\n '''"
] |
[
{
"param": "cls",
"type": null
},
{
"param": "cDataIP",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "cDataIP",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import gc
def AllObjectsToTrash(cls, cDataIP):
tObjects=gc.get_objects()
for o in tObjects:
if getattr(o, "__class__", None):
del o
return ''
| 1,461 | 180 |
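A hedged note on the entry above: `del o` only unbinds the local loop name, so the loop does not actually release the snapshotted objects; forcing a collection is the reliable way to reclaim unreachable ones, as in this sketch:

import gc

def force_cleanup() -> int:
    # gc.collect() reclaims unreachable objects and reports how many it freed.
    return gc.collect()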
66ba9ca44e1a2ebf20f37d62517a6d08bf8d77eb
|
takeratta/pants
|
src/python/pants/scm/git.py
|
[
"Apache-2.0"
] |
Python
|
clone
|
<not_specific>
|
def clone(cls, repo_url, dest, binary='git'):
"""Clone the repo at repo_url into dest.
:param string binary: The path to the git binary to use, 'git' by default.
:returns: an instance of this class representing the cloned repo.
:rtype: Git
"""
cmd = [binary, 'clone', repo_url, dest]
process, out = cls._invoke(cmd)
cls._check_result(cmd, process.returncode)
return cls(binary=binary, worktree=dest)
|
Clone the repo at repo_url into dest.
:param string binary: The path to the git binary to use, 'git' by default.
:returns: an instance of this class representing the cloned repo.
:rtype: Git
|
Clone the repo at repo_url into dest.
|
[
"Clone",
"the",
"repo",
"at",
"repo_url",
"into",
"dest",
"."
] |
def clone(cls, repo_url, dest, binary='git'):
cmd = [binary, 'clone', repo_url, dest]
process, out = cls._invoke(cmd)
cls._check_result(cmd, process.returncode)
return cls(binary=binary, worktree=dest)
|
[
"def",
"clone",
"(",
"cls",
",",
"repo_url",
",",
"dest",
",",
"binary",
"=",
"'git'",
")",
":",
"cmd",
"=",
"[",
"binary",
",",
"'clone'",
",",
"repo_url",
",",
"dest",
"]",
"process",
",",
"out",
"=",
"cls",
".",
"_invoke",
"(",
"cmd",
")",
"cls",
".",
"_check_result",
"(",
"cmd",
",",
"process",
".",
"returncode",
")",
"return",
"cls",
"(",
"binary",
"=",
"binary",
",",
"worktree",
"=",
"dest",
")"
] |
Clone the repo at repo_url into dest.
|
[
"Clone",
"the",
"repo",
"at",
"repo_url",
"into",
"dest",
"."
] |
[
"\"\"\"Clone the repo at repo_url into dest.\n\n :param string binary: The path to the git binary to use, 'git' by default.\n :returns: an instance of this class representing the cloned repo.\n :rtype: Git\n \"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "repo_url",
"type": null
},
{
"param": "dest",
"type": null
},
{
"param": "binary",
"type": null
}
] |
{
"returns": [
{
"docstring": "an instance of this class representing the cloned repo.",
"docstring_tokens": [
"an",
"instance",
"of",
"this",
"class",
"representing",
"the",
"cloned",
"repo",
"."
],
"type": "Git"
}
],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "repo_url",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "dest",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "binary",
"type": null,
"docstring": "The path to the git binary to use, 'git' by default.",
"docstring_tokens": [
"The",
"path",
"to",
"the",
"git",
"binary",
"to",
"use",
"'",
"git",
"'",
"by",
"default",
"."
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def clone(cls, repo_url, dest, binary='git'):
cmd = [binary, 'clone', repo_url, dest]
process, out = cls._invoke(cmd)
cls._check_result(cmd, process.returncode)
return cls(binary=binary, worktree=dest)
| 1,462 | 504 |
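Usage implied by the entry's own docstring, assuming the enclosing Git class; the URL and destination are placeholders:

repo = Git.clone('https://example.com/project.git', '/tmp/project')   # returns a Git instance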
fe930c0add211ff30c586e6a8fb2fac415e25793
|
changyu98/GAN-PyTorch
|
example/main.py
|
[
"Apache-2.0"
] |
Python
|
cal_file_md5
|
<not_specific>
|
def cal_file_md5(filename):
""" Calculates the MD5 value of the file
Args:
filename: The path name of the file.
Return:
The MD5 value of the file.
"""
with open(filename, "rb") as f:
md5 = hashlib.md5()
md5.update(f.read())
hash_value = md5.hexdigest()
return hash_value
|
Calculates the MD5 value of the file
Args:
filename: The path name of the file.
Return:
The MD5 value of the file.
|
Calculates the MD5 value of the file
|
[
"Calculates",
"the",
"MD5",
"value",
"of",
"the",
"file"
] |
def cal_file_md5(filename):
with open(filename, "rb") as f:
md5 = hashlib.md5()
md5.update(f.read())
hash_value = md5.hexdigest()
return hash_value
|
[
"def",
"cal_file_md5",
"(",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"f",
":",
"md5",
"=",
"hashlib",
".",
"md5",
"(",
")",
"md5",
".",
"update",
"(",
"f",
".",
"read",
"(",
")",
")",
"hash_value",
"=",
"md5",
".",
"hexdigest",
"(",
")",
"return",
"hash_value"
] |
Calculates the MD5 value of the file
|
[
"Calculates",
"the",
"MD5",
"value",
"of",
"the",
"file"
] |
[
"\"\"\" Calculates the MD5 value of the file\n Args:\n filename: The path name of the file.\n\n Return:\n The MD5 value of the file.\n\n \"\"\""
] |
[
{
"param": "filename",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "filename",
"type": null,
"docstring": "The path name of the file.",
"docstring_tokens": [
"The",
"path",
"name",
"of",
"the",
"file",
"."
],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import hashlib
def cal_file_md5(filename):
with open(filename, "rb") as f:
md5 = hashlib.md5()
md5.update(f.read())
hash_value = md5.hexdigest()
return hash_value
| 1,463 | 252 |
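A self-check for the MD5 entry above, comparing against hashlib's one-shot form on the current file:

import hashlib

with open(__file__, "rb") as f:
    expected = hashlib.md5(f.read()).hexdigest()
assert cal_file_md5(__file__) == expected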
3e1a8537d9c3b90696aab84d5055953dedf921bf
|
silvafj/BBK-MSCCS-2017-18
|
POP1/assignment-two/Hundred.py
|
[
"MIT"
] |
Python
|
is_game_over
|
<not_specific>
|
def is_game_over(computer_score, human_score):
"""
Checks if the game is over.
:param int computer_score: Computer score
:param int human_score: Human score
:return: True if either player has 100 or more, and the players are not tied
:rtype: bool
"""
return computer_score != human_score and \
(computer_score >= 100 or human_score >= 100)
|
Checks if the game is over.
:param int computer_score: Computer score
:param int human_score: Human score
:return: True if either player has 100 or more, and the players are not tied
:rtype: bool
|
Checks if the game is over.
|
[
"Checks",
"if",
"the",
"game",
"is",
"over",
"."
] |
def is_game_over(computer_score, human_score):
return computer_score != human_score and \
(computer_score >= 100 or human_score >= 100)
|
[
"def",
"is_game_over",
"(",
"computer_score",
",",
"human_score",
")",
":",
"return",
"computer_score",
"!=",
"human_score",
"and",
"(",
"computer_score",
">=",
"100",
"or",
"human_score",
">=",
"100",
")"
] |
Checks if the game is over.
|
[
"Checks",
"if",
"the",
"game",
"is",
"over",
"."
] |
[
"\"\"\"\n Checks if the game is over.\n\n :param int computer_score: Computer score\n :param int human_score: Human score\n :return: True if either player has 100 or more, and the players are not tied\n :rtype: bool\n \"\"\""
] |
[
{
"param": "computer_score",
"type": null
},
{
"param": "human_score",
"type": null
}
] |
{
"returns": [
{
"docstring": "True if either player has 100 or more, and the players are not tied",
"docstring_tokens": [
"True",
"if",
"either",
"player",
"has",
"100",
"or",
"more",
"and",
"the",
"players",
"are",
"not",
"tied"
],
"type": "bool"
}
],
"raises": [],
"params": [
{
"identifier": "computer_score",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "human_score",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def is_game_over(computer_score, human_score):
return computer_score != human_score and \
(computer_score >= 100 or human_score >= 100)
| 1,464 | 176 |
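Behaviour checks for the scoring entry above:

assert is_game_over(100, 42)        # a player reached 100 and the scores differ
assert not is_game_over(100, 100)   # tied at 100: play continues
assert not is_game_over(57, 80)     # nobody has reached 100 yet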
aaa52d62385bae523e1c08828f003995b26436d0
|
landrito/api-client-staging
|
generated/python/gapic-google-cloud-spanner-v1/google/cloud/gapic/spanner/v1/spanner_client.py
|
[
"BSD-3-Clause"
] |
Python
|
session_path
|
<not_specific>
|
def session_path(cls, project, instance, database, session):
"""Returns a fully-qualified session resource name string."""
return cls._SESSION_PATH_TEMPLATE.render({
'project': project,
'instance': instance,
'database': database,
'session': session,
})
|
Returns a fully-qualified session resource name string.
|
Returns a fully-qualified session resource name string.
|
[
"Returns",
"a",
"fully",
"-",
"qualified",
"session",
"resource",
"name",
"string",
"."
] |
def session_path(cls, project, instance, database, session):
return cls._SESSION_PATH_TEMPLATE.render({
'project': project,
'instance': instance,
'database': database,
'session': session,
})
|
[
"def",
"session_path",
"(",
"cls",
",",
"project",
",",
"instance",
",",
"database",
",",
"session",
")",
":",
"return",
"cls",
".",
"_SESSION_PATH_TEMPLATE",
".",
"render",
"(",
"{",
"'project'",
":",
"project",
",",
"'instance'",
":",
"instance",
",",
"'database'",
":",
"database",
",",
"'session'",
":",
"session",
",",
"}",
")"
] |
Returns a fully-qualified session resource name string.
|
[
"Returns",
"a",
"fully",
"-",
"qualified",
"session",
"resource",
"name",
"string",
"."
] |
[
"\"\"\"Returns a fully-qualified session resource name string.\"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "project",
"type": null
},
{
"param": "instance",
"type": null
},
{
"param": "database",
"type": null
},
{
"param": "session",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "project",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "instance",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "database",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "session",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def session_path(cls, project, instance, database, session):
return cls._SESSION_PATH_TEMPLATE.render({
'project': project,
'instance': instance,
'database': database,
'session': session,
})
| 1,465 | 1,015 |
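Usage implied by the entry above, assuming the enclosing SpannerClient class; the expected shape follows the standard Spanner resource template:

path = SpannerClient.session_path('my-project', 'my-instance', 'my-db', 'my-session')
# expected: projects/my-project/instances/my-instance/databases/my-db/sessions/my-session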
1612932ce6488d8d63b5c752fbeea096928d9c9d
|
Accelize/apyfal
|
apyfal/_pool_executor.py
|
[
"Apache-2.0"
] |
Python
|
_get_info_dict
|
<not_specific>
|
def _get_info_dict(info_list):
"""
Return info dict and append it in info list.
Args:
info_list (list or None): info list
Returns:
dict or None: info_dict
"""
if info_list is not None:
info_dict = dict()
info_list.append(info_dict)
return info_dict
return None
|
Return info dict and append it in info list.
Args:
info_list (list or None): info list
Returns:
dict or None: info_dict
|
Return info dict and append it in info list.
|
[
"Return",
"info",
"dict",
"and",
"append",
"it",
"in",
"info",
"list",
"."
] |
def _get_info_dict(info_list):
if info_list is not None:
info_dict = dict()
info_list.append(info_dict)
return info_dict
return None
|
[
"def",
"_get_info_dict",
"(",
"info_list",
")",
":",
"if",
"info_list",
"is",
"not",
"None",
":",
"info_dict",
"=",
"dict",
"(",
")",
"info_list",
".",
"append",
"(",
"info_dict",
")",
"return",
"info_dict",
"return",
"None"
] |
Return info dict and append it in info list.
|
[
"Return",
"info",
"dict",
"and",
"append",
"it",
"in",
"info",
"list",
"."
] |
[
"\"\"\"\n Return info dict and append it in info list.\n\n Args:\n info_list (list or None): info list\n\n Returns:\n dict or None: info_dict\n \"\"\""
] |
[
{
"param": "info_list",
"type": null
}
] |
{
"returns": [
{
"docstring": "dict or None: info_dict",
"docstring_tokens": [
"dict",
"or",
"None",
":",
"info_dict"
],
"type": null
}
],
"raises": [],
"params": [
{
"identifier": "info_list",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def _get_info_dict(info_list):
if info_list is not None:
info_dict = dict()
info_list.append(info_dict)
return info_dict
return None
| 1,466 | 103 |
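A usage sketch for the info-dict entry above; the returned dict is the same object appended to the list, so later mutations are visible through it:

infos = []
entry = _get_info_dict(infos)
entry['status'] = 'ok'
assert infos == [{'status': 'ok'}]
assert _get_info_dict(None) is None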
9b16d23a9586560a7aad5978a5e0fb995bf8d4fd
|
hugocool/notion-sdk-py
|
notion_client/lib/api_objects.py
|
[
"MIT"
] |
Python
|
from_json
|
"PropertyValue"
|
def from_json(cls, d: Dict[str, Any]) -> "PropertyValue":
"""Create a Property Value from its JSON equaivalent."""
property_id = d.pop("id")
property_type = d.pop("type")
property_value = cls._from_json(d)
property_value.property_id = property_id
property_value.property_type = property_type
return property_value
|
Create a Property Value from its JSON equaivalent.
|
Create a Property Value from its JSON equaivalent.
|
[
"Create",
"a",
"Property",
"Value",
"from",
"its",
"JSON",
"equaivalent",
"."
] |
def from_json(cls, d: Dict[str, Any]) -> "PropertyValue":
property_id = d.pop("id")
property_type = d.pop("type")
property_value = cls._from_json(d)
property_value.property_id = property_id
property_value.property_type = property_type
return property_value
|
[
"def",
"from_json",
"(",
"cls",
",",
"d",
":",
"Dict",
"[",
"str",
",",
"Any",
"]",
")",
"->",
"\"PropertyValue\"",
":",
"property_id",
"=",
"d",
".",
"pop",
"(",
"\"id\"",
")",
"property_type",
"=",
"d",
".",
"pop",
"(",
"\"type\"",
")",
"property_value",
"=",
"cls",
".",
"_from_json",
"(",
"d",
")",
"property_value",
".",
"property_id",
"=",
"property_id",
"property_value",
".",
"property_type",
"=",
"property_type",
"return",
"property_value"
] |
Create a Property Value from its JSON equaivalent.
|
[
"Create",
"a",
"Property",
"Value",
"from",
"its",
"JSON",
"equaivalent",
"."
] |
[
"\"\"\"Create a Property Value from its JSON equaivalent.\"\"\""
] |
[
{
"param": "cls",
"type": null
},
{
"param": "d",
"type": "Dict[str, Any]"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "cls",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "d",
"type": "Dict[str, Any]",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
from typing import Any, Dict
def from_json(cls, d: Dict[str, Any]) -> "PropertyValue":
property_id = d.pop("id")
property_type = d.pop("type")
property_value = cls._from_json(d)
property_value.property_id = property_id
property_value.property_type = property_type
return property_value
| 1,467 | 823 |
a0894a3940745b4a9a06febd1e6f0ddc33e8d735
|
theChad/ThinkPython
|
chap13/analyze_book.py
|
[
"MIT"
] |
Python
|
binary_search
|
<not_specific>
|
def binary_search(sorted_list, target):
"""Find where a number lies in a sorted list. Return lowest item
that is greater than or equal to target, or None if no item
in list greater than or equal to target
"""
if sorted_list==[]:
return None
if len(sorted_list)==1:
if target <= sorted_list[0]:
return 0
return None
mid_index = int(len(sorted_list)/2)-1
mid_value = sorted_list[mid_index]
if target <= mid_value:
return binary_search(sorted_list[0:mid_index+1], target)
else:
return mid_index + 1 + binary_search(sorted_list[mid_index+1:], target)
|
Find where a number lies in a sorted list. Return lowest item
that is greater than or equal to target, or None if no item
in list greater than or equal to target
|
Find where a number lies in a sorted list. Return lowest item
that is greater than or equal to target, or None if no item
in list greater than or equal to target
|
[
"Find",
"where",
"a",
"number",
"lies",
"in",
"a",
"sorted",
"list",
".",
"Return",
"lowest",
"item",
"that",
"is",
"greater",
"than",
"or",
"equal",
"to",
"target",
"or",
"None",
"if",
"no",
"item",
"in",
"list",
"greater",
"than",
"or",
"equal",
"to",
"target"
] |
def binary_search(sorted_list, target):
if sorted_list==[]:
return None
if len(sorted_list)==1:
if target <= sorted_list[0]:
return 0
return None
mid_index = int(len(sorted_list)/2)-1
mid_value = sorted_list[mid_index]
if target <= mid_value:
return binary_search(sorted_list[0:mid_index+1], target)
else:
return mid_index + 1 + binary_search(sorted_list[mid_index+1:], target)
|
[
"def",
"binary_search",
"(",
"sorted_list",
",",
"target",
")",
":",
"if",
"sorted_list",
"==",
"[",
"]",
":",
"return",
"None",
"if",
"len",
"(",
"sorted_list",
")",
"==",
"1",
":",
"if",
"target",
"<=",
"sorted_list",
"[",
"0",
"]",
":",
"return",
"0",
"return",
"None",
"mid_index",
"=",
"int",
"(",
"len",
"(",
"sorted_list",
")",
"/",
"2",
")",
"-",
"1",
"mid_value",
"=",
"sorted_list",
"[",
"mid_index",
"]",
"if",
"target",
"<=",
"mid_value",
":",
"return",
"binary_search",
"(",
"sorted_list",
"[",
"0",
":",
"mid_index",
"+",
"1",
"]",
",",
"target",
")",
"else",
":",
"return",
"mid_index",
"+",
"1",
"+",
"binary_search",
"(",
"sorted_list",
"[",
"mid_index",
"+",
"1",
":",
"]",
",",
"target",
")"
] |
Find where a number lies in a sorted list.
|
[
"Find",
"where",
"a",
"number",
"lies",
"in",
"a",
"sorted",
"list",
"."
] |
[
"\"\"\"Find where a number lies in a sorted list. Return lowest item\n that is greater than or equal to target, or None if no item\n in list greater than or equal to target\n \"\"\""
] |
[
{
"param": "sorted_list",
"type": null
},
{
"param": "target",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "sorted_list",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "target",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def binary_search(sorted_list, target):
if sorted_list==[]:
return None
if len(sorted_list)==1:
if target <= sorted_list[0]:
return 0
return None
mid_index = int(len(sorted_list)/2)-1
mid_value = sorted_list[mid_index]
if target <= mid_value:
return binary_search(sorted_list[0:mid_index+1], target)
else:
return mid_index + 1 + binary_search(sorted_list[mid_index+1:], target)
| 1,468 | 314 |
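A hedged note on the binary_search entry above: when the target exceeds every element, the innermost call returns None and `mid_index + 1 + None` raises TypeError. The standard-library form below computes the same "lowest item >= target" index without that pitfall:

from bisect import bisect_left

def lowest_geq(sorted_list, target):
    # Index of the lowest item >= target, or None if no such item exists.
    i = bisect_left(sorted_list, target)
    return i if i < len(sorted_list) else None

assert lowest_geq([1, 3, 5], 4) == 2    # 5 is the lowest item >= 4
assert lowest_geq([1, 3, 5], 9) is None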
fd387f00a93ab1f9cfa779f9bec86d0018a352aa
|
arccode/factory
|
py/instalog/plugins/output_classify.py
|
[
"BSD-3-Clause"
] |
Python
|
ClassifierOfDay
|
<not_specific>
|
def ClassifierOfDay():
"""Gets relative path of subdirectory."""
# Use local date as classifier.
current_date = datetime.date.today()
subdir_name = current_date.strftime('%Y%m%d')
return subdir_name
|
Gets relative path of subdirectory.
|
Gets relative path of subdirectory.
|
[
"Gets",
"relative",
"path",
"of",
"subdirectory",
"."
] |
def ClassifierOfDay():
current_date = datetime.date.today()
subdir_name = current_date.strftime('%Y%m%d')
return subdir_name
|
[
"def",
"ClassifierOfDay",
"(",
")",
":",
"current_date",
"=",
"datetime",
".",
"date",
".",
"today",
"(",
")",
"subdir_name",
"=",
"current_date",
".",
"strftime",
"(",
"'%Y%m%d'",
")",
"return",
"subdir_name"
] |
Gets relative path of subdirectory.
|
[
"Gets",
"relative",
"path",
"of",
"subdirectory",
"."
] |
[
"\"\"\"Gets relative path of subdirectory.\"\"\"",
"# Use local date as classifier."
] |
[] |
{
"returns": [],
"raises": [],
"params": [],
"outlier_params": [],
"others": []
}
|
import datetime
def ClassifierOfDay():
current_date = datetime.date.today()
subdir_name = current_date.strftime('%Y%m%d')
return subdir_name
| 1,469 | 576 |
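Behaviour check for the date-classifier entry above (the concrete value depends on the local date):

import datetime

assert ClassifierOfDay() == datetime.date.today().strftime('%Y%m%d')   # e.g. "20240131"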
8bb3dcfcdf290d31fe67e8f57d9ed5b1bddc5532
|
benjaminysmith/operations
|
src/database_metrics/actions.py
|
[
"MIT"
] |
Python
|
load_data
|
bytes
|
def load_data(client: DockerClient,
image: str,
source: str,
file_pattern: str) -> bytes:
"""
Ingest data into epidata database using the Python docker image.
Copies files from common_full/covidcast/receiving/`source`/`file_pattern`, which should be
copied to the image via the DockerFile, to the common/covidcast/receiving/`source`/ folder
for ingestion. Runs ingestion via the python command.
Parameters
----------
client: DockerClient
Docker Client object containing the Python image.
image: str
Name of image containing data loading code.
source: str
Data source name
file_pattern: str
Filename of pattern to match
Returns
-------
Bytestring of Docker log, either STDOUT or STDERR
"""
return client.containers.run(
image=client.images.get(image),
command=f'bash -c "mkdir -p /common/covidcast/receiving/{source} && '
f'cp common_full/covidcast/receiving/{source}/{file_pattern} '
f'/common/covidcast/receiving/{source}/ && '
f'python3 -m delphi.epidata.acquisition.covidcast.csv_to_database '
f'--data_dir /common/covidcast/"',
network="delphi-net")
|
Ingest data into epidata database using the Python docker image.
Copies files from common_full/covidcast/receiving/`source`/`file_pattern`, which should be
copied to the image via the DockerFile, to the common/covidcast/receiving/`source`/ folder
for ingestion. Runs ingestion via the python command.
Parameters
----------
client: DockerClient
Docker Client object containing the Python image.
image: str
Name of image containing data loading code.
source: str
Data source name
file_pattern: str
Filename of pattern to match
Returns
-------
Bytestring of Docker log, either STDOUT or STDERR
|
Ingest data into epidata database using the Python docker image.
Parameters
DockerClient
Docker Client object containing the Python image.
image: str
Name of image containing data loading code.
source: str
Data source name
file_pattern: str
Filename of pattern to match
Returns
Bytestring of Docker log, either STDOUT or STDERR
|
[
"Ingest",
"data",
"into",
"epidata",
"database",
"using",
"the",
"Python",
"docker",
"image",
".",
"Parameters",
"DockerClient",
"Docker",
"Client",
"object",
"containing",
"the",
"Python",
"image",
".",
"image",
":",
"str",
"Name",
"of",
"image",
"containing",
"data",
"loading",
"code",
".",
"source",
":",
"str",
"Data",
"source",
"name",
"file_pattern",
":",
"str",
"Filename",
"of",
"pattern",
"to",
"match",
"Returns",
"Bytestring",
"of",
"Docker",
"log",
"either",
"STDOUT",
"or",
"STDERR"
] |
def load_data(client: DockerClient,
image: str,
source: str,
file_pattern: str) -> bytes:
return client.containers.run(
image=client.images.get(image),
command=f'bash -c "mkdir -p /common/covidcast/receiving/{source} && '
f'cp common_full/covidcast/receiving/{source}/{file_pattern} '
f'/common/covidcast/receiving/{source}/ && '
f'python3 -m delphi.epidata.acquisition.covidcast.csv_to_database '
f'--data_dir /common/covidcast/"',
network="delphi-net")
|
[
"def",
"load_data",
"(",
"client",
":",
"DockerClient",
",",
"image",
":",
"str",
",",
"source",
":",
"str",
",",
"file_pattern",
":",
"str",
")",
"->",
"bytes",
":",
"return",
"client",
".",
"containers",
".",
"run",
"(",
"image",
"=",
"client",
".",
"images",
".",
"get",
"(",
"image",
")",
",",
"command",
"=",
"f'bash -c \"mkdir -p /common/covidcast/receiving/{source} && '",
"f'cp common_full/covidcast/receiving/{source}/{file_pattern} '",
"f'/common/covidcast/receiving/{source}/ && '",
"f'python3 -m delphi.epidata.acquisition.covidcast.csv_to_database '",
"f'--data_dir /common/covidcast/\"'",
",",
"network",
"=",
"\"delphi-net\"",
")"
] |
Ingest data into epidata database using the Python docker image.
|
[
"Ingest",
"data",
"into",
"epidata",
"database",
"using",
"the",
"Python",
"docker",
"image",
"."
] |
[
"\"\"\"\n Ingest data into epidata database using the Python docker image.\n\n Copies files from common_full/covidcast/receiving/`source`/`file_pattern`, which should be\n copied to the image via the DockerFile, to the common/covidcast/receiving/`source`/ folder\n for ingestion. Runs ingestion via the python command.\n\n Parameters\n ----------\n client: DockerClient\n Docker Client object containing the Python image.\n image: str\n Name of image containing data loading code.\n source: str\n Data source name\n file_pattern: str\n Filename of pattern to match\n\n Returns\n -------\n Bytestring of Docker log, either STDOUT or STDERR\n \"\"\""
] |
[
{
"param": "client",
"type": "DockerClient"
},
{
"param": "image",
"type": "str"
},
{
"param": "source",
"type": "str"
},
{
"param": "file_pattern",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "client",
"type": "DockerClient",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "image",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "source",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "file_pattern",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
from docker import DockerClient
def load_data(client: DockerClient,
image: str,
source: str,
file_pattern: str) -> bytes:
return client.containers.run(
image=client.images.get(image),
command=f'bash -c "mkdir -p /common/covidcast/receiving/{source} && '
f'cp common_full/covidcast/receiving/{source}/{file_pattern} '
f'/common/covidcast/receiving/{source}/ && '
f'python3 -m delphi.epidata.acquisition.covidcast.csv_to_database '
f'--data_dir /common/covidcast/"',
network="delphi-net")
| 1,470 | 831 |
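A hedged invocation sketch for the entry above; the image name, source and file pattern are placeholders, and docker.from_env() assumes a reachable Docker daemon:

import docker

client = docker.from_env()
log = load_data(client, image="epidata-python", source="src", file_pattern="*.csv")
print(log.decode())   # container output as text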
6772f7b7186613658b02812362eb40e1b7474851
|
tlzhu19/elyra
|
elyra/metadata/storage.py
|
[
"BSD-3-Clause"
] |
Python
|
_rollback
|
None
|
def _rollback(resource: str, renamed_resource: str) -> None:
"""Rollback changes made during persistence (typically updates) and exceptions are encountered """
if os.path.exists(resource):
os.remove(resource)
if renamed_resource: # Restore the renamed file
os.rename(renamed_resource, resource)
|
Rollback changes made during persistence (typically updates) and exceptions are encountered
|
Rollback changes made during persistence (typically updates) and exceptions are encountered
|
[
"Rollback",
"changes",
"made",
"during",
"persistence",
"(",
"typically",
"updates",
")",
"and",
"exceptions",
"are",
"encountered"
] |
def _rollback(resource: str, renamed_resource: str) -> None:
if os.path.exists(resource):
os.remove(resource)
if renamed_resource:
os.rename(renamed_resource, resource)
|
[
"def",
"_rollback",
"(",
"resource",
":",
"str",
",",
"renamed_resource",
":",
"str",
")",
"->",
"None",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"resource",
")",
":",
"os",
".",
"remove",
"(",
"resource",
")",
"if",
"renamed_resource",
":",
"os",
".",
"rename",
"(",
"renamed_resource",
",",
"resource",
")"
] |
Rollback changes made during persistence (typically updates) and exceptions are encountered
|
[
"Rollback",
"changes",
"made",
"during",
"persistence",
"(",
"typically",
"updates",
")",
"and",
"exceptions",
"are",
"encountered"
] |
[
"\"\"\"Rollback changes made during persistence (typically updates) and exceptions are encountered \"\"\"",
"# Restore the renamed file"
] |
[
{
"param": "resource",
"type": "str"
},
{
"param": "renamed_resource",
"type": "str"
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "resource",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "renamed_resource",
"type": "str",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
import os
def _rollback(resource: str, renamed_resource: str) -> None:
if os.path.exists(resource):
os.remove(resource)
if renamed_resource:
os.rename(renamed_resource, resource)
| 1,471 | 781 |
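A filesystem sketch for the rollback entry above, using throwaway temporary paths:

from pathlib import Path
import tempfile

tmp = Path(tempfile.mkdtemp())
resource, backup = tmp / "meta.json", tmp / "meta.json.bak"
resource.write_text("new")   # the failed update
backup.write_text("old")     # the renamed original copy
_rollback(str(resource), str(backup))
assert resource.read_text() == "old"   # original content restored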
e96d9f59aebc5f779de67ebb1269305bc428274a
|
leonardogian/booleannet
|
boolean2/plde/helper.py
|
[
"MIT"
] |
Python
|
piecewise
|
<not_specific>
|
def piecewise( tokens, indexer ):
"""
Generates a piecewise equation from the tokens
"""
base_node = tokens[1].value
base_index = indexer[base_node]
line = []
line.append ( 'float(' )
nodes = [ t.value for t in tokens[4:] ]
for node in nodes:
# replace each node with the comparison
if node in indexer:
index = indexer[node]
value = " ( c%d > t%d ) " % ( index, index )
else:
value = node
line.append ( value )
line.append ( ')' )
# add decay term
line.append ( "- d%d * c%d" % ( base_index, base_index ) )
return ' '.join( line )
|
Generates a piecewise equation from the tokens
|
Generates a piecewise equation from the tokens
|
[
"Generates",
"a",
"piecewise",
"equation",
"from",
"the",
"tokens"
] |
def piecewise( tokens, indexer ):
base_node = tokens[1].value
base_index = indexer[base_node]
line = []
line.append ( 'float(' )
nodes = [ t.value for t in tokens[4:] ]
for node in nodes:
if node in indexer:
index = indexer[node]
value = " ( c%d > t%d ) " % ( index, index )
else:
value = node
line.append ( value )
line.append ( ')' )
line.append ( "- d%d * c%d" % ( base_index, base_index ) )
return ' '.join( line )
|
[
"def",
"piecewise",
"(",
"tokens",
",",
"indexer",
")",
":",
"base_node",
"=",
"tokens",
"[",
"1",
"]",
".",
"value",
"base_index",
"=",
"indexer",
"[",
"base_node",
"]",
"line",
"=",
"[",
"]",
"line",
".",
"append",
"(",
"'float('",
")",
"nodes",
"=",
"[",
"t",
".",
"value",
"for",
"t",
"in",
"tokens",
"[",
"4",
":",
"]",
"]",
"for",
"node",
"in",
"nodes",
":",
"if",
"node",
"in",
"indexer",
":",
"index",
"=",
"indexer",
"[",
"node",
"]",
"value",
"=",
"\" ( c%d > t%d ) \"",
"%",
"(",
"index",
",",
"index",
")",
"else",
":",
"value",
"=",
"node",
"line",
".",
"append",
"(",
"value",
")",
"line",
".",
"append",
"(",
"')'",
")",
"line",
".",
"append",
"(",
"\"- d%d * c%d\"",
"%",
"(",
"base_index",
",",
"base_index",
")",
")",
"return",
"' '",
".",
"join",
"(",
"line",
")"
] |
Generates a piecewise equation from the tokens
|
[
"Generates",
"a",
"piecewise",
"equation",
"from",
"the",
"tokens"
] |
[
"\"\"\"\n Generates a piecewise equation from the tokens\n \"\"\"",
"# replace each node with the comparison",
"# add decay term"
] |
[
{
"param": "tokens",
"type": null
},
{
"param": "indexer",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "tokens",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
},
{
"identifier": "indexer",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def piecewise( tokens, indexer ):
base_node = tokens[1].value
base_index = indexer[base_node]
line = []
line.append ( 'float(' )
nodes = [ t.value for t in tokens[4:] ]
for node in nodes:
if node in indexer:
index = indexer[node]
value = " ( c%d > t%d ) " % ( index, index )
else:
value = node
line.append ( value )
line.append ( ')' )
line.append ( "- d%d * c%d" % ( base_index, base_index ) )
return ' '.join( line )
| 1,472 | 750 |
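A stub-driven sketch for the entry above; the token objects only need a .value attribute (inferred from the code), and the node names are illustrative:

class Tok:
    def __init__(self, value):
        self.value = value

tokens = [Tok(t) for t in ['d', 'A', '*', '=', 'B', 'and', 'C']]
indexer = {'A': 1, 'B': 2, 'C': 3}
print(piecewise(tokens, indexer))
# float(  ( c2 > t2 )  and  ( c3 > t3 )  ) - d1 * c1   (whitespace approximate)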
f09e1e4e93a334b8f887770e41783d4ae4005b1c
|
DRI-AIC/Zeph
|
zeph/geofunctions.py
|
[
"MIT"
] |
Python
|
round_geo
|
<not_specific>
|
def round_geo(geo, n=10):
"""Round the values of a geotransform to n digits
Args:
geo (tuple): :class:`gdal.Geotransform` object
n (int): number of digits to round the
:class:`gdal.Geotransform` to
Returns:
tuple: :class:`gdal.Geotransform` rounded to n digits
"""
return tuple([round(i,n) for i in geo])
|
Round the values of a geotransform to n digits
Args:
geo (tuple): :class:`gdal.Geotransform` object
n (int): number of digits to round the
:class:`gdal.Geotransform` to
Returns:
tuple: :class:`gdal.Geotransform` rounded to n digits
|
Round the values of a geotransform to n digits
|
[
"Round",
"the",
"values",
"of",
"a",
"geotransform",
"to",
"n",
"digits"
] |
def round_geo(geo, n=10):
return tuple([round(i,n) for i in geo])
|
[
"def",
"round_geo",
"(",
"geo",
",",
"n",
"=",
"10",
")",
":",
"return",
"tuple",
"(",
"[",
"round",
"(",
"i",
",",
"n",
")",
"for",
"i",
"in",
"geo",
"]",
")"
] |
Round the values of a geotransform to n digits
|
[
"Round",
"the",
"values",
"of",
"a",
"geotransform",
"to",
"n",
"digits"
] |
[
"\"\"\"Round the values of a geotransform to n digits\n\n Args:\n geo (tuple): :class:`gdal.Geotransform` object\n n (int): number of digits to round the\n :class:`gdal.Geotransform` to\n\n Returns:\n tuple: :class:`gdal.Geotransform` rounded to n digits\n\n \"\"\""
] |
[
{
"param": "geo",
"type": null
},
{
"param": "n",
"type": null
}
] |
{
"returns": [
{
"docstring": ":class:`gdal.Geotransform` rounded to n digits",
"docstring_tokens": [
":",
"class",
":",
"`",
"gdal",
".",
"Geotransform",
"`",
"rounded",
"to",
"n",
"digits"
],
"type": "tuple"
}
],
"raises": [],
"params": [
{
"identifier": "geo",
"type": null,
"docstring": null,
"docstring_tokens": [
"None"
],
"default": null,
"is_optional": false
},
{
"identifier": "n",
"type": null,
"docstring": "number of digits to round the\n:class:`gdal.Geotransform` to",
"docstring_tokens": [
"number",
"of",
"digits",
"to",
"round",
"the",
":",
"class",
":",
"`",
"gdal",
".",
"Geotransform",
"`",
"to"
],
"default": null,
"is_optional": false
}
],
"outlier_params": [],
"others": []
}
|
def round_geo(geo, n=10):
return tuple([round(i,n) for i in geo])
| 1,473 | 513 |
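A quick behaviour check for the entry above; the tuple mimics a GDAL six-element geotransform (origin, pixel sizes, rotation terms):

geo = (-123.4567890123, 0.00027777778, 0.0, 48.9, 0.0, -0.00027777778)
print(round_geo(geo, n=6))   # (-123.456789, 0.000278, 0.0, 48.9, 0.0, -0.000278)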
8d0921c5eaa3067d68971a3ca9aa0e86f07fa446
|
kusanagi/kusanagi-sdk-python
|
kusanagi/sdk/lib/cli.py
|
[
"MIT"
] |
Python
|
parse_key_value_list
|
dict
|
def parse_key_value_list(values: list) -> dict:
"""
Option callback to validate a list of key/value arguments.
Converts 'NAME=VALUE' CLI parameters to a dictionary.
:raises: ValueError
"""
if not values:
return {}
params = {}
for value in values:
parts = value.split('=', 1)
if len(parts) != 2:
raise ValueError('Invalid parameter format')
param_name, param_value = parts
params[param_name] = param_value
return params
|
Option callback to validate a list of key/value arguments.
Converts 'NAME=VALUE' CLI parameters to a dictionary.
:raises: ValueError
|
Option callback to validate a list of key/value arguments.
Converts 'NAME=VALUE' CLI parameters to a dictionary.
|
[
"Option",
"callback",
"to",
"validate",
"a",
"list",
"of",
"key",
"/",
"value",
"arguments",
".",
"Converts",
"'",
"NAME",
"=",
"VALUE",
"'",
"CLI",
"parameters",
"to",
"a",
"dictionary",
"."
] |
def parse_key_value_list(values: list) -> dict:
if not values:
return {}
params = {}
for value in values:
parts = value.split('=', 1)
if len(parts) != 2:
raise ValueError('Invalid parameter format')
param_name, param_value = parts
params[param_name] = param_value
return params
|
[
"def",
"parse_key_value_list",
"(",
"values",
":",
"list",
")",
"->",
"dict",
":",
"if",
"not",
"values",
":",
"return",
"{",
"}",
"params",
"=",
"{",
"}",
"for",
"value",
"in",
"values",
":",
"parts",
"=",
"value",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"len",
"(",
"parts",
")",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'Invalid parameter format'",
")",
"param_name",
",",
"param_value",
"=",
"parts",
"params",
"[",
"param_name",
"]",
"=",
"param_value",
"return",
"params"
] |
Option callback to validate a list of key/value arguments.
|
[
"Option",
"callback",
"to",
"validate",
"a",
"list",
"of",
"key",
"/",
"value",
"arguments",
"."
] |
[
"\"\"\"\n Option callback to validate a list of key/value arguments.\n\n Converts 'NAME=VALUE' CLI parameters to a dictionary.\n\n :raises: ValueError\n\n \"\"\""
] |
[
{
"param": "values",
"type": "list"
}
] |
{
"returns": [],
"raises": [
{
"docstring": null,
"docstring_tokens": [
"None"
],
"type": null
}
],
"params": [
{
"identifier": "values",
"type": "list",
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def parse_key_value_list(values: list) -> dict:
if not values:
return {}
params = {}
for value in values:
parts = value.split('=', 1)
if len(parts) != 2:
raise ValueError('Invalid parameter format')
param_name, param_value = parts
params[param_name] = param_value
return params
| 1,474 | 230 |
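Behaviour checks for the CLI-parsing entry above:

assert parse_key_value_list(['name=kusanagi', 'retries=3']) == {'name': 'kusanagi', 'retries': '3'}
assert parse_key_value_list(['url=https://host?a=b']) == {'url': 'https://host?a=b'}   # split on first '=' only
assert parse_key_value_list([]) == {}
# parse_key_value_list(['bad']) raises ValueError('Invalid parameter format')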
dd50d69c6519eb9cfe2319cb7ca84bae2d9b55b7
|
tuanquanghpvn/odoo8-tutorial
|
odoo/addons/website/models/ir_qweb.py
|
[
"MIT"
] |
Python
|
_realize_padding
| null |
def _realize_padding(it):
""" Fold and convert padding requests: integers in the output sequence are
requests for at least n newlines of padding. Runs thereof can be collapsed
into the largest requests and converted to newlines.
"""
padding = None
for item in it:
if isinstance(item, int):
padding = max(padding, item)
continue
if padding:
yield '\n' * padding
padding = None
yield item
# leftover padding irrelevant as the output will be stripped
|
Fold and convert padding requests: integers in the output sequence are
requests for at least n newlines of padding. Runs thereof can be collapsed
into the largest requests and converted to newlines.
|
Fold and convert padding requests: integers in the output sequence are
requests for at least n newlines of padding. Runs thereof can be collapsed
into the largest requests and converted to newlines.
|
[
"Fold",
"and",
"convert",
"padding",
"requests",
":",
"integers",
"in",
"the",
"output",
"sequence",
"are",
"requests",
"for",
"at",
"least",
"n",
"newlines",
"of",
"padding",
".",
"Runs",
"thereof",
"can",
"be",
"collapsed",
"into",
"the",
"largest",
"requests",
"and",
"converted",
"to",
"newlines",
"."
] |
def _realize_padding(it):
padding = None
for item in it:
if isinstance(item, int):
padding = max(padding, item)
continue
if padding:
yield '\n' * padding
padding = None
yield item
|
[
"def",
"_realize_padding",
"(",
"it",
")",
":",
"padding",
"=",
"None",
"for",
"item",
"in",
"it",
":",
"if",
"isinstance",
"(",
"item",
",",
"int",
")",
":",
"padding",
"=",
"max",
"(",
"padding",
",",
"item",
")",
"continue",
"if",
"padding",
":",
"yield",
"'\\n'",
"*",
"padding",
"padding",
"=",
"None",
"yield",
"item"
] |
Fold and convert padding requests: integers in the output sequence are
requests for at least n newlines of padding.
|
[
"Fold",
"and",
"convert",
"padding",
"requests",
":",
"integers",
"in",
"the",
"output",
"sequence",
"are",
"requests",
"for",
"at",
"least",
"n",
"newlines",
"of",
"padding",
"."
] |
[
"\"\"\" Fold and convert padding requests: integers in the output sequence are\n requests for at least n newlines of padding. Runs thereof can be collapsed\n into the largest requests and converted to newlines.\n \"\"\"",
"# leftover padding irrelevant as the output will be stripped"
] |
[
{
"param": "it",
"type": null
}
] |
{
"returns": [],
"raises": [],
"params": [
{
"identifier": "it",
"type": null,
"docstring": null,
"docstring_tokens": [],
"default": null,
"is_optional": null
}
],
"outlier_params": [],
"others": []
}
|
def _realize_padding(it):
padding = None
for item in it:
if isinstance(item, int):
padding = max(padding, item)
continue
if padding:
yield '\n' * padding
padding = None
yield item
| 1,477 | 841 |
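A hedged note on the padding entry above: it appears to date from Python 2, where `max(None, n)` returns n; under Python 3 that comparison raises TypeError, so a 3-compatible sketch seeds the counter with 0:

def realize_padding_py3(it):
    # Collapse runs of integer padding requests into a single newline block.
    padding = 0
    for item in it:
        if isinstance(item, int):
            padding = max(padding, item)
            continue
        if padding:
            yield '\n' * padding
            padding = 0
        yield item

assert list(realize_padding_py3(['a', 2, 1, 'b'])) == ['a', '\n\n', 'b']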