Dataset columns:

    repo              string (7 to 54 chars)
    path              string (4 to 192 chars)
    url               string (87 to 284 chars)
    code              string (78 to 104k chars)
    code_tokens       list
    docstring         string (1 to 46.9k chars)
    docstring_tokens  list
    language          string (1 distinct value)
    partition         string (3 distinct values)
mlouielu/twstock
twstock/legacy.py
https://github.com/mlouielu/twstock/blob/cddddcc084d2d00497d591ab3059e3205b755825/twstock/legacy.py#L87-L94
def bias_ratio(self, position=False):
    """Determine the bias ratio (乖離).

    :param bool position: True for positive bias (正乖離),
        False for negative bias (負乖離)
    """
    return self.data.ma_bias_ratio_pivot(
        self.data.ma_bias_ratio(3, 6), position=position)
[ "def", "bias_ratio", "(", "self", ",", "position", "=", "False", ")", ":", "return", "self", ".", "data", ".", "ma_bias_ratio_pivot", "(", "self", ".", "data", ".", "ma_bias_ratio", "(", "3", ",", "6", ")", ",", "position", "=", "position", ")" ]
Determine the bias ratio (乖離). :param bool position: True for positive bias (正乖離), False for negative bias (負乖離)
[ "判斷乖離" ]
python
train
zimeon/iiif
iiif/static.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L57-L89
def static_full_sizes(width, height, tilesize):
    """Generator for scaled-down full image sizes.

    Positional arguments:
    width -- width of full size image
    height -- height of full size image
    tilesize -- width and height of tiles

    Yields [sw, sh], the size for each full-region tile that is less than
    the tilesize. This includes tiles up to the full image size if that is
    smaller than the tilesize.
    """
    # FIXME - Not sure what the correct algorithm is for this; from
    # observation of OpenSeadragon it seems that one keeps halving
    # the pixel size of the full image until both width and
    # height are less than the tile size. After that all subsequent
    # halvings of the image size are used, all the way down to 1,1.
    # It seems that without these reduced-size full-region images,
    # OpenSeadragon will not display any unzoomed image in small windows.
    #
    # I do not understand the algorithm that OpenSeadragon uses (or
    # know where it is in the code) to decide how small a version of
    # the complete image to request. It seems that there is a bug in
    # OpenSeadragon here because in some cases it requests images
    # of size 1,1 multiple times, which is anyway a useless image.
    for level in range(0, 20):
        factor = 2.0 ** level
        sw = int(width / factor + 0.5)
        sh = int(height / factor + 0.5)
        if (sw < tilesize and sh < tilesize):
            if (sw < 1 or sh < 1):
                break
            yield([sw, sh])
[ "def", "static_full_sizes", "(", "width", ",", "height", ",", "tilesize", ")", ":", "# FIXME - Not sure what correct algorithm is for this, from", "# observation of Openseadragon it seems that one keeps halving", "# the pixel size of the full image until until both width and", "# height ar...
Generator for scaled-down full image sizes. Positional arguments: width -- width of full size image height -- height of full size image tilesize -- width and height of tiles Yields [sw,sh], the size for each full-region tile that is less than the tilesize. This includes tiles up to the full image size if that is smaller than the tilesize.
[ "Generator", "for", "scaled", "-", "down", "full", "image", "sizes", "." ]
python
train
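For concreteness, a minimal usage sketch of the generator above (assuming it is importable from iiif.static; the expected output follows from the halving logic, computed by hand):

    from iiif.static import static_full_sizes

    # Sizes are halved until both dimensions drop below the tile size,
    # then every further halving is yielded, down to 1x1.
    for sw, sh in static_full_sizes(1024, 768, tilesize=512):
        print(sw, sh)
    # 256 192, 128 96, 64 48, 32 24, 16 12, 8 6, 4 3, 2 2, 1 1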
arcturial/clickatell-python
clickatell/http/__init__.py
https://github.com/arcturial/clickatell-python/blob/4a554c28edaf2e5d0d9e81b4c9415241bfd61d00/clickatell/http/__init__.py#L31-L69
def sendMessage(self, to, message, extra=None):
    """
    If the 'to' parameter is a single entry, we will parse it into a list.
    We will merge default values into the request data and the extra
    parameters provided by the user.
    """
    if extra is None:  # avoid a shared mutable default argument
        extra = {}
    to = to if isinstance(to, list) else [to]
    to = [str(i) for i in to]
    data = {'to': ','.join(to), 'text': message}
    data = self.merge(data, {'callback': 7, 'mo': 1}, extra)

    try:
        content = self.parseLegacy(self.request('http/sendmsg', data))
    except ClickatellError as e:
        # The error caught here will only be raised if the request was for
        # one number only. We can safely assume we are only dealing with a
        # single response here.
        content = {'error': e.message, 'errorCode': e.code, 'To': to[0]}

    # Force all responses to behave like a list, for consistency
    content = content if isinstance(content, list) else [content]
    result = []

    # Sending messages will also result in a "stable" response. The reason
    # for this is that we can't actually know if the request failed or not...
    # a message that could not be delivered is different from a failed
    # request... so errors are returned per message. In the case of global
    # failures (like authentication) all messages will contain the specific
    # error as part of the response body.
    for index, entry in enumerate(content):
        entry = self.merge({'ID': False, 'To': to[index], 'error': False,
                            'errorCode': False}, entry)
        result.append({
            'id': entry['ID'],
            'destination': entry['To'],
            'error': entry['error'],
            'errorCode': entry['errorCode']
        })

    return result
[ "def", "sendMessage", "(", "self", ",", "to", ",", "message", ",", "extra", "=", "{", "}", ")", ":", "to", "=", "to", "if", "isinstance", "(", "to", ",", "list", ")", "else", "[", "to", "]", "to", "=", "[", "str", "(", "i", ")", "for", "i", ...
If the 'to' parameter is a single entry, we will parse it into a list. We will merge default values into the request data and the extra parameters provided by the user.
[ "If", "the", "to", "parameter", "is", "a", "single", "entry", "we", "will", "parse", "it", "into", "a", "list", ".", "We", "will", "merge", "default", "values", "into", "the", "request", "data", "and", "the", "extra", "parameters", "provided", "by", "the...
python
train
tklovett/PyShirtsIO
ShirtsIO/request.py
https://github.com/tklovett/PyShirtsIO/blob/ff2f2d3b5e4ab2813abbce8545b27319c6af0def/ShirtsIO/request.py#L51-L72
def json_parse(self, content):
    """
    Wraps and abstracts content validation and JSON parsing
    to make sure the user gets the correct response.

    :param content: The content returned from the web request to be parsed as json
    :returns: a dict of the json response
    """
    try:
        data = json.loads(content)
    except ValueError:
        return {'meta': {'status': 500, 'msg': 'Server Error'},
                'response': {"error": "Malformed JSON or HTML was returned."}}

    # We only really care about the response if we succeed
    # and the error if we fail
    if 'error' in data:
        return {'meta': {'status': 400, 'msg': 'Bad Request'},
                'response': {"error": data['error']}}
    elif 'result' in data:
        return data['result']
    else:
        return {}
[ "def", "json_parse", "(", "self", ",", "content", ")", ":", "try", ":", "data", "=", "json", ".", "loads", "(", "content", ")", "except", "ValueError", ",", "e", ":", "return", "{", "'meta'", ":", "{", "'status'", ":", "500", ",", "'msg'", ":", "'S...
Wraps and abstracts content validation and JSON parsing to make sure the user gets the correct response. :param content: The content returned from the web request to be parsed as json :returns: a dict of the json response
[ "Wraps", "and", "abstracts", "content", "validation", "and", "JSON", "parsing", "to", "make", "sure", "the", "user", "gets", "the", "correct", "response", ".", ":", "param", "content", ":", "The", "content", "returned", "from", "the", "web", "request", "to",...
python
valid
cloudtools/troposphere
setup.py
https://github.com/cloudtools/troposphere/blob/f7ea5591a7c287a843adc9c184d2f56064cfc632/setup.py#L26-L31
def file_contents(file_name):
    """Given a file name to a valid file, returns the file contents as a string."""
    curr_dir = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(curr_dir, file_name)) as the_file:
        contents = the_file.read()
    return contents
[ "def", "file_contents", "(", "file_name", ")", ":", "curr_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "curr_dir", ",", "f...
Given a file name to a valid file, returns the file contents as a string.
[ "Given", "a", "file", "name", "to", "a", "valid", "file", "returns", "the", "file", "object", "." ]
python
train
ethereum/web3.py
web3/_utils/normalizers.py
https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/normalizers.py#L84-L103
def parse_basic_type_str(old_normalizer):
    """
    Modifies a normalizer to automatically parse the incoming type string.
    If that type string does not represent a basic type (i.e. non-tuple type)
    or is not parsable, the normalizer does nothing.
    """
    @functools.wraps(old_normalizer)
    def new_normalizer(type_str, data):
        try:
            abi_type = parse(type_str)
        except ParseError:
            # If the type string is not parsable, do nothing
            return type_str, data

        if not isinstance(abi_type, BasicType):
            return type_str, data

        return old_normalizer(abi_type, type_str, data)

    return new_normalizer
[ "def", "parse_basic_type_str", "(", "old_normalizer", ")", ":", "@", "functools", ".", "wraps", "(", "old_normalizer", ")", "def", "new_normalizer", "(", "type_str", ",", "data", ")", ":", "try", ":", "abi_type", "=", "parse", "(", "type_str", ")", "except",...
Modifies a normalizer to automatically parse the incoming type string. If that type string does not represent a basic type (i.e. non-tuple type) or is not parsable, the normalizer does nothing.
[ "Modifies", "a", "normalizer", "to", "automatically", "parse", "the", "incoming", "type", "string", ".", "If", "that", "type", "string", "does", "not", "represent", "a", "basic", "type", "(", "i", ".", "e", ".", "non", "-", "tuple", "type", ")", "or", ...
python
train
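The guard logic of this decorator is easy to see with a stripped-down stand-in. The sketch below re-implements the same pattern with hypothetical parse/BasicType/ParseError stand-ins (in web3.py the real ones come from the eth-abi grammar module); it is illustrative, not the library's code:

    import functools

    class ParseError(Exception):
        pass

    class BasicType:
        def __init__(self, base):
            self.base = base

    def parse(type_str):
        # stand-in parser: accepts simple names like 'uint256', rejects tuples
        if type_str.startswith('('):
            return object()  # parsable, but not a BasicType
        if not type_str.isalnum():
            raise ParseError(type_str)
        return BasicType(type_str)

    def parse_basic_type_str(old_normalizer):
        @functools.wraps(old_normalizer)
        def new_normalizer(type_str, data):
            try:
                abi_type = parse(type_str)
            except ParseError:
                return type_str, data      # unparsable: pass through untouched
            if not isinstance(abi_type, BasicType):
                return type_str, data      # tuple/complex type: pass through
            return old_normalizer(abi_type, type_str, data)
        return new_normalizer

    @parse_basic_type_str
    def upper_basic(abi_type, type_str, data):
        return type_str, str(data).upper()

    print(upper_basic('uint256', 'abc'))    # normalized: ('uint256', 'ABC')
    print(upper_basic('(uint256)', 'abc'))  # passed through: ('(uint256)', 'abc')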
rackerlabs/fleece
fleece/cli/run/run.py
https://github.com/rackerlabs/fleece/blob/42d79dfa0777e99dbb09bc46105449a9be5dbaa9/fleece/cli/run/run.py#L192-L199
def validate_args(args):
    """Validate command-line arguments."""
    if not any([args.environment, args.stage, args.account]):
        sys.exit(NO_ACCT_OR_ENV_ERROR)
    if args.environment and args.account:
        sys.exit(ENV_AND_ACCT_ERROR)
    if args.environment and args.role:
        sys.exit(ENV_AND_ROLE_ERROR)
[ "def", "validate_args", "(", "args", ")", ":", "if", "not", "any", "(", "[", "args", ".", "environment", ",", "args", ".", "stage", ",", "args", ".", "account", "]", ")", ":", "sys", ".", "exit", "(", "NO_ACCT_OR_ENV_ERROR", ")", "if", "args", ".", ...
Validate command-line arguments.
[ "Validate", "command", "-", "line", "arguments", "." ]
python
train
fermiPy/fermipy
fermipy/jobs/sys_interface.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/sys_interface.py#L86-L88
def dispatch_job_hook(self, link, key, job_config, logfile, stream=sys.stdout):
    """Hook to dispatch a single job"""
    raise NotImplementedError("SysInterface.dispatch_job_hook")
[ "def", "dispatch_job_hook", "(", "self", ",", "link", ",", "key", ",", "job_config", ",", "logfile", ",", "stream", "=", "sys", ".", "stdout", ")", ":", "raise", "NotImplementedError", "(", "\"SysInterface.dispatch_job_hook\"", ")" ]
Hook to dispatch a single job
[ "Hook", "to", "dispatch", "a", "single", "job" ]
python
train
heikomuller/sco-datastore
scodata/datastore.py
https://github.com/heikomuller/sco-datastore/blob/7180a6b51150667e47629da566aedaa742e39342/scodata/datastore.py#L334-L386
def upsert_object_property(self, identifier, properties, ignore_constraints=False):
    """Manipulate an object's property set. Inserts or updates properties in
    the given dictionary. If a property key does not exist in the object's
    property set it is created. If the value is None an existing property is
    deleted. Existing object properties that are not present in the given
    property set remain unaffected.

    Deleting mandatory properties or updating immutable properties results
    in a ValueError. These constraints can be disabled using the
    ignore_constraints parameter.

    Parameters
    ----------
    identifier : string
        Unique object identifier
    properties : Dictionary()
        Dictionary of property names and their new values.
    ignore_constraints : Boolean
        Flag indicating whether to ignore immutable and mandatory property
        constraints (True) or not (False, default).

    Returns
    -------
    ObjectHandle
        Handle to updated object or None if object does not exist
    """
    # Retrieve the object with the given identifier. This is a (sub-)class
    # of ObjectHandle.
    obj = self.get_object(identifier)
    if obj is not None:
        # Modify the property set of the retrieved object handle. Raise an
        # exception if any of the upserts is not valid.
        for key in properties:
            value = properties[key]
            # If the update affects an immutable property raise an exception
            if not ignore_constraints and key in self.immutable_properties:
                raise ValueError('update to immutable property: ' + key)
            # Check whether the operation is an UPSERT (value != None) or
            # DELETE (value == None)
            if value is not None:
                obj.properties[key] = value
            else:
                # DELETE. Make sure the property is not mandatory
                if not ignore_constraints and key in self.mandatory_properties:
                    raise ValueError('delete mandatory property: ' + key)
                elif key in obj.properties:
                    del obj.properties[key]
        # Update object in database
        self.replace_object(obj)
    # Return object handle
    return obj
[ "def", "upsert_object_property", "(", "self", ",", "identifier", ",", "properties", ",", "ignore_constraints", "=", "False", ")", ":", "# Retrieve the object with the gievn identifier. This is a (sub-)class", "# of ObjectHandle", "obj", "=", "self", ".", "get_object", "(", ...
Manipulate an object's property set. Inserts or updates properties in the given dictionary. If a property key does not exist in the object's property set it is created. If the value is None an existing property is deleted. Existing object properties that are not present in the given property set remain unaffected. Deleting mandatory properties or updating immutable properties results in a ValueError. These constraints can be disabled using the ignore_constraints parameter. Parameters ---------- identifier : string Unique object identifier properties : Dictionary() Dictionary of property names and their new values. ignore_constraints : Boolean Flag indicating whether to ignore immutable and mandatory property constraints (True) or not (False, default). Returns ------- ObjectHandle Handle to updated object or None if object does not exist
[ "Manipulate", "an", "object", "s", "property", "set", ".", "Inserts", "or", "updates", "properties", "in", "given", "dictionary", ".", "If", "a", "property", "key", "does", "not", "exist", "in", "the", "object", "s", "property", "set", "it", "is", "created...
python
train
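The upsert/delete semantics above reduce to one rule per key: a non-None value inserts or overwrites, a None value deletes. A minimal sketch of just that rule on a plain dict (constraint checks and persistence omitted):

    def upsert_properties(props, updates):
        """Apply upsert_object_property-style updates to a property dict."""
        for key, value in updates.items():
            if value is not None:
                props[key] = value   # insert or update
            elif key in props:
                del props[key]       # None means delete
        return props

    props = {'name': 'obj-1', 'color': 'red'}
    upsert_properties(props, {'color': 'blue', 'name': None, 'size': 'XL'})
    print(props)  # {'color': 'blue', 'size': 'XL'}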
OpenTreeOfLife/peyotl
peyotl/nexson_syntax/nexml2nexson.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/nexson_syntax/nexml2nexson.py#L127-L167
def _gen_hbf_el(self, x):
    """
    Builds a dictionary from the DOM element x.
    The function uses a hacky splitting of attribute or tag names
    using {} to remove namespaces.

    Returns a pair of: the tag of `x` and the honeybadgerfish
    representation of the subelements of x.

    Indirect recursion through _hbf_handle_child_elements
    """
    obj = {}
    # grab the tag of x
    el_name = x.nodeName
    assert el_name is not None
    # add the attributes to the dictionary
    att_container = x.attributes
    ns_obj = {}
    if att_container is not None:
        for i in range(att_container.length):
            attr = att_container.item(i)
            n = attr.name
            t = None
            if n.startswith('xmlns'):
                if n == 'xmlns':
                    t = '$'
                elif n.startswith('xmlns:'):
                    t = n[6:]  # strip off the xmlns:
            if t is None:
                obj['@' + n] = attr.value
            else:
                ns_obj[t] = attr.value
        if ns_obj:
            obj['@xmlns'] = ns_obj

    x.normalize()
    # store the text content of the element under the key '$'
    text_content, ntl = _extract_text_and_child_element_list(x)
    if text_content:
        obj['$'] = text_content
    self._hbf_handle_child_elements(obj, ntl)
    return el_name, obj
[ "def", "_gen_hbf_el", "(", "self", ",", "x", ")", ":", "obj", "=", "{", "}", "# grab the tag of x", "el_name", "=", "x", ".", "nodeName", "assert", "el_name", "is", "not", "None", "# add the attributes to the dictionary", "att_container", "=", "x", ".", "attri...
Builds a dictionary from the DOM element x. The function uses a hacky splitting of attribute or tag names using {} to remove namespaces. Returns a pair of: the tag of `x` and the honeybadgerfish representation of the subelements of x. Indirect recursion through _hbf_handle_child_elements
[ "Builds", "a", "dictionary", "from", "the", "DOM", "element", "x", "The", "function", "Uses", "as", "hacky", "splitting", "of", "attribute", "or", "tag", "names", "using", "{}", "to", "remove", "namespaces", ".", "returns", "a", "pair", "of", ":", "the", ...
python
train
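The honeybadgerfish convention that _gen_hbf_el produces can be summarized with a small, self-contained sketch: attributes get an '@' prefix, text content goes under '$', and xmlns declarations are collected separately. The helper below is an illustrative simplification that handles attributes and text only, not child elements or the '@xmlns' grouping:

    from xml.dom.minidom import parseString

    def hbf_el(x):
        """Simplified honeybadgerfish rendering of one DOM element."""
        obj = {}
        if x.attributes is not None:
            for i in range(x.attributes.length):
                attr = x.attributes.item(i)
                obj['@' + attr.name] = attr.value   # attributes get '@' prefix
        x.normalize()
        text = ''.join(c.data for c in x.childNodes if c.nodeType == c.TEXT_NODE)
        if text.strip():
            obj['$'] = text                          # text content under '$'
        return x.nodeName, obj

    dom = parseString('<ot:study xmlns:ot="http://x" id="s1">hello</ot:study>')
    print(hbf_el(dom.documentElement))
    # ('ot:study', {'@xmlns:ot': 'http://x', '@id': 's1', '$': 'hello'})
    # (attribute order in the dict may vary)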
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/imphook2.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/imphook2.py#L365-L437
def _InvokeImportCallbackBySuffix(names):
    """Invokes import callbacks for newly loaded modules.

    Uses a path suffix match to identify whether a loaded module matches the
    file path provided by the user.

    Args:
        names: A set of names for modules that are loaded by the current
            import. The set may contain some superfluous entries that were
            already loaded before this import, or some entries that do not
            correspond to a module. The list is expected to be much smaller
            than the exact sys.modules so that a linear search is not as
            costly.
    """
    def GetModuleFromName(name, path):
        """Returns the loaded module for this name/path, or None if not found.

        Args:
            name: A string that may represent the name of a loaded Python
                module.
            path: If 'name' ends with '.*', then the last path component in
                'path' is used to identify what the wildcard may map to.
                Does not contain file extension.

        Returns:
            The loaded module for the given name and path, or None if a
            loaded module was not found.
        """
        # The from-import syntax can be used as 'from p1.p2 import *'. In
        # this case, we cannot know what modules will match the wildcard.
        # However, we know that the wildcard can only be used to import leaf
        # modules. So, we guess that the leaf module will have the same name
        # as the leaf file name the user provided. For instance,
        #   User input path = 'foo.py'
        #   Currently executing import:
        #     from pkg1.pkg2 import *
        #   Then, we combine:
        #     1. 'pkg1.pkg2' from import's outer package and
        #     2. Add 'foo' as our guess for the leaf module name.
        #   So, we will search for modules with name similar to
        #   'pkg1.pkg2.foo'.
        if name.endswith('.*'):
            # Replace the final '*' with the name of the module we are
            # looking for.
            name = name.rpartition('.')[0] + '.' + path.split('/')[-1]

        # Check if the module was loaded.
        return sys.modules.get(name)

    # _import_callbacks might change during iteration because RemoveCallback()
    # might delete items. Iterate over a copy to avoid a
    # 'dictionary changed size during iteration' error.
    for path, callbacks in list(_import_callbacks.items()):
        root = os.path.splitext(path)[0]
        nonempty_names = (n for n in names if n)
        modules = (GetModuleFromName(name, root) for name in nonempty_names)
        nonempty_modules = (m for m in modules if m)
        for module in nonempty_modules:
            # TODO(emrekultursay): Write unit test to cover None case.
            mod_file = getattr(module, '__file__', None)
            if not mod_file:
                continue

            mod_root = os.path.splitext(mod_file)[0]

            # If the module path is relative, add the curdir prefix to
            # convert it to an absolute path. Note that we don't use
            # os.path.abspath because it also normalizes the path (which has
            # side effects we don't want).
            if not os.path.isabs(mod_root):
                mod_root = os.path.join(os.curdir, mod_root)

            if module_utils2.IsPathSuffix(mod_root, root):
                for callback in callbacks.copy():
                    callback(module)
                break
[ "def", "_InvokeImportCallbackBySuffix", "(", "names", ")", ":", "def", "GetModuleFromName", "(", "name", ",", "path", ")", ":", "\"\"\"Returns the loaded module for this name/path, or None if not found.\n\n Args:\n name: A string that may represent the name of a loaded Python mod...
Invokes import callbacks for newly loaded modules. Uses a path suffix match to identify whether a loaded module matches the file path provided by the user. Args: names: A set of names for modules that are loaded by the current import. The set may contain some superfluous entries that were already loaded before this import, or some entries that do not correspond to a module. The list is expected to be much smaller than the exact sys.modules so that a linear search is not as costly.
[ "Invokes", "import", "callbacks", "for", "newly", "loaded", "modules", "." ]
python
train
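The suffix match delegated to module_utils2.IsPathSuffix is, in essence, a whole-component comparison of path tails. A hedged sketch of what such a check can look like (the real helper in cloud-debug-python may differ in details):

    import os

    def is_path_suffix(mod_path, path):
        """True if 'path' is a whole-component suffix of mod_path."""
        mod_parts = os.path.normpath(mod_path).split(os.sep)
        suffix_parts = os.path.normpath(path).split(os.sep)
        return mod_parts[-len(suffix_parts):] == suffix_parts

    print(is_path_suffix('/srv/app/pkg1/pkg2/foo', 'pkg2/foo'))    # True
    print(is_path_suffix('/srv/app/pkg1/mypkg2/foo', 'pkg2/foo'))  # False:
    # 'mypkg2' is not the component 'pkg2', so a plain endswith() would be wrong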
mfcovington/django-project-home-templatetags
project_home_tags/templatetags/project_home.py
https://github.com/mfcovington/django-project-home-templatetags/blob/abc660906086088792c5e5e7be6ecd151c2ccddb/project_home_tags/templatetags/project_home.py#L39-L62
def silence_without_namespace(f):
    """Decorator to silence template tags if 'PROJECT_HOME_NAMESPACE'
    is not defined in settings.

    Usage Example:

        from django import template

        register = template.Library()

        @register.simple_tag
        @silence_without_namespace
        def a_template_tag(*args):
            ...
    """
    @wraps(f)
    def wrapped(label=None):
        if not home_namespace:
            return ''
        if label:
            return f(label)
        else:
            return f(home_label)
    return wrapped
[ "def", "silence_without_namespace", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "wrapped", "(", "label", "=", "None", ")", ":", "if", "not", "home_namespace", ":", "return", "''", "if", "label", ":", "return", "f", "(", "label", ")", "els...
Decorator to silence template tags if 'PROJECT_HOME_NAMESPACE' is not defined in settings. Usage Example: from django import template register = template.Library() @register.simple_tag @silence_without_namespace def a_template_tag(*args): ...
[ "Decorator", "to", "silence", "template", "tags", "if", "PROJECT_HOME_NAMESPACE", "is", "not", "defined", "in", "settings", "." ]
python
test
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L1106-L1131
def audits(self, ticket=None, include=None, **kwargs):
    """
    Retrieve TicketAudits. If a ticket is passed, return the audits for that
    specific ticket. If ticket is None, a TicketAuditGenerator is returned
    to handle pagination.

    This generator works differently to the other Zenpy generators as it is
    cursor based, allowing you to change the direction in which you are
    consuming objects. This is done with the reversed() Python built-in.
    For example:

    .. code-block:: python

        for audit in reversed(zenpy_client.tickets.audits()):
            print(audit)

    See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__
    for information on additional parameters.

    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    :param ticket: Ticket object or id
    """
    if ticket is not None:
        return self._query_zendesk(self.endpoint.audits, 'ticket_audit', id=ticket, include=include)
    else:
        return self._query_zendesk(self.endpoint.audits.cursor, 'ticket_audit', include=include, **kwargs)
[ "def", "audits", "(", "self", ",", "ticket", "=", "None", ",", "include", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "ticket", "is", "not", "None", ":", "return", "self", ".", "_query_zendesk", "(", "self", ".", "endpoint", ".", "audits", ...
Retrieve TicketAudits. If a ticket is passed, return the audits for that specific ticket. If ticket is None, a TicketAuditGenerator is returned to handle pagination. This generator works differently to the other Zenpy generators as it is cursor based, allowing you to change the direction in which you are consuming objects. This is done with the reversed() Python built-in. For example: .. code-block:: python for audit in reversed(zenpy_client.tickets.audits()): print(audit) See the `Zendesk docs <https://developer.zendesk.com/rest_api/docs/core/ticket_audits#pagination>`__ for information on additional parameters. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param ticket: Ticket object or id
[ "Retrieve", "TicketAudits", ".", "If", "ticket", "is", "passed", "return", "the", "tickets", "for", "a", "specific", "audit", "." ]
python
train
treycucco/pyebnf
pyebnf/_hand_written_parser.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/_hand_written_parser.py#L93-L140
def expression(self, text):
    """expression = number , op_mult , expression
                  | expression_terminal , op_mult , number , [operator , expression]
                  | expression_terminal , op_add , [operator , expression]
                  | expression_terminal , [operator , expression] ;
    """
    self._attempting(text)
    return alternation([
        # number , op_mult , expression
        concatenation([
            self.number,
            self.op_mult,
            self.expression
        ], ignore_whitespace=True),
        # expression_terminal , op_mult , number , [operator , expression]
        concatenation([
            self.expression_terminal,
            self.op_mult,
            self.number,
            option(
                concatenation([
                    self.operator,
                    self.expression
                ], ignore_whitespace=True)
            )
        ], ignore_whitespace=True),
        # expression_terminal , op_add , [operator , expression]
        concatenation([
            self.expression_terminal,
            self.op_add,
            option(
                concatenation([
                    self.operator,
                    self.expression
                ], ignore_whitespace=True)
            )
        ], ignore_whitespace=True),
        # expression_terminal , [operator , expression]
        concatenation([
            self.expression_terminal,
            option(
                concatenation([
                    self.operator,
                    self.expression
                ], ignore_whitespace=True)
            )
        ], ignore_whitespace=True)
    ])(text).retyped(TokenType.expression)
[ "def", "expression", "(", "self", ",", "text", ")", ":", "self", ".", "_attempting", "(", "text", ")", "return", "alternation", "(", "[", "# number , op_mult , expression", "concatenation", "(", "[", "self", ".", "number", ",", "self", ".", "op_mult", ",", ...
expression = number , op_mult , expression | expression_terminal , op_mult , number , [operator , expression] | expression_terminal , op_add , [operator , expression] | expression_terminal , [operator , expression] ;
[ "expression", "=", "number", "op_mult", "expression", "|", "expression_terminal", "op_mult", "number", "[", "operator", "expression", "]", "|", "expression_terminal", "op_add", "[", "operator", "expression", "]", "|", "expression_terminal", "[", "operator", "expressio...
python
test
SecurityInnovation/PGPy
pgpy/types.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/types.py#L281-L289
def int_to_bytes(i, minlen=1, order='big'):  # pragma: no cover
    """convert integer to bytes"""
    blen = max(minlen, PGPObject.int_byte_len(i), 1)

    if six.PY2:
        r = iter(_ * 8 for _ in (range(blen) if order == 'little' else range(blen - 1, -1, -1)))
        return bytes(bytearray((i >> c) & 0xff for c in r))

    return i.to_bytes(blen, order)
[ "def", "int_to_bytes", "(", "i", ",", "minlen", "=", "1", ",", "order", "=", "'big'", ")", ":", "# pragma: no cover", "blen", "=", "max", "(", "minlen", ",", "PGPObject", ".", "int_byte_len", "(", "i", ")", ",", "1", ")", "if", "six", ".", "PY2", "...
convert integer to bytes
[ "convert", "integer", "to", "bytes" ]
python
train
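On Python 3 the function above is just int.to_bytes with a computed minimum length; the Python 2 branch emulates it by shifting one byte out at a time. A quick check of the equivalent Python 3 behavior:

    # Python 3 semantics that the six.PY2 branch reproduces
    print((0x1234).to_bytes(2, 'big'))     # b'\x12\x34'
    print((0x1234).to_bytes(2, 'little'))  # b'\x34\x12'
    print((0x1234).to_bytes(4, 'big'))     # b'\x00\x00\x12\x34' (minlen padding)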
pyQode/pyqode.core
pyqode/core/widgets/splittable_tab_widget.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/splittable_tab_widget.py#L1270-L1278
def save_all(self):
    """
    Save all editors.
    """
    for w in self.widgets():
        try:
            self._save(w)
        except OSError:
            _logger().exception('failed to save %s', w.file.path)
[ "def", "save_all", "(", "self", ")", ":", "for", "w", "in", "self", ".", "widgets", "(", ")", ":", "try", ":", "self", ".", "_save", "(", "w", ")", "except", "OSError", ":", "_logger", "(", ")", ".", "exception", "(", "'failed to save %s'", ",", "w...
Save all editors.
[ "Save", "all", "editors", "." ]
python
train
aio-libs/aioftp
aioftp/common.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/common.py#L561-L576
def setlocale(name):
    """
    Context manager with a threading lock that sets the locale on enter,
    and sets it back to the original state on exit.

    ::

        >>> with setlocale("C"):
        ...     ...
    """
    with LOCALE_LOCK:
        old_locale = locale.setlocale(locale.LC_ALL)
        try:
            yield locale.setlocale(locale.LC_ALL, name)
        finally:
            locale.setlocale(locale.LC_ALL, old_locale)
[ "def", "setlocale", "(", "name", ")", ":", "with", "LOCALE_LOCK", ":", "old_locale", "=", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ")", "try", ":", "yield", "locale", ".", "setlocale", "(", "locale", ".", "LC_ALL", ",", "name", ")", "f...
Context manager with a threading lock that sets the locale on enter, and sets it back to the original state on exit. :: >>> with setlocale("C"): ... ...
[ "Context", "manager", "with", "threading", "lock", "for", "set", "locale", "on", "enter", "and", "set", "it", "back", "to", "original", "state", "on", "exit", "." ]
python
valid
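Because the body above uses yield inside try/finally, it is presumably wrapped with contextlib.contextmanager in the aioftp source (the decorator sits just outside the quoted line range). A self-contained sketch of the same pattern:

    import contextlib
    import locale
    import threading

    LOCALE_LOCK = threading.Lock()

    @contextlib.contextmanager
    def setlocale(name):
        """Temporarily switch the process-wide locale under a lock."""
        with LOCALE_LOCK:
            old_locale = locale.setlocale(locale.LC_ALL)
            try:
                yield locale.setlocale(locale.LC_ALL, name)
            finally:
                locale.setlocale(locale.LC_ALL, old_locale)

    with setlocale("C"):
        pass  # locale-dependent parsing/formatting goes here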
ionelmc/python-cogen
cogen/core/proactors/base.py
https://github.com/ionelmc/python-cogen/blob/83b0edb88425eba6e5bfda9f1dcd34642517e2a8/cogen/core/proactors/base.py#L115-L118
def set_options(self, multiplex_first=True, **bogus_options):
    "Takes implementation specific options. To be overridden in a subclass."
    self.multiplex_first = multiplex_first
    self._warn_bogus_options(**bogus_options)
[ "def", "set_options", "(", "self", ",", "multiplex_first", "=", "True", ",", "*", "*", "bogus_options", ")", ":", "self", ".", "multiplex_first", "=", "multiplex_first", "self", ".", "_warn_bogus_options", "(", "*", "*", "bogus_options", ")" ]
Takes implementation specific options. To be overridden in a subclass.
[ "Takes", "implementation", "specific", "options", ".", "To", "be", "overriden", "in", "a", "subclass", "." ]
python
train
readbeyond/aeneas
aeneas/globalfunctions.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/globalfunctions.py#L721-L737
def split_url(url):
    """
    Split the given URL ``base#anchor`` into ``(base, anchor)``,
    or ``(base, None)`` if no anchor is present.

    In case there are two or more ``#`` characters,
    return only the first two tokens: ``a#b#c => (a, b)``.

    :param string url: the url
    :rtype: tuple of str
    """
    if url is None:
        return (None, None)
    array = url.split("#")
    if len(array) == 1:
        array.append(None)
    return tuple(array[0:2])
[ "def", "split_url", "(", "url", ")", ":", "if", "url", "is", "None", ":", "return", "(", "None", ",", "None", ")", "array", "=", "url", ".", "split", "(", "\"#\"", ")", "if", "len", "(", "array", ")", "==", "1", ":", "array", ".", "append", "("...
Split the given URL ``base#anchor`` into ``(base, anchor)``, or ``(base, None)`` if no anchor is present. In case there are two or more ``#`` characters, return only the first two tokens: ``a#b#c => (a, b)``. :param string url: the url :rtype: tuple of str
[ "Split", "the", "given", "URL", "base#anchor", "into", "(", "base", "anchor", ")", "or", "(", "base", "None", ")", "if", "no", "anchor", "is", "present", "." ]
python
train
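A quick usage check of the splitting rule described in the docstring (assuming the function is imported from aeneas.globalfunctions):

    from aeneas.globalfunctions import split_url

    print(split_url(None))         # (None, None)
    print(split_url("page.html"))  # ('page.html', None)
    print(split_url("a#b"))        # ('a', 'b')
    print(split_url("a#b#c"))      # ('a', 'b') - only the first two tokens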
hydraplatform/hydra-base
hydra_base/lib/network.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/lib/network.py#L2133-L2165
def validate_network_topology(network_id, **kwargs):
    """
    Check for the presence of orphan nodes in a network.
    """
    user_id = kwargs.get('user_id')

    try:
        net_i = db.DBSession.query(Network).filter(Network.id == network_id).one()
        net_i.check_write_permission(user_id=user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found" % (network_id))

    nodes = []
    for node_i in net_i.nodes:
        if node_i.status == 'A':
            nodes.append(node_i.node_id)

    link_nodes = []
    for link_i in net_i.links:
        if link_i.status != 'A':
            continue
        if link_i.node_1_id not in link_nodes:
            link_nodes.append(link_i.node_1_id)
        if link_i.node_2_id not in link_nodes:
            link_nodes.append(link_i.node_2_id)

    nodes = set(nodes)
    link_nodes = set(link_nodes)

    isolated_nodes = nodes - link_nodes

    return isolated_nodes
[ "def", "validate_network_topology", "(", "network_id", ",", "*", "*", "kwargs", ")", ":", "user_id", "=", "kwargs", ".", "get", "(", "'user_id'", ")", "try", ":", "net_i", "=", "db", ".", "DBSession", ".", "query", "(", "Network", ")", ".", "filter", "...
Check for the presence of orphan nodes in a network.
[ "Check", "for", "the", "presence", "of", "orphan", "nodes", "in", "a", "network", "." ]
python
train
santoshphilip/eppy
eppy/idfreader.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idfreader.py#L69-L81
def makebunches_alter(data, commdct, theidf):
    """make bunches with data"""
    bunchdt = {}
    dt, dtls = data.dt, data.dtls
    for obj_i, key in enumerate(dtls):
        key = key.upper()
        objs = dt[key]
        list1 = []
        for obj in objs:
            bobj = makeabunch(commdct, obj, obj_i)
            list1.append(bobj)
        bunchdt[key] = Idf_MSequence(list1, objs, theidf)
    return bunchdt
[ "def", "makebunches_alter", "(", "data", ",", "commdct", ",", "theidf", ")", ":", "bunchdt", "=", "{", "}", "dt", ",", "dtls", "=", "data", ".", "dt", ",", "data", ".", "dtls", "for", "obj_i", ",", "key", "in", "enumerate", "(", "dtls", ")", ":", ...
make bunches with data
[ "make", "bunches", "with", "data" ]
python
train
SylvanasSun/python-common-cache
common_cache/eviction.py
https://github.com/SylvanasSun/python-common-cache/blob/f113eb3cd751eed5ab5373e8610a31a444220cf8/common_cache/eviction.py#L43-L84
def lru_for_evict(cache_dict, evict_number=1):
    """
    Use the LRU (Least Recently Used) strategy for evicting: the item whose
    last-used time is earliest will be removed. The parameter cache_dict must
    be an OrderedDict, because the cache implementation is based on
    OrderedDict and reinserts key and value every time a cache entry is read.
    This keeps the often-used entries at the tail of the OrderedDict, while
    the head of the OrderedDict holds the least recently used entries.

    Test:

    >>> import collections
    >>> from common_cache import CacheItem
    >>> dict = {'a' : 0, 'b' : 1}
    >>> lru_for_evict(dict)
    Traceback (most recent call last):
        ...
    ValueError: Not supported type <class 'dict'>
    >>> cache = collections.OrderedDict()
    >>> cache['a'] = CacheItem(key='a', value=0, expire=3)
    >>> cache['b'] = CacheItem(key='b', value=1, expire=3)
    >>> cache['c'] = CacheItem(key='c', value=2, expire=3)
    >>> cache['d'] = CacheItem(key='d', value=3, expire=3)
    >>> lru_for_evict(cache)
    ['a']
    >>> len(cache)
    3
    >>> lru_for_evict(cache, evict_number=2)
    ['b', 'c']
    >>> len(cache)
    1
    >>> lru_for_evict(cache, evict_number=10)
    ['d']
    >>> len(cache)
    0
    """
    if not isinstance(cache_dict, collections.OrderedDict):
        raise ValueError('Not supported type %s' % type(cache_dict))

    evicted_keys = []
    if len(cache_dict) < evict_number:
        evict_number = len(cache_dict)
    for i in range(evict_number):
        item = cache_dict.popitem(last=False)
        evicted_keys.append(item[0])
    return evicted_keys
[ "def", "lru_for_evict", "(", "cache_dict", ",", "evict_number", "=", "1", ")", ":", "if", "not", "isinstance", "(", "cache_dict", ",", "collections", ".", "OrderedDict", ")", ":", "raise", "ValueError", "(", "'Not supported type %s'", "%", "type", "(", "cache_...
Use the LRU (Least Recently Used) strategy for evicting: the item whose last-used time is earliest will be removed. The parameter cache_dict must be an OrderedDict, because the cache implementation is based on OrderedDict and reinserts key and value every time a cache entry is read; this keeps the often-used entries at the tail of the OrderedDict while the head holds the least recently used entries. Test: >>> import collections >>> from common_cache import CacheItem >>> dict = {'a' : 0, 'b' : 1} >>> lru_for_evict(dict) Traceback (most recent call last): ... ValueError: Not supported type <class 'dict'> >>> cache = collections.OrderedDict() >>> cache['a'] = CacheItem(key='a', value=0, expire=3) >>> cache['b'] = CacheItem(key='b', value=1, expire=3) >>> cache['c'] = CacheItem(key='c', value=2, expire=3) >>> cache['d'] = CacheItem(key='d', value=3, expire=3) >>> lru_for_evict(cache) ['a'] >>> len(cache) 3 >>> lru_for_evict(cache, evict_number=2) ['b', 'c'] >>> len(cache) 1 >>> lru_for_evict(cache, evict_number=10) ['d'] >>> len(cache) 0
[ "Use", "LRU", "(", "Least", "Recently", "Used", ")", "strategy", "for", "evicting", "the", "item", "that", "last", "used", "of", "time", "is", "the", "earliest", "will", "be", "removed", ".", "The", "parameter", "cache_dict", "must", "is", "an", "OrderedDi...
python
train
spyder-ide/conda-manager
conda_manager/api/conda_api.py
https://github.com/spyder-ide/conda-manager/blob/89a2126cbecefc92185cf979347ccac1c5ee5d9d/conda_manager/api/conda_api.py#L989-L1000
def _call_pip(self, name=None, prefix=None, extra_args=None, callback=None):
    """Call pip in QProcess worker."""
    cmd_list = self._pip_cmd(name=name, prefix=prefix)
    cmd_list.extend(extra_args)

    process_worker = ProcessWorker(cmd_list, pip=True, callback=callback)
    process_worker.sig_finished.connect(self._start)
    self._queue.append(process_worker)
    self._start()

    return process_worker
[ "def", "_call_pip", "(", "self", ",", "name", "=", "None", ",", "prefix", "=", "None", ",", "extra_args", "=", "None", ",", "callback", "=", "None", ")", ":", "cmd_list", "=", "self", ".", "_pip_cmd", "(", "name", "=", "name", ",", "prefix", "=", "...
Call pip in QProcess worker.
[ "Call", "pip", "in", "QProcess", "worker", "." ]
python
train
dturanski/springcloudstream
springcloudstream/stdio/stream.py
https://github.com/dturanski/springcloudstream/blob/208b542f9eba82e97882d52703af8e965a62a980/springcloudstream/stdio/stream.py#L73-L109
def launch_server(message_handler, options):
    """
    Launch a message server
    :param message_handler: The handler to execute for each message
    :param options: Application options for TCP, etc.
    """
    logger = logging.getLogger(__name__)
    # if (options.debug):
    #     logger.setLevel(logging.DEBUG)

    # if not options.monitor_port:
    #     logger.warning(
    #         "Monitoring not enabled. No monitor-port option defined.")
    # else:
    #     threading.Thread(target=launch_monitor_server,
    #                      args=(options.host, options.monitor_port, logger)).start()

    # Create the server, binding to specified host on configured port
    # logger.info('Starting server on host %s port %d Python version %s.%s.%s' %
    #             ((options.host, options.port) + sys.version_info[:3]))
    # server = ThreadedTCPServer((options.host, options.port),

    # Activate the server; this will keep running until you
    # interrupt the program with Ctrl-C
    try:
        while True:
            logger.debug('waiting for more data')
            if not message_handler.handle():
                break
        logger.warning("I/O stream closed from client")
    except KeyboardInterrupt:
        logger.info("I/O stream closed from client exiting...")
        os._exit(142)
    except:
        logger.exception("Error encountered handling message")
[ "def", "launch_server", "(", "message_handler", ",", "options", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "# if (options.debug):", "# logger.setLevel(logging.DEBUG)", "# if not options.monitor_port:", "# logger.warning(", "#...
Launch a message server :param message_handler: The handler to execute for each message :param options: Application options for TCP, etc.
[ "Launch", "a", "message", "server", ":", "param", "handler_function", ":", "The", "handler", "function", "to", "execute", "for", "each", "message", ":", "param", "options", ":", "Application", "options", "for", "TCP", "etc", "." ]
python
train
sckott/pygbif
pygbif/occurrences/count.py
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/count.py#L103-L118
def count_publishingcountries(country, **kwargs):
    '''
    Lists occurrence counts for all countries that publish data about the given country

    :param country: [str] A country, two letter code

    :return: dict

    Usage::

        from pygbif import occurrences
        occurrences.count_publishingcountries(country = "DE")
    '''
    url = gbif_baseurl + 'occurrence/counts/publishingCountries'
    out = gbif_GET(url, {"country": country}, **kwargs)
    return out
[ "def", "count_publishingcountries", "(", "country", ",", "*", "*", "kwargs", ")", ":", "url", "=", "gbif_baseurl", "+", "'occurrence/counts/publishingCountries'", "out", "=", "gbif_GET", "(", "url", ",", "{", "\"country\"", ":", "country", "}", ",", "*", "*", ...
Lists occurrence counts for all countries that publish data about the given country :param country: [str] A country, two letter code :return: dict Usage:: from pygbif import occurrences occurrences.count_publishingcountries(country = "DE")
[ "Lists", "occurrence", "counts", "for", "all", "countries", "that", "publish", "data", "about", "the", "given", "country" ]
python
train
tilde-lab/tilde
utils/syshwinfo.py
https://github.com/tilde-lab/tilde/blob/59841578b3503075aa85c76f9ae647b3ff92b0a3/utils/syshwinfo.py#L49-L63
def cpuinfo():
    """Get the cpu info"""
    hwinfo = {}
    with open("/proc/cpuinfo") as f:
        for line in f.readlines():
            cpul = line.split(":")
            name = cpul[0].strip()
            if (len(cpul) > 1):
                val = cpul[1].strip()
                if (name == "model name"):
                    hwinfo["CPU"] = val
                elif (name == "cpu MHz"):
                    hwinfo["MHz"] = int(round(float(val)))
    return hwinfo
[ "def", "cpuinfo", "(", ")", ":", "f", "=", "open", "(", "\"/proc/cpuinfo\"", ")", "hwinfo", "=", "{", "}", "for", "line", "in", "f", ".", "readlines", "(", ")", ":", "cpul", "=", "line", ".", "split", "(", "\":\"", ")", "name", "=", "cpul", "[", ...
Get the cpu info
[ "Get", "the", "cpu", "info" ]
python
train
eyeseast/python-frontmatter
frontmatter/__init__.py
https://github.com/eyeseast/python-frontmatter/blob/c318e583c48599eb597e0ad59c5d972258c3febc/frontmatter/__init__.py#L117-L130
def loads(text, encoding='utf-8', handler=None, **defaults):
    """
    Parse text (binary or unicode) and return a :py:class:`post <frontmatter.Post>`.

    ::

        >>> with open('tests/hello-world.markdown') as f:
        ...     post = frontmatter.loads(f.read())
    """
    text = u(text, encoding)
    handler = handler or detect_format(text, handlers)
    metadata, content = parse(text, encoding, handler, **defaults)
    return Post(content, handler, **metadata)
[ "def", "loads", "(", "text", ",", "encoding", "=", "'utf-8'", ",", "handler", "=", "None", ",", "*", "*", "defaults", ")", ":", "text", "=", "u", "(", "text", ",", "encoding", ")", "handler", "=", "handler", "or", "detect_format", "(", "text", ",", ...
Parse text (binary or unicode) and return a :py:class:`post <frontmatter.Post>`. :: >>> with open('tests/hello-world.markdown') as f: ... post = frontmatter.loads(f.read())
[ "Parse", "text", "(", "binary", "or", "unicode", ")", "and", "return", "a", ":", "py", ":", "class", ":", "post", "<frontmatter", ".", "Post", ">", "." ]
python
test
mmp2/megaman
megaman/utils/spectral_clustering.py
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/spectral_clustering.py#L94-L193
def spectral_clustering(geom, K, eigen_solver='dense', random_state=None, solver_kwds=None,
                        renormalize=True, stabalize=True, additional_vectors=0):
    """
    Spectral clustering to find K clusters using the eigenvectors of a matrix
    which is derived from a set of similarities S.

    Parameters
    -----------
    S: array-like, shape (n_sample, n_sample)
        similarity matrix
    K: integer
        number of K clusters
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto' : algorithm will attempt to choose the best method for input data
        'dense' : use standard dense matrix operations for the eigenvalue decomposition.
            For this method, M must be an array or matrix type.
            This method should be avoided for large problems.
        'arpack' : use arnoldi iteration in shift-invert mode. For this method,
            M may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is best to
            try several random seeds in order to check results.
        'lobpcg' : Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg' : AMG requires pyamg to be installed. It can be faster on very large,
            sparse problems, but may also lead to instabilities.
    random_state : numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations. Defaults to numpy.random.RandomState.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
    renormalize : (bool) whether or not to set the rows of the eigenvectors to have
        norm 1; this can improve label quality
    stabalize : (bool) whether or not to compute the (more stable) eigenvectors of
        L = D^-1/2*S*D^-1/2 instead of P = D^-1*S
    additional_vectors : (int) compute additional eigenvectors when computing the
        eigendecomposition. When eigen_solver = 'amg' or 'lobpcg', often if a small
        number of eigenvalues is sought the largest eigenvalue returned is *not*
        equal to 1 (it should be). This can usually be fixed by requesting more
        than K eigenvalues until the first eigenvalue is close to 1 and then
        omitted. The remaining K-1 eigenvectors should be informative.

    Returns
    -------
    labels: array-like, shape (1, n_samples)
    """
    # Step 1: get the similarity matrix
    if geom.affinity_matrix is None:
        S = geom.compute_affinity_matrix()
    else:
        S = geom.affinity_matrix

    # Check for the stability method; symmetric solvers require this
    if eigen_solver in ['lobpcg', 'amg']:
        stabalize = True
    if stabalize:
        geom.laplacian_type = 'symmetricnormalized'
        return_lapsym = True
    else:
        geom.laplacian_type = 'randomwalk'
        return_lapsym = False

    # Step 2: get the Laplacian matrix
    P = geom.compute_laplacian_matrix(return_lapsym=return_lapsym)
    # by default the Laplacian is subtracted from the identity matrix
    # (this step may not be needed)
    P += identity(P.shape[0])

    # Step 3: Compute the top K eigenvectors and drop the first
    if eigen_solver in ['auto', 'amg', 'lobpcg']:
        n_components = 2 * int(np.log(P.shape[0])) * K + 1
        n_components += int(additional_vectors)
    else:
        n_components = K
    n_components = min(n_components, P.shape[0])
    (lambdas, eigen_vectors) = eigen_decomposition(P, n_components=n_components,
                                                   eigen_solver=eigen_solver,
                                                   random_state=random_state,
                                                   drop_first=True,
                                                   solver_kwds=solver_kwds)
    # the first vector is usually uninformative
    if eigen_solver in ['auto', 'lobpcg', 'amg']:
        if np.abs(lambdas[0] - 1) > 1e-4:
            warnings.warn("largest eigenvalue not equal to 1. Results may be poor. "
                          "Try increasing the additional_vectors parameter")
    eigen_vectors = eigen_vectors[:, 1:K]
    lambdas = lambdas[1:K]

    # If the stability method was chosen, adjust the eigenvectors
    if stabalize:
        w = np.array(geom.laplacian_weights)
        eigen_vectors /= np.sqrt(w[:, np.newaxis])
        eigen_vectors /= np.linalg.norm(eigen_vectors, axis=0)

    # If renormalize: set each data point to unit length
    if renormalize:
        norms = np.linalg.norm(eigen_vectors, axis=1)
        eigen_vectors /= norms[:, np.newaxis]

    # Step 4: run k-means clustering
    labels = k_means_clustering(eigen_vectors, K)
    return labels, eigen_vectors, P
[ "def", "spectral_clustering", "(", "geom", ",", "K", ",", "eigen_solver", "=", "'dense'", ",", "random_state", "=", "None", ",", "solver_kwds", "=", "None", ",", "renormalize", "=", "True", ",", "stabalize", "=", "True", ",", "additional_vectors", "=", "0", ...
Spectral clustering to find K clusters using the eigenvectors of a matrix which is derived from a set of similarities S. Parameters ----------- S: array-like, shape (n_sample, n_sample) similarity matrix K: integer number of K clusters eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'} 'auto' : algorithm will attempt to choose the best method for input data 'dense' : use standard dense matrix operations for the eigenvalue decomposition. For this method, M must be an array or matrix type. This method should be avoided for large problems. 'arpack' : use arnoldi iteration in shift-invert mode. For this method, M may be a dense matrix, sparse matrix, or general linear operator. Warning: ARPACK can be unstable for some problems. It is best to try several random seeds in order to check results. 'lobpcg' : Locally Optimal Block Preconditioned Conjugate Gradient Method. A preconditioned eigensolver for large symmetric positive definite (SPD) generalized eigenproblems. 'amg' : AMG requires pyamg to be installed. It can be faster on very large, sparse problems, but may also lead to instabilities. random_state : numpy.RandomState or int, optional The generator or seed used to determine the starting vector for arpack iterations. Defaults to numpy.random.RandomState. solver_kwds : any additional keyword arguments to pass to the selected eigen_solver renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1; this can improve label quality stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2 instead of P = D^-1*S additional_vectors : (int) compute additional eigenvectors when computing the eigendecomposition. When eigen_solver = 'amg' or 'lobpcg', often if a small number of eigenvalues is sought the largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then omitted. The remaining K-1 eigenvectors should be informative. Returns ------- labels: array-like, shape (1, n_samples)
[ "Spectral", "clustering", "for", "find", "K", "clusters", "by", "using", "the", "eigenvectors", "of", "a", "matrix", "which", "is", "derived", "from", "a", "set", "of", "similarities", "S", "." ]
python
train
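The stabalize branch relies on a standard identity: if v is an eigenvector of the symmetric matrix L = D^-1/2 * S * D^-1/2 with eigenvalue lam, then D^-1/2 * v is an eigenvector of the random-walk matrix P = D^-1 * S with the same eigenvalue, which is why the code divides the eigenvectors by sqrt(w). A small numpy check of that identity:

    import numpy as np

    rng = np.random.default_rng(0)
    S = rng.random((5, 5))
    S = (S + S.T) / 2                   # symmetric similarity matrix
    d = S.sum(axis=1)                   # degrees
    D_inv_sqrt = np.diag(1 / np.sqrt(d))

    L = D_inv_sqrt @ S @ D_inv_sqrt     # symmetric normalized
    P = np.diag(1 / d) @ S              # random-walk normalized

    lam, V = np.linalg.eigh(L)          # eigenpairs of the symmetric form
    u = D_inv_sqrt @ V[:, -1]           # back-transformed top eigenvector

    # u satisfies P u = lam u for the same eigenvalue
    print(np.allclose(P @ u, lam[-1] * u))  # True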
pyviz/holoviews
holoviews/core/options.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/options.py#L1121-L1125
def set_current_backend(cls, backend):
    "Use this method to set the backend to run the switch hooks"
    for hook in cls._backend_switch_hooks:
        hook(backend)
    cls.current_backend = backend
[ "def", "set_current_backend", "(", "cls", ",", "backend", ")", ":", "for", "hook", "in", "cls", ".", "_backend_switch_hooks", ":", "hook", "(", "backend", ")", "cls", ".", "current_backend", "=", "backend" ]
Use this method to set the backend to run the switch hooks
[ "Use", "this", "method", "to", "set", "the", "backend", "to", "run", "the", "switch", "hooks" ]
python
train
materialsproject/pymatgen
pymatgen/analysis/wulff.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/wulff.py#L58-L70
def get_tri_area(pts):
    """
    Given a list of coords for 3 points,
    compute the area of this triangle.

    Args:
        pts: [a, b, c] three points
    """
    a, b, c = pts[0], pts[1], pts[2]
    v1 = np.array(b) - np.array(a)
    v2 = np.array(c) - np.array(a)
    area_tri = abs(sp.linalg.norm(sp.cross(v1, v2)) / 2)
    return area_tri
[ "def", "get_tri_area", "(", "pts", ")", ":", "a", ",", "b", ",", "c", "=", "pts", "[", "0", "]", ",", "pts", "[", "1", "]", ",", "pts", "[", "2", "]", "v1", "=", "np", ".", "array", "(", "b", ")", "-", "np", ".", "array", "(", "a", ")",...
Given a list of coords for 3 points, Compute the area of this triangle. Args: pts: [a, b, c] three points
[ "Given", "a", "list", "of", "coords", "for", "3", "points", "Compute", "the", "area", "of", "this", "triangle", "." ]
python
train
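A worked check of the cross-product formula (area = |v1 x v2| / 2), written against numpy directly since sp.cross here is presumably the numpy re-export that older scipy versions provided:

    import numpy as np

    def tri_area(a, b, c):
        v1 = np.array(b) - np.array(a)
        v2 = np.array(c) - np.array(a)
        return abs(np.linalg.norm(np.cross(v1, v2)) / 2)

    # Right triangle with legs of length 1: area = 0.5
    print(tri_area([0, 0, 0], [1, 0, 0], [0, 1, 0]))  # 0.5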
spotify/luigi
luigi/contrib/mongodb.py
https://github.com/spotify/luigi/blob/c5eca1c3c3ee2a7eb612486192a0da146710a1e9/luigi/contrib/mongodb.py#L38-L43
def get_collection(self):
    """
    Return targeted mongo collection to query on
    """
    db_mongo = self._mongo_client[self._index]
    return db_mongo[self._collection]
[ "def", "get_collection", "(", "self", ")", ":", "db_mongo", "=", "self", ".", "_mongo_client", "[", "self", ".", "_index", "]", "return", "db_mongo", "[", "self", ".", "_collection", "]" ]
Return targeted mongo collection to query on
[ "Return", "targeted", "mongo", "collection", "to", "query", "on" ]
python
train
gitpython-developers/GitPython
git/objects/commit.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/objects/commit.py#L162-L178
def count(self, paths='', **kwargs):
    """Count the number of commits reachable from this commit

    :param paths:
        is an optional path or a list of paths restricting the return value
        to commits actually containing the paths

    :param kwargs:
        Additional options to be passed to git-rev-list. They must not alter
        the output style of the command, or parsing will yield incorrect results
    :return: int defining the number of reachable commits"""
    # yes, it makes a difference whether empty paths are given or not in our case
    # as the empty paths version will ignore merge commits for some reason.
    if paths:
        return len(self.repo.git.rev_list(self.hexsha, '--', paths, **kwargs).splitlines())
    else:
        return len(self.repo.git.rev_list(self.hexsha, **kwargs).splitlines())
[ "def", "count", "(", "self", ",", "paths", "=", "''", ",", "*", "*", "kwargs", ")", ":", "# yes, it makes a difference whether empty paths are given or not in our case", "# as the empty paths version will ignore merge commits for some reason.", "if", "paths", ":", "return", "...
Count the number of commits reachable from this commit :param paths: is an optional path or a list of paths restricting the return value to commits actually containing the paths :param kwargs: Additional options to be passed to git-rev-list. They must not alter the output style of the command, or parsing will yield incorrect results :return: int defining the number of reachable commits
[ "Count", "the", "number", "of", "commits", "reachable", "from", "this", "commit" ]
python
train
pricingassistant/mrq
mrq/job.py
https://github.com/pricingassistant/mrq/blob/d0a5a34de9cba38afa94fb7c9e17f9b570b79a50/mrq/job.py#L177-L185
def save(self):
    """ Persists the current job metadata to MongoDB.

    Will be called at each worker report. """
    if not self.saved and self.data and "progress" in self.data:
        # TODO should we save more fields?
        self.collection.update({"_id": self.id}, {"$set": {
            "progress": self.data["progress"]
        }})
        self.saved = True
[ "def", "save", "(", "self", ")", ":", "if", "not", "self", ".", "saved", "and", "self", ".", "data", "and", "\"progress\"", "in", "self", ".", "data", ":", "# TODO should we save more fields?", "self", ".", "collection", ".", "update", "(", "{", "\"_id\"",...
Persists the current job metadata to MongoDB. Will be called at each worker report.
[ "Persists", "the", "current", "job", "metadata", "to", "MongoDB", ".", "Will", "be", "called", "at", "each", "worker", "report", "." ]
python
train
apache/airflow
airflow/contrib/hooks/wasb_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/wasb_hook.py#L50-L64
def check_for_blob(self, container_name, blob_name, **kwargs):
    """
    Check if a blob exists on Azure Blob Storage.

    :param container_name: Name of the container.
    :type container_name: str
    :param blob_name: Name of the blob.
    :type blob_name: str
    :param kwargs: Optional keyword arguments that
        `BlockBlobService.exists()` takes.
    :type kwargs: object
    :return: True if the blob exists, False otherwise.
    :rtype: bool
    """
    return self.connection.exists(container_name, blob_name, **kwargs)
[ "def", "check_for_blob", "(", "self", ",", "container_name", ",", "blob_name", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "connection", ".", "exists", "(", "container_name", ",", "blob_name", ",", "*", "*", "kwargs", ")" ]
Check if a blob exists on Azure Blob Storage. :param container_name: Name of the container. :type container_name: str :param blob_name: Name of the blob. :type blob_name: str :param kwargs: Optional keyword arguments that `BlockBlobService.exists()` takes. :type kwargs: object :return: True if the blob exists, False otherwise. :rtype: bool
[ "Check", "if", "a", "blob", "exists", "on", "Azure", "Blob", "Storage", "." ]
python
test
aio-libs/aioredis
aioredis/commands/hash.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/hash.py#L63-L102
def hmset_dict(self, key, *args, **kwargs):
    """Set multiple hash fields to multiple values.

    dict can be passed as first positional argument:

    >>> await redis.hmset_dict(
    ...     'key', {'field1': 'value1', 'field2': 'value2'})

    or keyword arguments can be used:

    >>> await redis.hmset_dict(
    ...     'key', field1='value1', field2='value2')

    or dict argument can be mixed with kwargs:

    >>> await redis.hmset_dict(
    ...     'key', {'field1': 'value1'}, field2='value2')

    .. note:: ``dict`` and ``kwargs`` do not get mixed into a single
       dictionary; if both are specified and both have the same key(s),
       ``kwargs`` will win:

       >>> await redis.hmset_dict('key', {'foo': 'bar'}, foo='baz')
       >>> await redis.hget('key', 'foo', encoding='utf-8')
       'baz'
    """
    if not args and not kwargs:
        raise TypeError("args or kwargs must be specified")
    pairs = ()
    if len(args) > 1:
        raise TypeError("single positional argument allowed")
    elif len(args) == 1:
        if not isinstance(args[0], dict):
            raise TypeError("args[0] must be dict")
        elif not args[0] and not kwargs:
            raise ValueError("args[0] is empty dict")
        pairs = chain.from_iterable(args[0].items())
    kwargs_pairs = chain.from_iterable(kwargs.items())
    return wait_ok(self.execute(
        b'HMSET', key, *chain(pairs, kwargs_pairs)))
[ "def", "hmset_dict", "(", "self", ",", "key", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "args", "and", "not", "kwargs", ":", "raise", "TypeError", "(", "\"args or kwargs must be specified\"", ")", "pairs", "=", "(", ")", "if", "...
Set multiple hash fields to multiple values.

        dict can be passed as the first positional argument:

        >>> await redis.hmset_dict(
        ...     'key', {'field1': 'value1', 'field2': 'value2'})

        or keyword arguments can be used:

        >>> await redis.hmset_dict(
        ...     'key', field1='value1', field2='value2')

        or the dict argument can be mixed with kwargs:

        >>> await redis.hmset_dict(
        ...     'key', {'field1': 'value1'}, field2='value2')

        .. note:: ``dict`` and ``kwargs`` do not get mixed into a single
           dictionary; if both are specified and both have the same key(s),
           ``kwargs`` will win:

           >>> await redis.hmset_dict('key', {'foo': 'bar'}, foo='baz')
           >>> await redis.hget('key', 'foo', encoding='utf-8')
           'baz'
[ "Set", "multiple", "hash", "fields", "to", "multiple", "values", "." ]
python
train
wummel/linkchecker
third_party/dnspython/dns/name.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/name.py#L375-L429
def to_wire(self, file = None, compress = None, origin = None): """Convert name to wire format, possibly compressing it. @param file: the file where the name is emitted (typically a cStringIO file). If None, a string containing the wire name will be returned. @type file: file or None @param compress: The compression table. If None (the default) names will not be compressed. @type compress: dict @param origin: If the name is relative and origin is not None, then origin will be appended to it. @type origin: dns.name.Name object @raises NeedAbsoluteNameOrOrigin: All names in wire format are absolute. If self is a relative name, then an origin must be supplied; if it is missing, then this exception is raised """ if file is None: file = cStringIO.StringIO() want_return = True else: want_return = False if not self.is_absolute(): if origin is None or not origin.is_absolute(): raise NeedAbsoluteNameOrOrigin labels = list(self.labels) labels.extend(list(origin.labels)) else: labels = self.labels i = 0 for label in labels: n = Name(labels[i:]) i += 1 if not compress is None: pos = compress.get(n) else: pos = None if not pos is None: value = 0xc000 + pos s = struct.pack('!H', value) file.write(s) break else: if not compress is None and len(n) > 1: pos = file.tell() if pos < 0xc000: compress[n] = pos l = len(label) file.write(chr(l)) if l > 0: file.write(label) if want_return: return file.getvalue()
[ "def", "to_wire", "(", "self", ",", "file", "=", "None", ",", "compress", "=", "None", ",", "origin", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "cStringIO", ".", "StringIO", "(", ")", "want_return", "=", "True", "else", "...
Convert name to wire format, possibly compressing it. @param file: the file where the name is emitted (typically a cStringIO file). If None, a string containing the wire name will be returned. @type file: file or None @param compress: The compression table. If None (the default) names will not be compressed. @type compress: dict @param origin: If the name is relative and origin is not None, then origin will be appended to it. @type origin: dns.name.Name object @raises NeedAbsoluteNameOrOrigin: All names in wire format are absolute. If self is a relative name, then an origin must be supplied; if it is missing, then this exception is raised
[ "Convert", "name", "to", "wire", "format", "possibly", "compressing", "it", "." ]
python
train
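Since this is Python 2-era code (cStringIO, chr), here is a matching sketch of the compression table in use; the hostnames are illustrative:

import dns.name
from cStringIO import StringIO

msg = StringIO()
compress = {}  # shared table: name -> offset within the message
dns.name.from_text('www.example.com.').to_wire(file=msg, compress=compress)
# The shared 'example.com.' suffix is now emitted as a 2-byte pointer.
dns.name.from_text('mail.example.com.').to_wire(file=msg, compress=compress)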
tariqdaouda/rabaDB
rabaDB/Raba.py
https://github.com/tariqdaouda/rabaDB/blob/42e0d6ee65149ae4f1e4c380cc695a9e7d2d1bbc/rabaDB/Raba.py#L739-L743
def _attachToObject(self, anchorObj, relationName) : "dummy fct for compatibility reasons, a RabaListPupa is attached by default" #MutableSequence.__getattribute__(self, "develop")() self.develop() self._attachToObject(anchorObj, relationName)
[ "def", "_attachToObject", "(", "self", ",", "anchorObj", ",", "relationName", ")", ":", "#MutableSequence.__getattribute__(self, \"develop\")()", "self", ".", "develop", "(", ")", "self", ".", "_attachToObject", "(", "anchorObj", ",", "relationName", ")" ]
dummy function for compatibility reasons, a RabaListPupa is attached by default
[ "dummy", "function", "for", "compatibility", "reasons", "a", "RabaListPupa", "is", "attached", "by", "default" ]
python
train
pyviz/holoviews
holoviews/plotting/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/util.py#L1035-L1041
def rgb2hex(rgb): """ Convert RGB(A) tuple to hex. """ if len(rgb) > 3: rgb = rgb[:-1] return "#{0:02x}{1:02x}{2:02x}".format(*(int(v*255) for v in rgb))
[ "def", "rgb2hex", "(", "rgb", ")", ":", "if", "len", "(", "rgb", ")", ">", "3", ":", "rgb", "=", "rgb", "[", ":", "-", "1", "]", "return", "\"#{0:02x}{1:02x}{2:02x}\"", ".", "format", "(", "*", "(", "int", "(", "v", "*", "255", ")", "for", "v",...
Convert RGB(A) tuple to hex.
[ "Convert", "RGB", "(", "A", ")", "tuple", "to", "hex", "." ]
python
train
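Two quick checks derived directly from the function above (assuming it is in scope): the alpha channel is dropped and each channel is scaled to 0-255:

print(rgb2hex((1.0, 0.0, 0.0)))       # '#ff0000'
print(rgb2hex((0.0, 0.0, 1.0, 0.5)))  # alpha dropped -> '#0000ff'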
trevisanj/f311
f311/filetypes/filepy.py
https://github.com/trevisanj/f311/blob/9e502a3d1e1f74d4290a8a0bae9a34ef8d7b29f7/f311/filetypes/filepy.py#L49-L72
def _copy_attr(self, module, varname, cls, attrname=None): """ Copies attribute from module object to self. Raises if object not of expected class Args: module: module object varname: variable name cls: expected class of variable attrname: attribute name of self. Falls back to varname """ if not hasattr(module, varname): raise RuntimeError("Variable '{}' not found".format(varname)) obj = getattr(module, varname) if not isinstance(obj, cls): raise RuntimeError( "Expecting '{}' to be a {}, not a '{}'".format( varname, cls.__name__, obj.__class__.__name__)) if attrname is None: attrname = varname setattr(self, attrname, obj)
[ "def", "_copy_attr", "(", "self", ",", "module", ",", "varname", ",", "cls", ",", "attrname", "=", "None", ")", ":", "if", "not", "hasattr", "(", "module", ",", "varname", ")", ":", "raise", "RuntimeError", "(", "\"Variable '{}' not found\"", ".", "format"...
Copies attribute from module object to self. Raises if object not of expected class Args: module: module object varname: variable name cls: expected class of variable attrname: attribute name of self. Falls back to varname
[ "Copies", "attribute", "from", "module", "object", "to", "self", ".", "Raises", "if", "object", "not", "of", "expected", "class" ]
python
train
jantman/awslimitchecker
awslimitchecker/services/ebs.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/awslimitchecker/services/ebs.py#L170-L251
def _get_limits_ebs(self): """ Return a dict of EBS-related limits only. This method should only be used internally by :py:meth:~.get_limits`. :rtype: dict """ limits = {} limits['Provisioned IOPS'] = AwsLimit( 'Provisioned IOPS', self, 200000, self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='io1', ) limits['Provisioned IOPS (SSD) storage (GiB)'] = AwsLimit( 'Provisioned IOPS (SSD) storage (GiB)', self, 102400, self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='io1', ) limits['General Purpose (SSD) volume storage (GiB)'] = AwsLimit( 'General Purpose (SSD) volume storage (GiB)', self, 102400, self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='gp2', ta_limit_name='General Purpose SSD (gp2) volume storage (GiB)' ) limits['Magnetic volume storage (GiB)'] = AwsLimit( 'Magnetic volume storage (GiB)', self, 20480, self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='standard', ta_limit_name='Magnetic (standard) volume storage (GiB)' ) limits['Throughput Optimized (HDD) volume storage (GiB)'] = AwsLimit( 'Throughput Optimized (HDD) volume storage (GiB)', self, 307200, self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='st1', ) limits['Cold (HDD) volume storage (GiB)'] = AwsLimit( 'Cold (HDD) volume storage (GiB)', self, 307200, self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::Volume', limit_subtype='sc1', ) limits['Active snapshots'] = AwsLimit( 'Active snapshots', self, 10000, self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::VolumeSnapshot', ) limits['Active volumes'] = AwsLimit( 'Active volumes', self, 5000, self.warning_threshold, self.critical_threshold, limit_type='AWS::EC2::Volume', ) return limits
[ "def", "_get_limits_ebs", "(", "self", ")", ":", "limits", "=", "{", "}", "limits", "[", "'Provisioned IOPS'", "]", "=", "AwsLimit", "(", "'Provisioned IOPS'", ",", "self", ",", "200000", ",", "self", ".", "warning_threshold", ",", "self", ".", "critical_thr...
Return a dict of EBS-related limits only. This method should only be used internally by :py:meth:~.get_limits`. :rtype: dict
[ "Return", "a", "dict", "of", "EBS", "-", "related", "limits", "only", ".", "This", "method", "should", "only", "be", "used", "internally", "by", ":", "py", ":", "meth", ":", "~", ".", "get_limits", "." ]
python
train
sosy-lab/benchexec
benchexec/check_cgroups.py
https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/check_cgroups.py#L36-L80
def check_cgroup_availability(wait=1): """ Basic utility to check the availability and permissions of cgroups. This will log some warnings for the user if necessary. On some systems, daemons such as cgrulesengd might interfere with the cgroups of a process soon after it was started. Thus this function starts a process, waits a configurable amount of time, and check whether the cgroups have been changed. @param wait: a non-negative int that is interpreted as seconds to wait during the check @raise SystemExit: if cgroups are not usable """ logging.basicConfig(format="%(levelname)s: %(message)s") runexecutor = RunExecutor() my_cgroups = runexecutor.cgroups if not (CPUACCT in my_cgroups and CPUSET in my_cgroups and # FREEZER in my_cgroups and # For now, we do not require freezer MEMORY in my_cgroups): sys.exit(1) with tempfile.NamedTemporaryFile(mode='rt') as tmp: runexecutor.execute_run(['sh', '-c', 'sleep {0}; cat /proc/self/cgroup'.format(wait)], tmp.name, memlimit=1024*1024, # set memlimit to force check for swapaccount # set cores and memory_nodes to force usage of CPUSET cores=util.parse_int_list(my_cgroups.get_value(CPUSET, 'cpus')), memory_nodes=my_cgroups.read_allowed_memory_banks()) lines = [] for line in tmp: line = line.strip() if line and not line == "sh -c 'sleep {0}; cat /proc/self/cgroup'".format(wait) \ and not all(c == '-' for c in line): lines.append(line) task_cgroups = find_my_cgroups(lines) fail = False for subsystem in CPUACCT, CPUSET, MEMORY, FREEZER: if subsystem in my_cgroups: if not task_cgroups[subsystem].startswith(os.path.join(my_cgroups[subsystem], 'benchmark_')): logging.warning('Task was in cgroup %s for subsystem %s, ' 'which is not the expected sub-cgroup of %s. ' 'Maybe some other program is interfering with cgroup management?', task_cgroups[subsystem], subsystem, my_cgroups[subsystem]) fail = True if fail: sys.exit(1)
[ "def", "check_cgroup_availability", "(", "wait", "=", "1", ")", ":", "logging", ".", "basicConfig", "(", "format", "=", "\"%(levelname)s: %(message)s\"", ")", "runexecutor", "=", "RunExecutor", "(", ")", "my_cgroups", "=", "runexecutor", ".", "cgroups", "if", "n...
Basic utility to check the availability and permissions of cgroups. This will log some warnings for the user if necessary. On some systems, daemons such as cgrulesengd might interfere with the cgroups of a process soon after it was started. Thus this function starts a process, waits a configurable amount of time, and check whether the cgroups have been changed. @param wait: a non-negative int that is interpreted as seconds to wait during the check @raise SystemExit: if cgroups are not usable
[ "Basic", "utility", "to", "check", "the", "availability", "and", "permissions", "of", "cgroups", ".", "This", "will", "log", "some", "warnings", "for", "the", "user", "if", "necessary", ".", "On", "some", "systems", "daemons", "such", "as", "cgrulesengd", "m...
python
train
rosenbrockc/acorn
acorn/logging/descriptors.py
https://github.com/rosenbrockc/acorn/blob/9a44d1a1ad8bfc2c54a6b56d9efe54433a797820/acorn/logging/descriptors.py#L126-L138
def _array_convert(a): """Converts the specified value to a list if it is a :class:`numpy.ndarray`; otherwise it is just returned as is. """ from numpy import ndarray if isinstance(a, ndarray): larr = a.tolist() if len(larr) == 1: return larr[0] else: return larr else: return a
[ "def", "_array_convert", "(", "a", ")", ":", "from", "numpy", "import", "ndarray", "if", "isinstance", "(", "a", ",", "ndarray", ")", ":", "larr", "=", "a", ".", "tolist", "(", ")", "if", "len", "(", "larr", ")", "==", "1", ":", "return", "larr", ...
Converts the specified value to a list if it is a :class:`numpy.ndarray`; otherwise it is just returned as is.
[ "Converts", "the", "specified", "value", "to", "a", "list", "if", "it", "is", "a", ":", "class", ":", "numpy", ".", "ndarray", ";", "otherwise", "it", "is", "just", "returned", "as", "is", "." ]
python
train
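Behaviour of the helper at a glance (a sketch, assuming numpy is installed and the function above is in scope):

import numpy as np

print(_array_convert(np.array([3])))     # 3 -- single-element arrays unwrap
print(_array_convert(np.array([1, 2])))  # [1, 2] -- longer arrays become lists
print(_array_convert('unchanged'))       # 'unchanged' -- non-arrays pass through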
vstconsulting/vstutils
vstutils/ldap_utils.py
https://github.com/vstconsulting/vstutils/blob/3d6d140c2463952dc9835a4e40caf758468b3049/vstutils/ldap_utils.py#L135-L142
def isAuth(self): ''' Indicates that object auth worked :return: True or False ''' if isinstance(self.__conn, ldap.ldapobject.LDAPObject) or self.__conn: return True return False
[ "def", "isAuth", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "__conn", ",", "ldap", ".", "ldapobject", ".", "LDAPObject", ")", "or", "self", ".", "__conn", ":", "return", "True", "return", "False" ]
Indicates that object auth worked :return: True or False
[ "Indicates", "that", "object", "auth", "worked", ":", "return", ":", "True", "or", "False" ]
python
train
cltk/cltk
cltk/phonology/old_norse/transcription.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/old_norse/transcription.py#L66-L89
def phonetic_u_umlaut(sound: Vowel) -> Vowel: """ >>> umlaut_a = OldNorsePhonology.phonetic_u_umlaut(a) >>> umlaut_a.ipar 'ø' >>> umlaut_o = OldNorsePhonology.phonetic_u_umlaut(o) >>> umlaut_o.ipar 'u' >>> umlaut_e = OldNorsePhonology.phonetic_u_umlaut(e) >>> umlaut_e.ipar 'e' :param sound: instance of Vowel :return: """ if sound.is_equal(a): return oee # or oe elif sound.is_equal(o): return u else: return sound
[ "def", "phonetic_u_umlaut", "(", "sound", ":", "Vowel", ")", "->", "Vowel", ":", "if", "sound", ".", "is_equal", "(", "a", ")", ":", "return", "oee", "# or oe", "elif", "sound", ".", "is_equal", "(", "o", ")", ":", "return", "u", "else", ":", "return...
>>> umlaut_a = OldNorsePhonology.phonetic_u_umlaut(a) >>> umlaut_a.ipar 'ø' >>> umlaut_o = OldNorsePhonology.phonetic_u_umlaut(o) >>> umlaut_o.ipar 'u' >>> umlaut_e = OldNorsePhonology.phonetic_u_umlaut(e) >>> umlaut_e.ipar 'e' :param sound: instance of Vowel :return:
[ ">>>", "umlaut_a", "=", "OldNorsePhonology", ".", "phonetic_u_umlaut", "(", "a", ")", ">>>", "umlaut_a", ".", "ipar", "ø" ]
python
train
Alignak-monitoring/alignak
alignak/http/arbiter_interface.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/http/arbiter_interface.py#L1663-L1684
def _do_not_run(self): """The master arbiter tells to its spare arbiters to not run. A master arbiter will ignore this request and it will return an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error :return: None """ # If I'm the master, ignore the command and raise a log if self.app.is_master: message = "Received message to not run. " \ "I am the Master arbiter, ignore and continue to run." logger.warning(message) return {'_status': u'ERR', '_message': message} # Else, I'm just a spare, so I listen to my master logger.debug("Received message to not run. I am the spare, stopping.") self.app.last_master_speak = time.time() self.app.must_run = False return {'_status': u'OK', '_message': message}
[ "def", "_do_not_run", "(", "self", ")", ":", "# If I'm the master, ignore the command and raise a log", "if", "self", ".", "app", ".", "is_master", ":", "message", "=", "\"Received message to not run. \"", "\"I am the Master arbiter, ignore and continue to run.\"", "logger", "....
The master arbiter tells to its spare arbiters to not run. A master arbiter will ignore this request and it will return an object containing some properties: '_status': 'ERR' because of the error `_message`: some more explanations about the error :return: None
[ "The", "master", "arbiter", "tells", "to", "its", "spare", "arbiters", "to", "not", "run", "." ]
python
train
bwohlberg/sporco
sporco/admm/parcbpdn.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/parcbpdn.py#L871-L878
def terminate_pool(self): """Terminate and close the multiprocessing pool if necessary.""" if self.pool is not None: self.pool.terminate() self.pool.join() del(self.pool) self.pool = None
[ "def", "terminate_pool", "(", "self", ")", ":", "if", "self", ".", "pool", "is", "not", "None", ":", "self", ".", "pool", ".", "terminate", "(", ")", "self", ".", "pool", ".", "join", "(", ")", "del", "(", "self", ".", "pool", ")", "self", ".", ...
Terminate and close the multiprocessing pool if necessary.
[ "Terminate", "and", "close", "the", "multiprocessing", "pool", "if", "necessary", "." ]
python
train
ceph/ceph-deploy
ceph_deploy/hosts/remotes.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/hosts/remotes.py#L331-L345
def which(executable): """find the location of an executable""" locations = ( '/usr/local/bin', '/bin', '/usr/bin', '/usr/local/sbin', '/usr/sbin', '/sbin', ) for location in locations: executable_path = os.path.join(location, executable) if os.path.exists(executable_path) and os.path.isfile(executable_path): return executable_path
[ "def", "which", "(", "executable", ")", ":", "locations", "=", "(", "'/usr/local/bin'", ",", "'/bin'", ",", "'/usr/bin'", ",", "'/usr/local/sbin'", ",", "'/usr/sbin'", ",", "'/sbin'", ",", ")", "for", "location", "in", "locations", ":", "executable_path", "=",...
find the location of an executable
[ "find", "the", "location", "of", "an", "executable" ]
python
train
elliterate/capybara.py
capybara/session_matchers.py
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/session_matchers.py#L6-L30
def assert_current_path(self, path, **kwargs): """ Asserts that the page has the given path. By default this will compare against the path+query portion of the full URL. Args: path (str | RegexObject): The string or regex that the current "path" should match. **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time. """ query = CurrentPathQuery(path, **kwargs) @self.document.synchronize def assert_current_path(): if not query.resolves_for(self): raise ExpectationNotMet(query.failure_message) assert_current_path() return True
[ "def", "assert_current_path", "(", "self", ",", "path", ",", "*", "*", "kwargs", ")", ":", "query", "=", "CurrentPathQuery", "(", "path", ",", "*", "*", "kwargs", ")", "@", "self", ".", "document", ".", "synchronize", "def", "assert_current_path", "(", "...
Asserts that the page has the given path. By default this will compare against the path+query portion of the full URL. Args: path (str | RegexObject): The string or regex that the current "path" should match. **kwargs: Arbitrary keyword arguments for :class:`CurrentPathQuery`. Returns: True Raises: ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
[ "Asserts", "that", "the", "page", "has", "the", "given", "path", ".", "By", "default", "this", "will", "compare", "against", "the", "path", "+", "query", "portion", "of", "the", "full", "URL", "." ]
python
test
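A usage sketch with capybara.py's DSL (the paths are hypothetical; both string and regex forms are accepted, per the docstring above):

import re
from capybara.dsl import page

page.visit('/users/new')
page.assert_current_path('/users/new')              # compared against path+query
page.assert_current_path(re.compile(r'/users/.*'))  # regex form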
ethereum/py-evm
eth/vm/forks/byzantium/headers.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/vm/forks/byzantium/headers.py#L38-L73
def compute_difficulty( bomb_delay: int, parent_header: BlockHeader, timestamp: int) -> int: """ https://github.com/ethereum/EIPs/issues/100 """ parent_timestamp = parent_header.timestamp validate_gt(timestamp, parent_timestamp, title="Header.timestamp") parent_difficulty = parent_header.difficulty offset = parent_difficulty // DIFFICULTY_ADJUSTMENT_DENOMINATOR has_uncles = parent_header.uncles_hash != EMPTY_UNCLE_HASH adj_factor = max( ( (2 if has_uncles else 1) - ((timestamp - parent_timestamp) // BYZANTIUM_DIFFICULTY_ADJUSTMENT_CUTOFF) ), -99, ) difficulty = max( parent_difficulty + offset * adj_factor, min(parent_header.difficulty, DIFFICULTY_MINIMUM) ) num_bomb_periods = ( max( 0, parent_header.block_number + 1 - bomb_delay, ) // BOMB_EXPONENTIAL_PERIOD ) - BOMB_EXPONENTIAL_FREE_PERIODS if num_bomb_periods >= 0: return max(difficulty + 2**num_bomb_periods, DIFFICULTY_MINIMUM) else: return difficulty
[ "def", "compute_difficulty", "(", "bomb_delay", ":", "int", ",", "parent_header", ":", "BlockHeader", ",", "timestamp", ":", "int", ")", "->", "int", ":", "parent_timestamp", "=", "parent_header", ".", "timestamp", "validate_gt", "(", "timestamp", ",", "parent_t...
https://github.com/ethereum/EIPs/issues/100
[ "https", ":", "//", "github", ".", "com", "/", "ethereum", "/", "EIPs", "/", "issues", "/", "100" ]
python
train
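To make the EIP-100 term concrete, here is the adj_factor arithmetic for two cases, assuming the mainnet Byzantium cutoff of 9 seconds (the actual constant values live elsewhere in the module):

# Case 1: 6-second gap, no uncles  -> 1 - 6 // 9  = 1   (difficulty rises by one offset)
# Case 2: 30-second gap, uncles    -> 2 - 30 // 9 = -1  (difficulty falls by one offset)
cutoff = 9
for gap, has_uncles in [(6, False), (30, True)]:
    adj_factor = max((2 if has_uncles else 1) - gap // cutoff, -99)
    print(gap, has_uncles, adj_factor)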
UCSBarchlab/PyRTL
pyrtl/rtllib/muxes.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/muxes.py#L178-L187
def finalize(self): """ Connects the wires. """ self._check_finalized() self._final = True for dest_w, values in self.dest_instrs_info.items(): mux_vals = dict(zip(self.instructions, values)) dest_w <<= sparse_mux(self.signal_wire, mux_vals)
[ "def", "finalize", "(", "self", ")", ":", "self", ".", "_check_finalized", "(", ")", "self", ".", "_final", "=", "True", "for", "dest_w", ",", "values", "in", "self", ".", "dest_instrs_info", ".", "items", "(", ")", ":", "mux_vals", "=", "dict", "(", ...
Connects the wires.
[ "Connects", "the", "wires", "." ]
python
train
radujica/baloo
baloo/core/frame.py
https://github.com/radujica/baloo/blob/f6e05e35b73a75e8a300754c6bdc575e5f2d53b9/baloo/core/frame.py#L442-L458
def evaluate(self, verbose=False, decode=True, passes=None, num_threads=1, apply_experimental=True): """Evaluates by creating a DataFrame containing evaluated data and index. See `LazyResult` Returns ------- DataFrame DataFrame with evaluated data and index. """ evaluated_index = self.index.evaluate(verbose, decode, passes, num_threads, apply_experimental) evaluated_data = OrderedDict((column.name, column.evaluate(verbose, decode, passes, num_threads, apply_experimental)) for column in self._iter()) return DataFrame(evaluated_data, evaluated_index)
[ "def", "evaluate", "(", "self", ",", "verbose", "=", "False", ",", "decode", "=", "True", ",", "passes", "=", "None", ",", "num_threads", "=", "1", ",", "apply_experimental", "=", "True", ")", ":", "evaluated_index", "=", "self", ".", "index", ".", "ev...
Evaluates by creating a DataFrame containing evaluated data and index. See `LazyResult` Returns ------- DataFrame DataFrame with evaluated data and index.
[ "Evaluates", "by", "creating", "a", "DataFrame", "containing", "evaluated", "data", "and", "index", "." ]
python
train
Chilipp/model-organization
model_organization/config.py
https://github.com/Chilipp/model-organization/blob/694d1219c7ed7e1b2b17153afa11bdc21169bca2/model_organization/config.py#L276-L301
def exp_files(self): """A mapping from experiment to experiment configuration file Note that this attribute only contains experiments whose configuration has already been dumped to the file! """ ret = OrderedDict() # restore the order of the experiments exp_file = self.exp_file if osp.exists(exp_file): for key, val in safe_load(exp_file).items(): ret[key] = val for project, d in self.projects.items(): project_path = d['root'] config_path = osp.join(project_path, '.project') if not osp.exists(config_path): continue for fname in glob.glob(osp.join(config_path, '*.yml')): # glob returns full paths, so compare only the file name if osp.basename(fname) == '.project.yml': continue exp = osp.splitext(osp.basename(fname))[0] if not isinstance(ret.get(exp), Archive): ret[exp] = osp.join(config_path, exp + '.yml') if exp not in self._project_map[project]: self._project_map[project].append(exp) return ret
[ "def", "exp_files", "(", "self", ")", ":", "ret", "=", "OrderedDict", "(", ")", "# restore the order of the experiments", "exp_file", "=", "self", ".", "exp_file", "if", "osp", ".", "exists", "(", "exp_file", ")", ":", "for", "key", ",", "val", "in", "safe...
A mapping from experiment to experiment configuration file Note that this attribute only contains experiments whose configuration has already been dumped to the file!
[ "A", "mapping", "from", "experiment", "to", "experiment", "configuration", "file" ]
python
train
FlorianRhiem/pyGLFW
glfw/__init__.py
https://github.com/FlorianRhiem/pyGLFW/blob/87767dfbe15ba15d2a8338cdfddf6afc6a25dff5/glfw/__init__.py#L118-L124
def unwrap(self): """ Returns a GLFWvidmode object. """ size = self.Size(self.width, self.height) bits = self.Bits(self.red_bits, self.green_bits, self.blue_bits) return self.GLFWvidmode(size, bits, self.refresh_rate)
[ "def", "unwrap", "(", "self", ")", ":", "size", "=", "self", ".", "Size", "(", "self", ".", "width", ",", "self", ".", "height", ")", "bits", "=", "self", ".", "Bits", "(", "self", ".", "red_bits", ",", "self", ".", "green_bits", ",", "self", "."...
Returns a GLFWvidmode object.
[ "Returns", "a", "GLFWvidmode", "object", "." ]
python
train
cdeboever3/cdpybio
cdpybio/bedtools.py
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/bedtools.py#L348-L385
def combine(beds, beds_sorted=False, postmerge=True): """ Combine a list of bed files or BedTool objects into a single BedTool object. Parameters ---------- beds : list List of paths to bed files or BedTool objects. beds_sorted : boolean Whether the bed files in beds are already sorted. If False, all bed files in beds will be sorted. postmerge : boolean Whether to merge intervals after combining beds together. Returns ------- out : pybedtools.BedTool New sorted BedTool with intervals from all input beds. """ beds = copy.deepcopy(beds) for i,v in enumerate(beds): if type(v) == str: beds[i] = pbt.BedTool(v) if not beds_sorted: beds[i] = beds[i].sort() # For some reason, doing the merging in the reduce statement doesn't work. I # think this might be a pybedtools bug. In any fashion, I can merge # afterward although I think it makes a performance hit because the combined # bed file grows larger than it needs to. out = reduce(lambda x,y : x.cat(y, postmerge=False), beds) out = out.sort() if postmerge: out = out.merge() return out
[ "def", "combine", "(", "beds", ",", "beds_sorted", "=", "False", ",", "postmerge", "=", "True", ")", ":", "beds", "=", "copy", ".", "deepcopy", "(", "beds", ")", "for", "i", ",", "v", "in", "enumerate", "(", "beds", ")", ":", "if", "type", "(", "...
Combine a list of bed files or BedTool objects into a single BedTool object. Parameters ---------- beds : list List of paths to bed files or BedTool objects. beds_sorted : boolean Whether the bed files in beds are already sorted. If False, all bed files in beds will be sorted. postmerge : boolean Whether to merge intervals after combining beds together. Returns ------- out : pybedtools.BedTool New sorted BedTool with intervals from all input beds.
[ "Combine", "a", "list", "of", "bed", "files", "or", "BedTool", "objects", "into", "a", "single", "BedTool", "object", "." ]
python
train
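A usage sketch for combine (the file names are hypothetical; paths and BedTool objects may be mixed, and sorting happens internally):

import pybedtools as pbt

merged = combine(['a.bed', pbt.BedTool('b.bed')], postmerge=True)
merged.saveas('combined.bed')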
MozillaSecurity/fuzzfetch
src/fuzzfetch/fetch.py
https://github.com/MozillaSecurity/fuzzfetch/blob/166cbfc71b679db019b9ac777dce12ccfdfc2c10/src/fuzzfetch/fetch.py#L386-L390
def iterall(cls, target, branch, build, flags, platform=None): """Return an iterable for all available builds matching a particular build type""" flags = BuildFlags(*flags) for task in BuildTask.iterall(build, branch, flags, platform): yield cls(target, branch, task, flags, platform)
[ "def", "iterall", "(", "cls", ",", "target", ",", "branch", ",", "build", ",", "flags", ",", "platform", "=", "None", ")", ":", "flags", "=", "BuildFlags", "(", "*", "flags", ")", "for", "task", "in", "BuildTask", ".", "iterall", "(", "build", ",", ...
Return an iterable for all available builds matching a particular build type
[ "Return", "an", "iterable", "for", "all", "available", "builds", "matching", "a", "particular", "build", "type" ]
python
train
angr/angr
angr/analyses/cfg/cfg_base.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_base.py#L1711-L1922
def _graph_traversal_handler(self, g, src, dst, data, blockaddr_to_function, known_functions, all_edges): """ Graph traversal handler. It takes in a node or an edge, and create new functions or add nodes to existing functions accordingly. Oh, it also create edges on the transition map of functions. :param g: The control flow graph that is currently being traversed. :param CFGNode src: Beginning of the edge, or a single node when dst is None. :param CFGNode dst: Destination of the edge. For processing a single node, `dst` is None. :param dict data: Edge data in the CFG. 'jumpkind' should be there if it's not None. :param dict blockaddr_to_function: A mapping between block addresses to Function instances. :param angr.knowledge_plugins.FunctionManager known_functions: Already recovered functions. :param list or None all_edges: All edges going out from src. :return: None """ src_addr = src.addr src_function = self._addr_to_function(src_addr, blockaddr_to_function, known_functions) if src_addr not in src_function.block_addrs_set: n = self.model.get_any_node(src_addr) if n is None: node = src_addr else: node = self._to_snippet(n) self.kb.functions._add_node(src_function.addr, node) if data is None: # it's a single node only return jumpkind = data['jumpkind'] if jumpkind == 'Ijk_Ret': n = self.model.get_any_node(src_addr) if n is None: from_node = src_addr else: from_node = self._to_snippet(n) self.kb.functions._add_return_from(src_function.addr, from_node, None) if dst is None: return dst_addr = dst.addr # get instruction address and statement index ins_addr = data.get('ins_addr', None) stmt_idx = data.get('stmt_idx', None) if jumpkind == 'Ijk_Call' or jumpkind.startswith('Ijk_Sys'): is_syscall = jumpkind.startswith('Ijk_Sys') # It must be calling a function dst_function = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions) n = self.model.get_any_node(src_addr) if n is None: src_snippet = self._to_snippet(addr=src_addr, base_state=self._base_state) else: src_snippet = self._to_snippet(cfg_node=n) # HACK: FIXME: We need a better way of representing unresolved calls and whether they return. # For now, assume UnresolvedTarget returns if we're calling to it # If the function doesn't return, don't add a fakeret! 
if not all_edges or (dst_function.returning is False and not dst_function.name == 'UnresolvableCallTarget'): fakeret_node = None else: fakeret_node = self._one_fakeret_node(all_edges) if fakeret_node is None: fakeret_snippet = None else: fakeret_snippet = self._to_snippet(cfg_node=fakeret_node) if isinstance(dst_addr, SootAddressDescriptor): dst_addr = dst_addr.method self.kb.functions._add_call_to(src_function.addr, src_snippet, dst_addr, fakeret_snippet, syscall=is_syscall, ins_addr=ins_addr, stmt_idx=stmt_idx) if dst_function.returning: returning_target = src.addr + src.size if returning_target not in blockaddr_to_function: if returning_target not in known_functions: blockaddr_to_function[returning_target] = src_function else: self._addr_to_function(returning_target, blockaddr_to_function, known_functions) to_outside = not blockaddr_to_function[returning_target] is src_function n = self.model.get_any_node(returning_target) if n is None: returning_snippet = self._to_snippet(addr=returning_target, base_state=self._base_state) else: returning_snippet = self._to_snippet(cfg_node=n) self.kb.functions._add_fakeret_to(src_function.addr, src_snippet, returning_snippet, confirmed=True, to_outside=to_outside ) elif jumpkind in ('Ijk_Boring', 'Ijk_InvalICache'): # convert src_addr and dst_addr to CodeNodes n = self.model.get_any_node(src_addr) if n is None: src_node = src_addr else: src_node = self._to_snippet(cfg_node=n) n = self.model.get_any_node(dst_addr) if n is None: dst_node = dst_addr else: dst_node = self._to_snippet(cfg_node=n) # pre-check: if source and destination do not belong to the same section, it must be jumping to another # function belong_to_same_section = self._addrs_belong_to_same_section(src_addr, dst_addr) if not belong_to_same_section: _ = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions) if self._detect_tail_calls: if self._is_tail_call_optimization(g, src_addr, dst_addr, src_function, all_edges, known_functions, blockaddr_to_function): l.debug("Possible tail-call optimization detected at function %#x.", dst_addr) # it's (probably) a tail-call optimization. we should make the destination node a new function # instead. blockaddr_to_function.pop(dst_addr, None) _ = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions) self.kb.functions._add_outside_transition_to(src_function.addr, src_node, dst_node, to_function_addr=dst_addr ) # is it a jump to another function? 
if isinstance(dst_addr, SootAddressDescriptor): is_known_function_addr = dst_addr.method in known_functions and dst_addr.method.addr == dst_addr else: is_known_function_addr = dst_addr in known_functions if is_known_function_addr or ( dst_addr in blockaddr_to_function and blockaddr_to_function[dst_addr] is not src_function ): # yes it is dst_function_addr = blockaddr_to_function[dst_addr].addr if dst_addr in blockaddr_to_function else \ dst_addr self.kb.functions._add_outside_transition_to(src_function.addr, src_node, dst_node, to_function_addr=dst_function_addr ) _ = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions) else: # no it's not # add the transition code if dst_addr not in blockaddr_to_function: blockaddr_to_function[dst_addr] = src_function self.kb.functions._add_transition_to(src_function.addr, src_node, dst_node, ins_addr=ins_addr, stmt_idx=stmt_idx ) elif jumpkind == 'Ijk_FakeRet': # convert src_addr and dst_addr to CodeNodes n = self.model.get_any_node(src_addr) if n is None: src_node = src_addr else: src_node = self._to_snippet(n) n = self.model.get_any_node(dst_addr) if n is None: dst_node = dst_addr else: dst_node = self._to_snippet(n) if dst_addr not in blockaddr_to_function: if isinstance(dst_addr, SootAddressDescriptor): if dst_addr.method not in known_functions: blockaddr_to_function[dst_addr] = src_function target_function = src_function else: target_function = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions) else: if dst_addr not in known_functions: blockaddr_to_function[dst_addr] = src_function target_function = src_function else: target_function = self._addr_to_function(dst_addr, blockaddr_to_function, known_functions) else: target_function = blockaddr_to_function[dst_addr] # Figure out if the function called (not the function returned to) returns. # We may have determined that this does not happen, since the time this path # was scheduled for exploration called_function = None # Try to find the call that this fakeret goes with for _, d, e in all_edges: if e['jumpkind'] == 'Ijk_Call': if d.addr in blockaddr_to_function: called_function = blockaddr_to_function[d.addr] break # We may have since figured out that the called function doesn't ret. # It's important to assume that all unresolved targets do return if called_function is not None and \ called_function.returning is False: return to_outside = not target_function is src_function # FIXME: Not sure we should confirm this fakeret or not. self.kb.functions._add_fakeret_to(src_function.addr, src_node, dst_node, confirmed=True, to_outside=to_outside, to_function_addr=target_function.addr ) else: l.debug('Ignored jumpkind %s', jumpkind)
[ "def", "_graph_traversal_handler", "(", "self", ",", "g", ",", "src", ",", "dst", ",", "data", ",", "blockaddr_to_function", ",", "known_functions", ",", "all_edges", ")", ":", "src_addr", "=", "src", ".", "addr", "src_function", "=", "self", ".", "_addr_to_...
Graph traversal handler. It takes in a node or an edge, and creates new functions or adds nodes to existing functions accordingly. Oh, it also creates edges on the transition map of functions. :param g: The control flow graph that is currently being traversed. :param CFGNode src: Beginning of the edge, or a single node when dst is None. :param CFGNode dst: Destination of the edge. For processing a single node, `dst` is None. :param dict data: Edge data in the CFG. 'jumpkind' should be there if it's not None. :param dict blockaddr_to_function: A mapping from block addresses to Function instances. :param angr.knowledge_plugins.FunctionManager known_functions: Already recovered functions. :param list or None all_edges: All edges going out from src. :return: None
[ "Graph", "traversal", "handler", ".", "It", "takes", "in", "a", "node", "or", "an", "edge", "and", "create", "new", "functions", "or", "add", "nodes", "to", "existing", "functions", "accordingly", ".", "Oh", "it", "also", "create", "edges", "on", "the", ...
python
train
btel/svg_utils
src/svgutils/transform.py
https://github.com/btel/svg_utils/blob/ee00726ebed1bd97fd496b15b6a8e7f233ebb5e3/src/svgutils/transform.py#L337-L390
def from_mpl(fig, savefig_kw=None): """Create an SVG figure from a ``matplotlib`` figure. Parameters ---------- fig : matplotlib.Figure instance savefig_kw : dict keyword arguments to be passed to matplotlib's `savefig` Returns ------- SVGFigure newly created :py:class:`SVGFigure` initialised with the string content. Examples -------- If you want to overlay the figure on another SVG, you may want to pass the `transparent` option: >>> from svgutils import transform >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> line, = plt.plot([1,2]) >>> svgfig = transform.from_mpl(fig, ... savefig_kw=dict(transparent=True)) >>> svgfig.getroot() <svgutils.transform.GroupElement object at ...> """ fid = StringIO() if savefig_kw is None: savefig_kw = {} try: fig.savefig(fid, format='svg', **savefig_kw) except ValueError: raise ValueError("No matplotlib SVG backend") fid.seek(0) fig = fromstring(fid.read()) # workaround mpl units bug w, h = fig.get_size() fig.set_size((w.replace('pt', ''), h.replace('pt', ''))) return fig
[ "def", "from_mpl", "(", "fig", ",", "savefig_kw", "=", "None", ")", ":", "fid", "=", "StringIO", "(", ")", "if", "savefig_kw", "is", "None", ":", "savefig_kw", "=", "{", "}", "try", ":", "fig", ".", "savefig", "(", "fid", ",", "format", "=", "'svg'...
Create a SVG figure from a ``matplotlib`` figure. Parameters ---------- fig : matplotlib.Figure instance savefig_kw : dict keyword arguments to be passed to matplotlib's `savefig` Returns ------- SVGFigure newly created :py:class:`SVGFigure` initialised with the string content. Examples -------- If you want to overlay the figure on another SVG, you may want to pass the `transparent` option: >>> from svgutils import transform >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> line, = plt.plot([1,2]) >>> svgfig = transform.from_mpl(fig, ... savefig_kw=dict(transparent=True)) >>> svgfig.getroot() <svgutils.transform.GroupElement object at ...>
[ "Create", "a", "SVG", "figure", "from", "a", "matplotlib", "figure", "." ]
python
train
joshspeagle/dynesty
dynesty/bounding.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/bounding.py#L253-L263
def unitcube_overlap(self, ndraws=10000, rstate=None): """Using `ndraws` Monte Carlo draws, estimate the fraction of overlap between the ellipsoid and the unit cube.""" if rstate is None: rstate = np.random samples = [self.sample(rstate=rstate) for i in range(ndraws)] nin = sum([unitcheck(x) for x in samples]) return 1. * nin / ndraws
[ "def", "unitcube_overlap", "(", "self", ",", "ndraws", "=", "10000", ",", "rstate", "=", "None", ")", ":", "if", "rstate", "is", "None", ":", "rstate", "=", "np", ".", "random", "samples", "=", "[", "self", ".", "sample", "(", "rstate", "=", "rstate"...
Using `ndraws` Monte Carlo draws, estimate the fraction of overlap between the ellipsoid and the unit cube.
[ "Using", "ndraws", "Monte", "Carlo", "draws", "estimate", "the", "fraction", "of", "overlap", "between", "the", "ellipsoid", "and", "the", "unit", "cube", "." ]
python
train
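A usage sketch, assuming `ell` is an existing Ellipsoid instance from this module; the estimate is simply the fraction of ellipsoid draws that land inside the unit hypercube:

import numpy as np

frac = ell.unitcube_overlap(ndraws=5000, rstate=np.random.RandomState(0))
print(frac)  # Monte Carlo estimate in [0, 1]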
python-openxml/python-docx
docx/image/tiff.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/image/tiff.py#L288-L294
def _parse_value(cls, stream_rdr, offset, value_count, value_offset): """ Return the ASCII string parsed from *stream_rdr* at *value_offset*. The length of the string, including a terminating '\x00' (NUL) character, is in *value_count*. """ return stream_rdr.read_str(value_count-1, value_offset)
[ "def", "_parse_value", "(", "cls", ",", "stream_rdr", ",", "offset", ",", "value_count", ",", "value_offset", ")", ":", "return", "stream_rdr", ".", "read_str", "(", "value_count", "-", "1", ",", "value_offset", ")" ]
Return the ASCII string parsed from *stream_rdr* at *value_offset*. The length of the string, including a terminating '\x00' (NUL) character, is in *value_count*.
[ "Return", "the", "ASCII", "string", "parsed", "from", "*", "stream_rdr", "*", "at", "*", "value_offset", "*", ".", "The", "length", "of", "the", "string", "including", "a", "terminating", "\\", "x00", "(", "NUL", ")", "character", "is", "in", "*", "value...
python
train
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L1735-L1797
def haploidify_samples(self): """Construct a pseudo-haplotype for each sample by randomly selecting an allele from each genotype call. Returns ------- h : HaplotypeArray Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> import numpy as np >>> np.random.seed(42) >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(4, 2) dtype=int64> 0 1 0 1 1 1 2 . >>> g = allel.GenotypeArray([[[0, 0, 0], [0, 0, 1]], ... [[0, 1, 1], [1, 1, 1]], ... [[0, 1, 2], [-1, -1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(3, 2) dtype=int64> 0 0 1 1 2 . """ # N.B., this implementation is obscure and uses more memory than # necessary, TODO review # define the range of possible indices, e.g., diploid => (0, 1) index_range = np.arange(0, self.ploidy, dtype='u1') # create a random index for each genotype call indices = np.random.choice(index_range, size=self.n_calls, replace=True) # reshape genotype data so it's suitable for passing to np.choose # by merging the variants and samples dimensions choices = self.reshape(-1, self.ploidy).T # now use random indices to haploidify data = np.choose(indices, choices) # reshape the haploidified data to restore the variants and samples # dimensions data = data.reshape((self.n_variants, self.n_samples)) # view as haplotype array h = HaplotypeArray(data, copy=False) return h
[ "def", "haploidify_samples", "(", "self", ")", ":", "# N.B., this implementation is obscure and uses more memory than", "# necessary, TODO review", "# define the range of possible indices, e.g., diploid => (0, 1)", "index_range", "=", "np", ".", "arange", "(", "0", ",", "self", "...
Construct a pseudo-haplotype for each sample by randomly selecting an allele from each genotype call. Returns ------- h : HaplotypeArray Notes ----- If a mask has been set, it is ignored by this function. Examples -------- >>> import allel >>> import numpy as np >>> np.random.seed(42) >>> g = allel.GenotypeArray([[[0, 0], [0, 1]], ... [[0, 2], [1, 1]], ... [[1, 2], [2, 1]], ... [[2, 2], [-1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(4, 2) dtype=int64> 0 1 0 1 1 1 2 . >>> g = allel.GenotypeArray([[[0, 0, 0], [0, 0, 1]], ... [[0, 1, 1], [1, 1, 1]], ... [[0, 1, 2], [-1, -1, -1]]]) >>> g.haploidify_samples() <HaplotypeArray shape=(3, 2) dtype=int64> 0 0 1 1 2 .
[ "Construct", "a", "pseudo", "-", "haplotype", "for", "each", "sample", "by", "randomly", "selecting", "an", "allele", "from", "each", "genotype", "call", "." ]
python
train
ianepperson/pyredminews
redmine/redmine_rest.py
https://github.com/ianepperson/pyredminews/blob/b2b0581483632738a3acca3b4e093c181847b813/redmine/redmine_rest.py#L785-L822
def check_cache(self, type, data, obj=None): '''Returns the updated cached version of the given dict''' try: id = data['id'] except: # Not an identifiable item #print 'don\'t know this item %r:%r' % (type, data) return data # If obj was passed in, its type takes precedence try: type = obj._get_type() except: pass # Find the item in the cache, update and return if it's there try: hit = self.item_cache[type][id] except KeyError: pass else: hit._update_data(data) #print 'cache hit for %s at %s' % (type, id) return hit # Not there? Let's make us a new item # If we weren't given the object ref, find the name in the global scope if not obj: # Default to Redmine_Item if it's not found obj = self.item_class.get(type, Redmine_Item) new_item = obj(redmine=self, data=data, type=type) # Store it self.item_cache.setdefault(type, {})[id] = new_item #print 'set new %s at %s' % (type, id) return new_item
[ "def", "check_cache", "(", "self", ",", "type", ",", "data", ",", "obj", "=", "None", ")", ":", "try", ":", "id", "=", "data", "[", "'id'", "]", "except", ":", "# Not an identifiable item", "#print 'don\\'t know this item %r:%r' % (type, data)", "return", "data"...
Returns the updated cached version of the given dict
[ "Returns", "the", "updated", "cached", "version", "of", "the", "given", "dict" ]
python
train
Exanis/django-rest-generators
django_rest_generators/steps/database.py
https://github.com/Exanis/django-rest-generators/blob/fb14ccbba8cb029dc056d852bc13d9216dc924e4/django_rest_generators/steps/database.py#L9-L21
def then_a_model_exists(context, model_name, key, value): """ :type model_name: str :type key: str :type value: str :type context: behave.runner.Context """ model = apps.get_model(model_name) args = { key: value } obj = model.objects.get(**args) assert obj is not None
[ "def", "then_a_model_exists", "(", "context", ",", "model_name", ",", "key", ",", "value", ")", ":", "model", "=", "apps", ".", "get_model", "(", "model_name", ")", "args", "=", "{", "key", ":", "value", "}", "obj", "=", "model", ".", "objects", ".", ...
:type model_name: str :type key: str :type value: str :type context: behave.runner.Context
[ ":", "type", "model_name", ":", "str", ":", "type", "key", ":", "str", ":", "type", "value", ":", "str", ":", "type", "context", ":", "behave", ".", "runner", ".", "Context" ]
python
train
pysal/spglm
spglm/links.py
https://github.com/pysal/spglm/blob/1339898adcb7e1638f1da83d57aa37392525f018/spglm/links.py#L602-L621
def deriv(self, p): """ Derivative of CDF link Parameters ---------- p : array-like mean parameters Returns ------- g'(p) : array The derivative of CDF transform at `p` Notes ----- g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`)) """ p = self._clean(p) return 1. / self.dbn.pdf(self.dbn.ppf(p))
[ "def", "deriv", "(", "self", ",", "p", ")", ":", "p", "=", "self", ".", "_clean", "(", "p", ")", "return", "1.", "/", "self", ".", "dbn", ".", "pdf", "(", "self", ".", "dbn", ".", "ppf", "(", "p", ")", ")" ]
Derivative of CDF link Parameters ---------- p : array-like mean parameters Returns ------- g'(p) : array The derivative of CDF transform at `p` Notes ----- g'(`p`) = 1./ `dbn`.pdf(`dbn`.ppf(`p`))
[ "Derivative", "of", "CDF", "link" ]
python
train
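The identity implemented above is inverse-function differentiation: for a CDF link $g(p) = F^{-1}(p)$ with density $f$,

g'(p) = \frac{d}{dp} F^{-1}(p) = \frac{1}{f\left(F^{-1}(p)\right)},

which is exactly `1. / self.dbn.pdf(self.dbn.ppf(p))`, since scipy's `ppf` is $F^{-1}$ and `pdf` is $f$.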
iotile/coretools
iotilecore/iotile/core/hw/reports/report.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/reports/report.py#L184-L200
def FromDict(cls, obj): """Create an IOTileEvent from the result of a previous call to asdict(). Args: obj (dict): A dictionary produced by a call to IOTileEvent.asdict() Returns: IOTileEvent: The converted IOTileEvent object. """ timestamp = obj.get('timestamp') if timestamp is not None: import dateutil.parser timestamp = dateutil.parser.parse(timestamp) return IOTileEvent(obj.get('device_timestamp'), obj.get('stream'), obj.get('extra_data'), obj.get('data'), reading_id=obj.get('streamer_local_id'), reading_time=timestamp)
[ "def", "FromDict", "(", "cls", ",", "obj", ")", ":", "timestamp", "=", "obj", ".", "get", "(", "'timestamp'", ")", "if", "timestamp", "is", "not", "None", ":", "import", "dateutil", ".", "parser", "timestamp", "=", "dateutil", ".", "parser", ".", "pars...
Create an IOTileEvent from the result of a previous call to asdict(). Args: obj (dict): A dictionary produced by a call to IOTileEvent.asdict() Returns: IOTileEvent: The converted IOTileEvent object.
[ "Create", "an", "IOTileEvent", "from", "the", "result", "of", "a", "previous", "call", "to", "asdict", "()", "." ]
python
train
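A round-trip sketch (the stream id and payload are hypothetical); FromDict simply inverts asdict(), parsing the 'timestamp' string back into a datetime via dateutil:

event = IOTileEvent.FromDict({
    'device_timestamp': 1024,
    'stream': 0x5001,                  # hypothetical stream id
    'extra_data': None,
    'data': {'value': 3.2},
    'streamer_local_id': 7,
    'timestamp': '2019-01-01T00:00:00Z',
})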
robinandeer/puzzle
puzzle/plugins/vcf/mixins/variant_extras/annotations.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/plugins/vcf/mixins/variant_extras/annotations.py#L75-L89
def _add_rank_score(self, variant_obj, info_dict): """Add the rank score if found Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary """ rank_score_entry = info_dict.get('RankScore') if rank_score_entry: for family_annotation in rank_score_entry.split(','): rank_score = family_annotation.split(':')[-1] logger.debug("Updating rank_score to: {0}".format( rank_score)) variant_obj.rank_score = float(rank_score)
[ "def", "_add_rank_score", "(", "self", ",", "variant_obj", ",", "info_dict", ")", ":", "rank_score_entry", "=", "info_dict", ".", "get", "(", "'RankScore'", ")", "if", "rank_score_entry", ":", "for", "family_annotation", "in", "rank_score_entry", ".", "split", "...
Add the rank score if found Args: variant_obj (puzzle.models.Variant) info_dict (dict): An info dictionary
[ "Add", "the", "rank", "score", "if", "found", "Args", ":", "variant_obj", "(", "puzzle", ".", "models", ".", "Variant", ")", "info_dict", "(", "dict", ")", ":", "An", "info", "dictionary" ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_firmware.py#L941-L954
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_slot(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status") config = logical_chassis_fwdl_status output = ET.SubElement(logical_chassis_fwdl_status, "output") cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries") fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries") blade_slot = ET.SubElement(fwdl_entries, "blade-slot") blade_slot.text = kwargs.pop('blade_slot') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_slot", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "logical_chassis_fwdl_status", "=", "ET", ".", "Element", "(", "\"log...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
deifyed/vault
libconman/vault.py
https://github.com/deifyed/vault/blob/e3c37ade6c3e6b61a76ec6cd2ba98881c7401d97/libconman/vault.py#L46-L58
def secure(self, targets, recursive): ''' Saves information about each target file and/or folder and creates a hard link from the file(s) to the vault directory ''' for target in targets: if os.path.isfile(target): path, name = os.path.split(os.path.realpath(target)) target = Target(name, path) target.secure() else: targets += self._fetchFilesFromFolder(target, recursive)
[ "def", "secure", "(", "self", ",", "targets", ",", "recursive", ")", ":", "for", "target", "in", "targets", ":", "if", "os", ".", "path", ".", "isfile", "(", "target", ")", ":", "path", ",", "name", "=", "os", ".", "path", ".", "split", "(", "os"...
Saves information about each target file and/or folder and creates a hard link from the file(s) to the vault directory
[ "Saves", "information", "about", "each", "target", "file", "and", "/", "or", "folder", "and", "creates", "a", "hard", "link", "from", "the", "file", "(", "s", ")", "to", "the", "vault", "directory" ]
python
train
bpannier/simpletr64
simpletr64/actions/wan.py
https://github.com/bpannier/simpletr64/blob/31081139f4e6c85084a56de1617df73927135466/simpletr64/actions/wan.py#L123-L136
def getADSLInfo(self, wanInterfaceId=1, timeout=1): """Execute GetInfo action to get basic ADSL information. :param int wanInterfaceId: the id of the WAN device :param float timeout: the timeout to wait for the action to be executed :return: ADSL information. :rtype: ADSLInfo """ namespace = Wan.getServiceType("getADSLInfo") + str(wanInterfaceId) uri = self.getControlURL(namespace) results = self.execute(uri, namespace, "GetInfo", timeout=timeout) return ADSLInfo(results)
[ "def", "getADSLInfo", "(", "self", ",", "wanInterfaceId", "=", "1", ",", "timeout", "=", "1", ")", ":", "namespace", "=", "Wan", ".", "getServiceType", "(", "\"getADSLInfo\"", ")", "+", "str", "(", "wanInterfaceId", ")", "uri", "=", "self", ".", "getCont...
Execute GetInfo action to get basic ADSL information's. :param int wanInterfaceId: the id of the WAN device :param float timeout: the timeout to wait for the action to be executed :return: ADSL informations. :rtype: ADSLInfo
[ "Execute", "GetInfo", "action", "to", "get", "basic", "ADSL", "information", "s", "." ]
python
train
JohnVinyard/zounds
zounds/learn/util.py
https://github.com/JohnVinyard/zounds/blob/337b3f98753d09eaab1c72dcd37bb852a3fa5ac6/zounds/learn/util.py#L11-L29
def simple_settings(cls): """ Create sane default persistence settings for learning pipelines :param cls: The class to decorate """ class Settings(ff.PersistenceSettings): _id = cls.__name__ id_provider = ff.StaticIdProvider(_id) key_builder = ff.StringDelimitedKeyBuilder() database = ff.FileSystemDatabase( path=_id, key_builder=key_builder, createdirs=True) class Model(cls, Settings): pass Model.__name__ = cls.__name__ Model.__module__ = cls.__module__ return Model
[ "def", "simple_settings", "(", "cls", ")", ":", "class", "Settings", "(", "ff", ".", "PersistenceSettings", ")", ":", "_id", "=", "cls", ".", "__name__", "id_provider", "=", "ff", ".", "StaticIdProvider", "(", "_id", ")", "key_builder", "=", "ff", ".", "...
Create sane default persistence settings for learning pipelines :param cls: The class to decorate
[ "Create", "sane", "default", "persistence", "settings", "for", "learning", "pipelines", ":", "param", "cls", ":", "The", "class", "to", "decorate" ]
python
train
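A minimal decoration sketch; the decorator works on any class, deriving the persistence id, key builder and file-system database from the class name (the class here is hypothetical):

@simple_settings
class MyPipeline(object):
    pass

print(MyPipeline.__name__)  # 'MyPipeline' is preserved by the decorator
print(MyPipeline._id)       # persistence id derived from the class name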
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L2082-L2103
def add_object(self, file_path, file_object, error_fct=None): """Add a fake file or directory into the filesystem at file_path. Args: file_path: The path to the file to be added relative to self. file_object: File or directory to add. error_fct: The error function to be called if file_path does not correspond to a directory (used internally). Raises: IOError or OSError: if file_path does not correspond to a directory. """ error_fct = error_fct or self.raise_os_error if not file_path: target_directory = self.root else: target_directory = self.resolve(file_path) if not S_ISDIR(target_directory.st_mode): error = errno.ENOENT if self.is_windows_fs else errno.ENOTDIR error_fct(error, file_path) target_directory.add_entry(file_object)
[ "def", "add_object", "(", "self", ",", "file_path", ",", "file_object", ",", "error_fct", "=", "None", ")", ":", "error_fct", "=", "error_fct", "or", "self", ".", "raise_os_error", "if", "not", "file_path", ":", "target_directory", "=", "self", ".", "root", ...
Add a fake file or directory into the filesystem at file_path. Args: file_path: The path to the file to be added relative to self. file_object: File or directory to add. error_class: The error class to be thrown if file_path does not correspond to a directory (used internally( Raises: IOError or OSError: if file_path does not correspond to a directory.
[ "Add", "a", "fake", "file", "or", "directory", "into", "the", "filesystem", "at", "file_path", "." ]
python
train
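add_object is usually reached indirectly through FakeFilesystem's public helpers; a short sketch of that path (the helper names follow recent pyfakefs releases, so treat them as an assumption):

from pyfakefs.fake_filesystem import FakeFilesystem

fs = FakeFilesystem()
fs.create_dir('/data')            # creates the parent directory object
fs.create_file('/data/log.txt')   # internally adds a file object under '/data'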
materialsproject/pymatgen
pymatgen/io/cif.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/cif.py#L1374-L1389
def str2float(text): """ Remove uncertainty brackets from strings and return the float. """ try: # Note that the ending ) is sometimes missing. That is why the code has # been modified to treat it as optional. Same logic applies to lists. return float(re.sub(r"\(.+\)*", "", text)) except TypeError: if isinstance(text, list) and len(text) == 1: return float(re.sub(r"\(.+\)*", "", text[0])) except ValueError as ex: if text.strip() == ".": return 0 raise ex
[ "def", "str2float", "(", "text", ")", ":", "try", ":", "# Note that the ending ) is sometimes missing. That is why the code has", "# been modified to treat it as optional. Same logic applies to lists.", "return", "float", "(", "re", ".", "sub", "(", "r\"\\(.+\\)*\"", ",", "\"\"...
Remove uncertainty brackets from strings and return the float.
[ "Remove", "uncertainty", "brackets", "from", "strings", "and", "return", "the", "float", "." ]
python
train
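Worked examples of str2float, derived directly from the regex and fallbacks above:

str2float("4.523(21)")   # -> 4.523  (uncertainty bracket stripped)
str2float(["2.5(3)"])    # -> 2.5    (single-element list is unwrapped)
str2float(" . ")         # -> 0      (a lone '.' is treated as zero)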
suds-community/suds
suds/servicedefinition.py
https://github.com/suds-community/suds/blob/6fb0a829337b5037a66c20aae6f89b41acd77e40/suds/servicedefinition.py#L107-L128
def getprefixes(self): """Add prefixes for each namespace referenced by parameter types.""" namespaces = [] for l in (self.params, self.types): for t,r in l: ns = r.namespace() if ns[1] is None: continue if ns[1] in namespaces: continue if Namespace.xs(ns) or Namespace.xsd(ns): continue namespaces.append(ns[1]) if t == r: continue ns = t.namespace() if ns[1] is None: continue if ns[1] in namespaces: continue namespaces.append(ns[1]) i = 0 namespaces.sort() for u in namespaces: p = self.nextprefix() ns = (p, u) self.prefixes.append(ns)
[ "def", "getprefixes", "(", "self", ")", ":", "namespaces", "=", "[", "]", "for", "l", "in", "(", "self", ".", "params", ",", "self", ".", "types", ")", ":", "for", "t", ",", "r", "in", "l", ":", "ns", "=", "r", ".", "namespace", "(", ")", "if...
Add prefixes for each namespace referenced by parameter types.
[ "Add", "prefixes", "for", "each", "namespace", "referenced", "by", "parameter", "types", "." ]
python
train
senaite/senaite.core
bika/lims/utils/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/utils/__init__.py#L651-L667
def measure_time(func_to_measure):
    """
    This decorator allows measuring the execution time of
    a function and prints it to the console.
    :param func_to_measure: function to be decorated
    """
    def wrap(*args, **kwargs):
        start_time = time()
        return_value = func_to_measure(*args, **kwargs)
        finish_time = time()
        log = "%s took %0.4f seconds. start_time = %0.4f - finish_time = %0.4f\n" \
              % (func_to_measure.__name__, finish_time - start_time,
                 start_time, finish_time)
        print(log)
        return return_value
    return wrap
[ "def", "measure_time", "(", "func_to_measure", ")", ":", "def", "wrap", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "start_time", "=", "time", "(", ")", "return_value", "=", "func_to_measure", "(", "*", "args", ",", "*", "*", "kwargs", ")", ...
This decorator allows measuring the execution time of a function and prints it to the console. :param func_to_measure: function to be decorated
[ "This", "decorator", "allows", "to", "measure", "the", "execution", "time", "of", "a", "function", "and", "prints", "it", "to", "the", "console", ".", ":", "param", "func_to_measure", ":", "function", "to", "be", "decorated" ]
python
train
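A usage sketch for the measure_time decorator (the printed timing is illustrative):

from time import sleep

@measure_time
def slow_add(a, b):
    sleep(0.1)
    return a + b

slow_add(1, 2)  # prints e.g. "slow_add took 0.1002 seconds. start_time = ..."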
StanfordVL/robosuite
robosuite/environments/baxter_peg_in_hole.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/environments/baxter_peg_in_hole.py#L136-L171
def reward(self, action): """ Reward function for the task. The sparse reward is 0 if the peg is outside the hole, and 1 if it's inside. We enforce that it's inside at an appropriate angle (cos(theta) > 0.95). The dense reward has four components. Reaching: in [0, 1], to encourage the arms to get together. Perpendicular and parallel distance: in [0,1], for the same purpose. Cosine of the angle: in [0, 1], to encourage having the right orientation. """ reward = 0 t, d, cos = self._compute_orientation() # Right location and angle if d < 0.06 and t >= -0.12 and t <= 0.14 and cos > 0.95: reward = 1 # use a shaping reward if self.reward_shaping: # reaching reward hole_pos = self.sim.data.body_xpos[self.hole_body_id] gripper_site_pos = self.sim.data.body_xpos[self.cyl_body_id] dist = np.linalg.norm(gripper_site_pos - hole_pos) reaching_reward = 1 - np.tanh(1.0 * dist) reward += reaching_reward # Orientation reward reward += 1 - np.tanh(d) reward += 1 - np.tanh(np.abs(t)) reward += cos return reward
[ "def", "reward", "(", "self", ",", "action", ")", ":", "reward", "=", "0", "t", ",", "d", ",", "cos", "=", "self", ".", "_compute_orientation", "(", ")", "# Right location and angle", "if", "d", "<", "0.06", "and", "t", ">=", "-", "0.12", "and", "t",...
Reward function for the task. The sparse reward is 0 if the peg is outside the hole, and 1 if it's inside. We enforce that it's inside at an appropriate angle (cos(theta) > 0.95). The dense reward has four components. Reaching: in [0, 1], to encourage the arms to get together. Perpendicular and parallel distance: in [0,1], for the same purpose. Cosine of the angle: in [0, 1], to encourage having the right orientation.
[ "Reward", "function", "for", "the", "task", "." ]
python
train
acutesoftware/AIKIF
scripts/examples/game_of_life_console.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/scripts/examples/game_of_life_console.py#L28-L43
def main():
    """
    Example to show AIKIF logging of results.
    Generates a sequence of random grids and runs the
    Game of Life, saving results
    """
    iterations = 9      # how many simulations to run
    years = 3           # how many times to run each simulation
    width = 22          # grid width
    height = 78         # grid height
    time_delay = 0.03   # delay when printing on screen
    lg = mod_log.Log('test')
    lg.record_process('Game of Life', 'game_of_life_console.py')
    for _ in range(iterations):
        s,e = run_game_of_life(years, width, height, time_delay, 'N')
        lg.record_result("Started with " + str(s) + " cells and ended with " + str(e) + " cells")
[ "def", "main", "(", ")", ":", "iterations", "=", "9", "# how many simulations to run", "years", "=", "3", "# how many times to run each simulation", "width", "=", "22", "# grid height", "height", "=", "78", "# grid width", "time_delay", "=", "0.03", "# delay when prin...
Example to show AIKIF logging of results. Generates a sequence of random grids and runs the Game of Life, saving results
[ "Example", "to", "show", "AIKIF", "logging", "of", "results", ".", "Generates", "a", "sequence", "of", "random", "grids", "and", "runs", "the", "Game", "of", "Life", "saving", "results" ]
python
train
google/transitfeed
transitfeed/trip.py
https://github.com/google/transitfeed/blob/eb2991a3747ba541b2cb66502b305b6304a1f85f/transitfeed/trip.py#L163-L168
def GetCountStopTimes(self): """Return the number of stops made by this trip.""" cursor = self._schedule._connection.cursor() cursor.execute( 'SELECT count(*) FROM stop_times WHERE trip_id=?', (self.trip_id,)) return cursor.fetchone()[0]
[ "def", "GetCountStopTimes", "(", "self", ")", ":", "cursor", "=", "self", ".", "_schedule", ".", "_connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'SELECT count(*) FROM stop_times WHERE trip_id=?'", ",", "(", "self", ".", "trip_id", ",", ...
Return the number of stops made by this trip.
[ "Return", "the", "number", "of", "stops", "made", "by", "this", "trip", "." ]
python
train
senaite/senaite.jsonapi
src/senaite/jsonapi/api.py
https://github.com/senaite/senaite.jsonapi/blob/871959f4b1c9edbb477e9456325527ca78e13ec6/src/senaite/jsonapi/api.py#L622-L643
def get_brain(brain_or_object):
    """Return a ZCatalog brain for the object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: ZCatalog brain for the object
    :rtype: CatalogBrain
    """
    if is_brain(brain_or_object):
        return brain_or_object
    if is_root(brain_or_object):
        return brain_or_object
    # fetch the brain by UID
    uid = get_uid(brain_or_object)
    uc = get_tool("uid_catalog")
    results = uc({"UID": uid}) or search(query={'UID': uid})
    if len(results) == 0:
        return None
    if len(results) > 1:
        fail(500, "More than one object with UID={} found in portal_catalog".format(uid))
    return results[0]
[ "def", "get_brain", "(", "brain_or_object", ")", ":", "if", "is_brain", "(", "brain_or_object", ")", ":", "return", "brain_or_object", "if", "is_root", "(", "brain_or_object", ")", ":", "return", "brain_or_object", "# fetch the brain by UID", "uid", "=", "get_uid", ...
Return a ZCatalog brain for the object

:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:returns: ZCatalog brain for the object
:rtype: CatalogBrain
[ "Return", "a", "ZCatalog", "brain", "for", "the", "object" ]
python
train
bitcaster-io/bitcaster
src/bitcaster/cli/commands/createuser.py
https://github.com/bitcaster-io/bitcaster/blob/04625a4b67c1ad01e5d38faa3093828b360d4a98/src/bitcaster/cli/commands/createuser.py#L49-L107
def createuser(ctx, email, password, superuser, no_password, prompt): 'Create a new user.' if prompt: if not email: email = click.prompt('Email') if not (password or no_password): password = click.prompt('Password') if superuser is None: superuser = click.confirm('Should this user be a superuser?', default=False) if superuser is None: superuser = False if not email: raise click.ClickException('Invalid or missing email address.') if not no_password and not password: raise click.ClickException('No password set and --no-password not passed.') import django django.setup() from bitcaster.models import User user = User.objects.filter(email=email).first() if user: if prompt: change = click.confirm(f'User {email} already exists. Proceed updating it?', default=False) if not change: ctx.exit() user.set_password(password) if superuser: user.is_superuser = superuser op = 'updated' else: click.echo('Nothing to do. User exists', err=True, color='red') sys.exit(1) else: op = 'created' user = User( email=email, is_superuser=superuser, is_staff=superuser, is_active=True, ) if password: user.set_password(password) try: user.save() except Exception as e: raise click.ClickException(e) click.echo(f'User {email} {op}')
[ "def", "createuser", "(", "ctx", ",", "email", ",", "password", ",", "superuser", ",", "no_password", ",", "prompt", ")", ":", "if", "prompt", ":", "if", "not", "email", ":", "email", "=", "click", ".", "prompt", "(", "'Email'", ")", "if", "not", "("...
Create a new user.
[ "Create", "a", "new", "user", "." ]
python
train
Shizmob/pydle
pydle/features/tls.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/features/tls.py#L67-L75
async def on_raw_671(self, message): """ WHOIS: user is connected securely. """ target, nickname = message.params[:2] info = { 'secure': True } if nickname in self._whois_info: self._whois_info[nickname].update(info)
[ "async", "def", "on_raw_671", "(", "self", ",", "message", ")", ":", "target", ",", "nickname", "=", "message", ".", "params", "[", ":", "2", "]", "info", "=", "{", "'secure'", ":", "True", "}", "if", "nickname", "in", "self", ".", "_whois_info", ":"...
WHOIS: user is connected securely.
[ "WHOIS", ":", "user", "is", "connected", "securely", "." ]
python
train
ungarj/tilematrix
tilematrix/_tilepyramid.py
https://github.com/ungarj/tilematrix/blob/6f8cd3b85f61434a7ce5d7b635c3ad8f18ccb268/tilematrix/_tilepyramid.py#L206-L230
def tiles_from_geom(self, geometry, zoom): """ Return all tiles intersecting with input geometry. - geometry: shapely geometry - zoom: zoom level """ validate_zoom(zoom) if geometry.is_empty: return if not geometry.is_valid: raise ValueError("no valid geometry: %s" % geometry.type) if geometry.geom_type == "Point": yield self.tile_from_xy(geometry.x, geometry.y, zoom) elif geometry.geom_type == "MultiPoint": for point in geometry: yield self.tile_from_xy(point.x, point.y, zoom) elif geometry.geom_type in ( "LineString", "MultiLineString", "Polygon", "MultiPolygon", "GeometryCollection" ): prepared_geometry = prep(clip_geometry_to_srs_bounds(geometry, self)) for tile in self.tiles_from_bbox(geometry, zoom): if prepared_geometry.intersects(tile.bbox()): yield tile
[ "def", "tiles_from_geom", "(", "self", ",", "geometry", ",", "zoom", ")", ":", "validate_zoom", "(", "zoom", ")", "if", "geometry", ".", "is_empty", ":", "return", "if", "not", "geometry", ".", "is_valid", ":", "raise", "ValueError", "(", "\"no valid geometr...
Return all tiles intersecting with input geometry. - geometry: shapely geometry - zoom: zoom level
[ "Return", "all", "tiles", "intersecting", "with", "input", "geometry", "." ]
python
train
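A usage sketch for tiles_from_geom; the "geodetic" grid name follows tilematrix's documented presets, but treat the exact construction as an assumption:

from shapely.geometry import Point
from tilematrix import TilePyramid

tp = TilePyramid("geodetic")
tiles = list(tp.tiles_from_geom(Point(12.0, 48.0), 5))
# a Point yields exactly the one tile containing it at zoom 5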
inasafe/inasafe
safe/metadata35/utils.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/metadata35/utils.py#L130-L147
def read_property_from_xml(root, path): """ Get the text from an XML property. Whitespaces, tabs and new lines are trimmed :param root: container in which we search :type root: ElementTree.Element :param path: path to search in root :type path: str :return: the text of the element at the given path :rtype: str, None """ element = root.find(path, XML_NS) try: return element.text.strip(' \t\n\r') except AttributeError: return None
[ "def", "read_property_from_xml", "(", "root", ",", "path", ")", ":", "element", "=", "root", ".", "find", "(", "path", ",", "XML_NS", ")", "try", ":", "return", "element", ".", "text", ".", "strip", "(", "' \\t\\n\\r'", ")", "except", "AttributeError", "...
Get the text from an XML property. Whitespaces, tabs and new lines are trimmed :param root: container in which we search :type root: ElementTree.Element :param path: path to search in root :type path: str :return: the text of the element at the given path :rtype: str, None
[ "Get", "the", "text", "from", "an", "XML", "property", "." ]
python
train
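A worked example for read_property_from_xml, assuming the module-level XML_NS namespace map is a plain dict (paths without prefixes simply ignore it):

from xml.etree import ElementTree

root = ElementTree.fromstring("<metadata><title>  Flood report\n</title></metadata>")
read_property_from_xml(root, "title")    # -> 'Flood report'
read_property_from_xml(root, "missing")  # -> None (AttributeError is swallowed)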
CIRCL/IP-ASN-history
server/fetch_historical_bviews.py
https://github.com/CIRCL/IP-ASN-history/blob/2e02ced01a08531a007d9cd71547c8248570de1b/server/fetch_historical_bviews.py#L113-L136
def to_download(): """ Build interval of urls to download. We always get the first file of the next day. Ex: 2013-01-01 => 2013-01-02.0000 """ first_day = parse(interval_first) last_day = parse(interval_last) format_change = parse('2010-06-14') one_day = datetime.timedelta(1) cur_day = first_day url_list = [] while cur_day < last_day: fname = filename.format(day=cur_day.strftime("%Y%m%d")) if cur_day > format_change: cur_day += one_day url = base_url.format(year_month=cur_day.strftime("%Y.%m"), file_day=cur_day.strftime("%Y%m%d")) else: url = base_url_old.format(year_month=cur_day.strftime("%Y.%m"), file_day=cur_day.strftime("%Y%m%d")) cur_day += one_day url_list.append((fname, url)) return sorted(url_list, key=lambda tup: tup[0], reverse=True)
[ "def", "to_download", "(", ")", ":", "first_day", "=", "parse", "(", "interval_first", ")", "last_day", "=", "parse", "(", "interval_last", ")", "format_change", "=", "parse", "(", "'2010-06-14'", ")", "one_day", "=", "datetime", ".", "timedelta", "(", "1", ...
Build interval of urls to download. We always get the first file of the next day. Ex: 2013-01-01 => 2013-01-02.0000
[ "Build", "interval", "of", "urls", "to", "download", ".", "We", "always", "get", "the", "first", "file", "of", "the", "next", "day", ".", "Ex", ":", "2013", "-", "01", "-", "01", "=", ">", "2013", "-", "01", "-", "02", ".", "0000" ]
python
valid
mwickert/scikit-dsp-comm
sk_dsp_comm/synchronization.py
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/synchronization.py#L49-L172
def NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3):
    """
    zz,e_tau = NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3)

           z = complex baseband input signal at nominally Ns samples
               per symbol
          Ns = Nominal number of samples per symbol (Ts/T) in the symbol
               tracking loop, often 4
        BnTs = time bandwidth product of loop bandwidth and the symbol period,
               thus the loop bandwidth as a fraction of the symbol rate.
        zeta = loop damping factor
       I_ord = interpolator order, 1, 2, or 3

       e_tau = the timing error e(k) input to the loop filter

          Kp = The phase detector gain in the symbol tracking loop; for the
               NDA algorithm used here always 1

    Mark Wickert July 2014

    Motivated by code found in M. Rice, Digital Communications A Discrete-Time
    Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
    """
    # Loop filter parameters
    K0 = -1.0  # The modulo 1 counter counts down so a sign change in loop
    Kp = 1.0
    K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0
    K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0
    zz = np.zeros(len(z),dtype=np.complex128)
    #zz = np.zeros(int(np.floor(len(z)/float(Ns))),dtype=np.complex128)
    e_tau = np.zeros(len(z))
    #e_tau = np.zeros(int(np.floor(len(z)/float(Ns))))
    #z_TED_buff = np.zeros(Ns)
    c1_buff = np.zeros(2*L+1)
    vi = 0
    CNT_next = 0
    mu_next = 0
    underflow = 0
    epsilon = 0
    mm = 1
    z = np.hstack(([0], z))
    for nn in range(1,Ns*int(np.floor(len(z)/float(Ns)-(Ns-1)))):
        # Define variables used in linear interpolator control
        CNT = CNT_next
        mu = mu_next
        if underflow == 1:
            if I_ord == 1:
                # Decimated interpolator output (piecewise linear)
                z_interp = mu*z[nn] + (1 - mu)*z[nn-1]
            elif I_ord == 2:
                # Decimated interpolator output (piecewise parabolic)
                # in Farrow form with alpha = 1/2
                v2 = 1/2.*np.sum(z[nn+2:nn-1-1:-1]*[1, -1, -1, 1])
                v1 = 1/2.*np.sum(z[nn+2:nn-1-1:-1]*[-1, 3, -1, -1])
                v0 = z[nn]
                z_interp = (mu*v2 + v1)*mu + v0
            elif I_ord == 3:
                # Decimated interpolator output (piecewise cubic)
                # in Farrow form
                v3 = np.sum(z[nn+2:nn-1-1:-1]*[1/6., -1/2., 1/2., -1/6.])
                v2 = np.sum(z[nn+2:nn-1-1:-1]*[0, 1/2., -1, 1/2.])
                v1 = np.sum(z[nn+2:nn-1-1:-1]*[-1/6., 1, -1/2., -1/3.])
                v0 = z[nn]
                z_interp = ((mu*v3 + v2)*mu + v1)*mu + v0
            else:
                print('Error: I_ord must be 1, 2, or 3')
            # Form TED output that is smoothed using 2*L+1 samples
            # We need Ns interpolants for this TED: 0:Ns-1
            c1 = 0
            for kk in range(Ns):
                if I_ord == 1:
                    # piecewise linear interp over Ns samples for TED
                    z_TED_interp = mu*z[nn+kk] + (1 - mu)*z[nn-1+kk]
                elif I_ord == 2:
                    # piecewise parabolic in Farrow form with alpha = 1/2
                    v2 = 1/2.*np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[1, -1, -1, 1])
                    v1 = 1/2.*np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[-1, 3, -1, -1])
                    v0 = z[nn+kk]
                    z_TED_interp = (mu*v2 + v1)*mu + v0
                elif I_ord == 3:
                    # piecewise cubic in Farrow form
                    v3 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[1/6., -1/2., 1/2., -1/6.])
                    v2 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[0, 1/2., -1, 1/2.])
                    v1 = np.sum(z[nn+kk+2:nn+kk-1-1:-1]*[-1/6., 1, -1/2., -1/3.])
                    v0 = z[nn+kk]
                    z_TED_interp = ((mu*v3 + v2)*mu + v1)*mu + v0
                else:
                    print('Error: I_ord must be 1, 2, or 3')
                c1 = c1 + np.abs(z_TED_interp)**2 * np.exp(-1j*2*np.pi/Ns*kk)
            c1 = c1/Ns
            # Update 2*L+1 length buffer for TED output smoothing
            c1_buff = np.hstack(([c1], c1_buff[:-1]))
            # Form the smoothed TED output
            epsilon = -1/(2*np.pi)*np.angle(np.sum(c1_buff)/(2*L+1))
            # Save symbol spaced (decimated to symbol rate) interpolants in zz
            zz[mm] = z_interp
            e_tau[mm] = epsilon  # log the error to the output vector e
            mm += 1
        else:
            # Simple zero-order hold interpolation between symbol samples
            # we just coast using the old value
            #epsilon = 0
            pass
        vp = K1*epsilon       # proportional component of loop filter
        vi = vi + K2*epsilon  # integrator component of loop filter
        v = vp + vi           # loop filter output
        W = 1/float(Ns) + v   # counter control word

        # update registers
        CNT_next = CNT - W           # Update counter value for next cycle
        if CNT_next < 0:             # Test to see if underflow has occurred
            CNT_next = 1 + CNT_next  # Reduce counter value modulo-1 if underflow
            underflow = 1            # Set the underflow flag
            mu_next = CNT/W          # update mu
        else:
            underflow = 0
            mu_next = mu
    # Remove zero samples at end
    zz = zz[:-(len(zz)-mm+1)]
    # Normalize so symbol values have a unity magnitude
    zz /= np.std(zz)
    e_tau = e_tau[:-(len(e_tau)-mm+1)]
    return zz, e_tau
[ "def", "NDA_symb_sync", "(", "z", ",", "Ns", ",", "L", ",", "BnTs", ",", "zeta", "=", "0.707", ",", "I_ord", "=", "3", ")", ":", "# Loop filter parameters", "K0", "=", "-", "1.0", "# The modulo 1 counter counts down so a sign change in loop", "Kp", "=", "1.0",...
zz,e_tau = NDA_symb_sync(z,Ns,L,BnTs,zeta=0.707,I_ord=3) z = complex baseband input signal at nominally Ns samples per symbol Ns = Nominal number of samples per symbol (Ts/T) in the symbol tracking loop, often 4 BnTs = time bandwidth product of loop bandwidth and the symbol period, thus the loop bandwidth as a fraction of the symbol rate. zeta = loop damping factor I_ord = interpolator order, 1, 2, or 3 e_tau = the timing error e(k) input to the loop filter Kp = The phase detector gain in the symbol tracking loop; for the NDA algorithm used here always 1 Mark Wickert July 2014 Motivated by code found in M. Rice, Digital Communications A Discrete-Time Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
[ "zz", "e_tau", "=", "NDA_symb_sync", "(", "z", "Ns", "L", "BnTs", "zeta", "=", "0", ".", "707", "I_ord", "=", "3", ")" ]
python
valid
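A calling-convention sketch for NDA_symb_sync only; random QPSK with rectangular pulses gives the timing detector little envelope variation to work with, so a real test would use square-root raised-cosine shaping:

import numpy as np

Ns = 4                                     # samples per symbol
bits = np.random.randint(0, 2, (1000, 2))
syms = (2*bits[:, 0] - 1) + 1j*(2*bits[:, 1] - 1)
z = np.repeat(syms, Ns)                    # crude rectangular pulse shaping
zz, e_tau = NDA_symb_sync(z, Ns, L=8, BnTs=0.05)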
profitbricks/profitbricks-sdk-python
profitbricks/client.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L1283-L1300
def delete_server(self, datacenter_id, server_id): """ Removes the server from your data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str`` """ response = self._perform_request( url='/datacenters/%s/servers/%s' % ( datacenter_id, server_id), method='DELETE') return response
[ "def", "delete_server", "(", "self", ",", "datacenter_id", ",", "server_id", ")", ":", "response", "=", "self", ".", "_perform_request", "(", "url", "=", "'/datacenters/%s/servers/%s'", "%", "(", "datacenter_id", ",", "server_id", ")", ",", "method", "=", "'DE...
Removes the server from your data center. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param server_id: The unique ID of the server. :type server_id: ``str``
[ "Removes", "the", "server", "from", "your", "data", "center", "." ]
python
valid
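A delete_server usage sketch; the client class matches this SDK, while the credentials and UUIDs are placeholders:

from profitbricks.client import ProfitBricksService

client = ProfitBricksService(username='demo@example.com', password='secret')
client.delete_server(
    datacenter_id='00000000-0000-0000-0000-000000000000',  # placeholder IDs
    server_id='11111111-1111-1111-1111-111111111111')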
MillionIntegrals/vel
vel/rl/models/stochastic_policy_model_separate.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/stochastic_policy_model_separate.py#L34-L42
def reset_weights(self):
        """ Properly initialize model weights """
        self.input_block.reset_weights()
        self.policy_backbone.reset_weights()
        self.value_backbone.reset_weights()
        self.action_head.reset_weights()
        self.value_head.reset_weights()
[ "def", "reset_weights", "(", "self", ")", ":", "self", ".", "input_block", ".", "reset_weights", "(", ")", "self", ".", "policy_backbone", ".", "reset_weights", "(", ")", "self", ".", "value_backbone", ".", "reset_weights", "(", ")", "self", ".", "action_hea...
Properly initialize model weights
[ "Initialize", "properly", "model", "weights" ]
python
train
saltstack/salt
salt/pillar/sql_base.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/sql_base.py#L230-L284
def extract_queries(self, args, kwargs):
    '''
    This function normalizes the config block into a set of queries we
    can use.  The return is a list of consistently laid out dicts.
    '''
    # Please note the function signature is NOT an error. Neither args, nor
    # kwargs should have asterisks. We are passing in a list and dict,
    # rather than receiving variable args. Adding asterisks WILL BREAK the
    # function completely.

    # First, this is the query buffer. Contains lists of [base,sql]
    qbuffer = []

    # Add on the non-keywords...
    qbuffer.extend([[None, s] for s in args])

    # And then the keywords...
    # They aren't in definition order, but they can't conflict with each other.
    klist = list(kwargs.keys())
    klist.sort()
    qbuffer.extend([[k, kwargs[k]] for k in klist])

    # Filter out values that don't have queries.
    qbuffer = [x for x in qbuffer if (
        (isinstance(x[1], six.string_types) and len(x[1])) or
        (isinstance(x[1], (list, tuple)) and (len(x[1]) > 0) and x[1][0]) or
        (isinstance(x[1], dict) and 'query' in x[1] and len(x[1]['query']))
    )]

    # Next, turn the whole buffer into full dicts.
    for qb in qbuffer:
        defaults = {'query': '',
                    'depth': 0,
                    'as_list': False,
                    'with_lists': None,
                    'ignore_null': False
                    }
        if isinstance(qb[1], six.string_types):
            defaults['query'] = qb[1]
        elif isinstance(qb[1], (list, tuple)):
            defaults['query'] = qb[1][0]
            if len(qb[1]) > 1:
                defaults['depth'] = qb[1][1]
            # May set 'as_list' from qb[1][2].
        else:
            defaults.update(qb[1])
            if defaults['with_lists'] and isinstance(defaults['with_lists'], six.string_types):
                defaults['with_lists'] = [
                    int(i) for i in defaults['with_lists'].split(',')
                ]
        qb[1] = defaults

    return qbuffer
[ "def", "extract_queries", "(", "self", ",", "args", ",", "kwargs", ")", ":", "# Please note the function signature is NOT an error. Neither args, nor", "# kwargs should have asterisks. We are passing in a list and dict,", "# rather than receiving variable args. Adding asterisks WILL BREAK...
This function normalizes the config block into a set of queries we can use. The return is a list of consistently laid out dicts.
[ "This", "function", "normalizes", "the", "config", "block", "into", "a", "set", "of", "queries", "we", "can", "use", ".", "The", "return", "is", "a", "list", "of", "consistently", "laid", "out", "dicts", "." ]
python
train
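A sketch of the normalization performed by extract_queries, using a hypothetical pillar instance of the enclosing class:

qbuffer = pillar.extract_queries(
    ["SELECT pillar FROM data"],                               # positional query
    {"nested": {"query": "SELECT k, v FROM kv", "depth": 2}})  # keyword query
# -> [[None, {'query': 'SELECT pillar FROM data', 'depth': 0,
#             'as_list': False, 'with_lists': None, 'ignore_null': False}],
#     ['nested', {'query': 'SELECT k, v FROM kv', 'depth': 2,
#                 'as_list': False, 'with_lists': None, 'ignore_null': False}]]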
pantsbuild/pants
src/python/pants/bin/daemon_pants_runner.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/bin/daemon_pants_runner.py#L245-L262
def nailgunned_stdio(cls, sock, env, handle_stdin=True): """Redirects stdio to the connected socket speaking the nailgun protocol.""" # Determine output tty capabilities from the environment. stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(env) is_tty_capable = all((stdin_isatty, stdout_isatty, stderr_isatty)) if is_tty_capable: with cls._tty_stdio(env) as finalizer: yield finalizer else: with cls._pipe_stdio( sock, stdin_isatty, stdout_isatty, stderr_isatty, handle_stdin ) as finalizer: yield finalizer
[ "def", "nailgunned_stdio", "(", "cls", ",", "sock", ",", "env", ",", "handle_stdin", "=", "True", ")", ":", "# Determine output tty capabilities from the environment.", "stdin_isatty", ",", "stdout_isatty", ",", "stderr_isatty", "=", "NailgunProtocol", ".", "isatty_from...
Redirects stdio to the connected socket speaking the nailgun protocol.
[ "Redirects", "stdio", "to", "the", "connected", "socket", "speaking", "the", "nailgun", "protocol", "." ]
python
train
frictionlessdata/tabulator-py
tabulator/helpers.py
https://github.com/frictionlessdata/tabulator-py/blob/06c25845a7139d919326388cc6335f33f909db8c/tabulator/helpers.py#L68-L83
def detect_encoding(sample, encoding=None): """Detect encoding of a byte string sample. """ # To reduce tabulator import time from cchardet import detect if encoding is not None: return normalize_encoding(sample, encoding) result = detect(sample) confidence = result['confidence'] or 0 encoding = result['encoding'] or 'ascii' encoding = normalize_encoding(sample, encoding) if confidence < config.ENCODING_CONFIDENCE: encoding = config.DEFAULT_ENCODING if encoding == 'ascii': encoding = config.DEFAULT_ENCODING return encoding
[ "def", "detect_encoding", "(", "sample", ",", "encoding", "=", "None", ")", ":", "# To reduce tabulator import time", "from", "cchardet", "import", "detect", "if", "encoding", "is", "not", "None", ":", "return", "normalize_encoding", "(", "sample", ",", "encoding"...
Detect encoding of a byte string sample.
[ "Detect", "encoding", "of", "a", "byte", "string", "sample", "." ]
python
train
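Worked examples for detect_encoding, assuming cchardet reports UTF-8 with high confidence for this sample:

sample = (u"データ,value\n" * 100).encode("utf-8")
detect_encoding(sample)             # -> 'utf-8' (if confidence clears the threshold)
detect_encoding(sample, "latin-1")  # an explicit encoding skips detection entirely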
allenai/allennlp
allennlp/training/tensorboard_writer.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/training/tensorboard_writer.py#L141-L178
def log_metrics(self, train_metrics: dict, val_metrics: dict = None, epoch: int = None, log_to_console: bool = False) -> None: """ Sends all of the train metrics (and validation metrics, if provided) to tensorboard. """ metric_names = set(train_metrics.keys()) if val_metrics is not None: metric_names.update(val_metrics.keys()) val_metrics = val_metrics or {} # For logging to the console if log_to_console: dual_message_template = "%s | %8.3f | %8.3f" no_val_message_template = "%s | %8.3f | %8s" no_train_message_template = "%s | %8s | %8.3f" header_template = "%s | %-10s" name_length = max([len(x) for x in metric_names]) logger.info(header_template, "Training".rjust(name_length + 13), "Validation") for name in metric_names: # Log to tensorboard train_metric = train_metrics.get(name) if train_metric is not None: self.add_train_scalar(name, train_metric, timestep=epoch) val_metric = val_metrics.get(name) if val_metric is not None: self.add_validation_scalar(name, val_metric, timestep=epoch) # And maybe log to console if log_to_console and val_metric is not None and train_metric is not None: logger.info(dual_message_template, name.ljust(name_length), train_metric, val_metric) elif log_to_console and val_metric is not None: logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric) elif log_to_console and train_metric is not None: logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")
[ "def", "log_metrics", "(", "self", ",", "train_metrics", ":", "dict", ",", "val_metrics", ":", "dict", "=", "None", ",", "epoch", ":", "int", "=", "None", ",", "log_to_console", ":", "bool", "=", "False", ")", "->", "None", ":", "metric_names", "=", "s...
Sends all of the train metrics (and validation metrics, if provided) to tensorboard.
[ "Sends", "all", "of", "the", "train", "metrics", "(", "and", "validation", "metrics", "if", "provided", ")", "to", "tensorboard", "." ]
python
train
brews/snakebacon
snakebacon/agedepth.py
https://github.com/brews/snakebacon/blob/f5363d0d1225912adc30031bf2c13b54000de8f2/snakebacon/agedepth.py#L64-L72
def fit(self): """Fit MCMC AgeDepthModel""" self._mcmcfit = self.mcmcsetup.run() self._mcmcfit.burnin(self.burnin) dmin = min(self._mcmcfit.depth_segments) dmax = max(self._mcmcfit.depth_segments) self._thick = (dmax - dmin) / len(self.mcmcfit.depth_segments) self._depth = np.arange(dmin, dmax + 0.001) self._age_ensemble = np.array([self.agedepth(d=dx) for dx in self.depth])
[ "def", "fit", "(", "self", ")", ":", "self", ".", "_mcmcfit", "=", "self", ".", "mcmcsetup", ".", "run", "(", ")", "self", ".", "_mcmcfit", ".", "burnin", "(", "self", ".", "burnin", ")", "dmin", "=", "min", "(", "self", ".", "_mcmcfit", ".", "de...
Fit MCMC AgeDepthModel
[ "Fit", "MCMC", "AgeDepthModel" ]
python
train
archman/beamline
beamline/datautils.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/datautils.py#L53-L79
def getAllCols(self, sddsfile=None):
    """ get all available column names from sddsfile

    :param sddsfile: sdds file name, if not given, fall back to the one from ``__init__()``
    :return: all sdds data column names
    :rtype: list

    :Example:

        >>> dh = DataExtracter('test.out')
        >>> print(dh.getAllCols())
        ['x', 'xp', 'y', 'yp', 't', 'p', 'particleID']
        >>> print(dh.getAllCols('test.twi'))
        ['s', 'betax', 'alphax', 'psix', 'etax', 'etaxp', 'xAperture', 'betay', 'alphay', 'psiy', 'etay', 'etayp', 'yAperture', 'pCentral0', 'ElementName', 'ElementOccurence', 'ElementType']
    """
    if SDDS_:
        if sddsfile is not None:
            sddsobj = sdds.SDDS(2)
            sddsobj.load(sddsfile)
        else:
            sddsobj = self.sddsobj
        return sddsobj.columnName
    else:
        if sddsfile is None:
            sddsfile = self.sddsfile
        return subprocess.check_output(['sddsquery', '-col', sddsfile]).split()
[ "def", "getAllCols", "(", "self", ",", "sddsfile", "=", "None", ")", ":", "if", "SDDS_", ":", "if", "sddsfile", "is", "not", "None", ":", "sddsobj", "=", "sdds", ".", "SDDS", "(", "2", ")", "sddsobj", ".", "load", "(", "sddsfile", ")", "else", ":",...
get all available column names from sddsfile

:param sddsfile: sdds file name, if not given, fall back to the one from ``__init__()``
:return: all sdds data column names
:rtype: list

:Example:

>>> dh = DataExtracter('test.out')
>>> print(dh.getAllCols())
['x', 'xp', 'y', 'yp', 't', 'p', 'particleID']
>>> print(dh.getAllCols('test.twi'))
['s', 'betax', 'alphax', 'psix', 'etax', 'etaxp', 'xAperture', 'betay', 'alphay', 'psiy', 'etay', 'etayp', 'yAperture', 'pCentral0', 'ElementName', 'ElementOccurence', 'ElementType']
[ "get", "all", "available", "column", "names", "from", "sddsfile" ]
python
train
briancappello/py-yaml-fixtures
py_yaml_fixtures/fixtures_loader.py
https://github.com/briancappello/py-yaml-fixtures/blob/60c37daf58ec3b1c4bba637889949523a69b8a73/py_yaml_fixtures/fixtures_loader.py#L53-L95
def create_all(self, progress_callback: Optional[callable] = None) -> Dict[str, object]:
    """
    Creates all the models discovered from fixture files in :attr:`fixtures_dir`.

    :param progress_callback: An optional function to track progress. It must take
        three parameters:

        - an :class:`Identifier`
        - the model instance
        - and a boolean specifying whether the model was created

    :return: A dictionary keyed by identifier where the values are model instances.
    """
    if not self._loaded:
        self._load_data()

    # build up a directed acyclic graph to determine the model instantiation order
    dag = nx.DiGraph()
    for model_class_name, dependencies in self.relationships.items():
        dag.add_node(model_class_name)
        for dep in dependencies:
            dag.add_edge(model_class_name, dep)

    try:
        creation_order = reversed(list(nx.topological_sort(dag)))
    except nx.NetworkXUnfeasible:
        raise Exception('Circular dependency detected between models: ' +
                        ', '.join(['{a} -> {b}'.format(a=a, b=b)
                                   for a, b in nx.find_cycle(dag)]))

    # create or update the models in the determined order
    rv = {}
    for model_class_name in creation_order:
        for identifier_key, data in self.model_fixtures[model_class_name].items():
            identifier = Identifier(model_class_name, identifier_key)
            data = self.factory.maybe_convert_values(identifier, data)
            self._cache[identifier_key] = data
            model_instance, created = self.factory.create_or_update(identifier, data)
            if progress_callback:
                progress_callback(identifier, model_instance, created)
            rv[identifier_key] = model_instance

    self.factory.commit()
    return rv
[ "def", "create_all", "(", "self", ",", "progress_callback", ":", "Optional", "[", "callable", "]", "=", "None", ")", "->", "Dict", "[", "str", ",", "object", "]", ":", "if", "not", "self", ".", "_loaded", ":", "self", ".", "_load_data", "(", ")", "# ...
Creates all the models discovered from fixture files in :attr:`fixtures_dir`. :param progress_callback: An optional function to track progress. It must take three parameters: - an :class:`Identifier` - the model instance - and a boolean specifying whether the model was created :return: A dictionary keyed by identifier where the values are model instances.
[ "Creates", "all", "the", "models", "discovered", "from", "fixture", "files", "in", ":", "attr", ":", "fixtures_dir", "." ]
python
train
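The dependency-ordering idea from create_all in isolation, with hypothetical model names; creation order is the reverse of the topological order because edges point from dependent model to dependency:

import networkx as nx

dag = nx.DiGraph()
dag.add_edge('Post', 'User')      # Post fixtures reference User fixtures
dag.add_edge('Comment', 'Post')
creation_order = list(reversed(list(nx.topological_sort(dag))))
# -> ['User', 'Post', 'Comment']: dependencies are instantiated first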
kxgames/kxg
kxg/tokens.py
https://github.com/kxgames/kxg/blob/a68c01dc4aa1abf6b3780ba2c65a7828282566aa/kxg/tokens.py#L69-L139
def add_safety_check(member_name, member_value): """ If the given member is a method that is public (i.e. doesn't start with an underscore) and hasn't been marked as read-only, replace it with a version that will check to make sure the world is locked. This ensures that methods that alter the token are only called from update methods or messages. """ import functools from types import FunctionType # Bail if the given member is read-only, private, or not a method. is_method = isinstance(member_value, FunctionType) is_read_only = hasattr(member_value, '_kxg_read_only') is_private = member_name.startswith('_') if not is_method or is_read_only or is_private: return member_value def safety_checked_method(self, *args, **kwargs): """ Make sure that the token the world is locked before a non-read-only method is called. """ # Because these checks are pretty magical, I want to be really # careful to avoid raising any exceptions other than the check # itself (which comes with a very clear error message). Here, that # means using getattr() to make sure the world attribute actually # exists. For example, there's nothing wrong with the following # code, but it does call a safety-checked method before the world # attribute is defined: # # class MyToken(kxg.Token): # def __init__(self): # self.init_helper() # super().__init__() world = getattr(self, 'world', None) if world and world.is_locked(): nonlocal member_name raise ApiUsageError("""\ attempted unsafe invocation of {self.__class__.__name__}.{member_name}(). This error brings attention to situations that might cause synchronization issues in multiplayer games. The {member_name}() method is not marked as read-only, but it was invoked from outside the context of a message. This means that if {member_name}() makes any changes to the world, those changes will not be propagated. If {member_name}() is actually read-only, label it with the @kxg.read_only decorator.""") # After making that check, call the method as usual. return member_value(self, *args, **kwargs) # Preserve any "forum observer" decorations that have been placed on # the method and restore the method's original name and module strings, # to make inspection and debugging a little easier. functools.update_wrapper( safety_checked_method, member_value, assigned=functools.WRAPPER_ASSIGNMENTS + ( '_kxg_subscribe_to_message', '_kxg_subscribe_to_sync_response', '_kxg_subscribe_to_undo_response', ) ) return safety_checked_method
[ "def", "add_safety_check", "(", "member_name", ",", "member_value", ")", ":", "import", "functools", "from", "types", "import", "FunctionType", "# Bail if the given member is read-only, private, or not a method.", "is_method", "=", "isinstance", "(", "member_value", ",", "F...
If the given member is a method that is public (i.e. doesn't start with an underscore) and hasn't been marked as read-only, replace it with a version that will check to make sure the world is locked. This ensures that methods that alter the token are only called from update methods or messages.
[ "If", "the", "given", "member", "is", "a", "method", "that", "is", "public", "(", "i", ".", "e", ".", "doesn", "t", "start", "with", "an", "underscore", ")", "and", "hasn", "t", "been", "marked", "as", "read", "-", "only", "replace", "it", "with", ...
python
valid
slhck/ffmpeg-normalize
ffmpeg_normalize/_streams.py
https://github.com/slhck/ffmpeg-normalize/blob/18477a7f2d092777ee238340be40c04ecb45c132/ffmpeg_normalize/_streams.py#L78-L117
def parse_volumedetect_stats(self): """ Use ffmpeg with volumedetect filter to get the mean volume of the input file. """ logger.info( "Running first pass volumedetect filter for stream {}".format(self.stream_id) ) filter_str = '[0:{}]volumedetect'.format(self.stream_id) cmd = [ self.media_file.ffmpeg_normalize.ffmpeg_exe, '-nostdin', '-y', '-i', self.media_file.input_file, '-filter_complex', filter_str, '-vn', '-sn', '-f', 'null', NUL ] cmd_runner = CommandRunner(cmd) for progress in cmd_runner.run_ffmpeg_command(): yield progress output = cmd_runner.get_output() logger.debug("Volumedetect command output:") logger.debug(output) mean_volume_matches = re.findall(r"mean_volume: ([\-\d\.]+) dB", output) if mean_volume_matches: self.loudness_statistics['mean'] = float(mean_volume_matches[0]) else: raise FFmpegNormalizeError( "Could not get mean volume for {}".format(self.media_file.input_file) ) max_volume_matches = re.findall(r"max_volume: ([\-\d\.]+) dB", output) if max_volume_matches: self.loudness_statistics['max'] = float(max_volume_matches[0]) else: raise FFmpegNormalizeError( "Could not get max volume for {}".format(self.media_file.input_file) )
[ "def", "parse_volumedetect_stats", "(", "self", ")", ":", "logger", ".", "info", "(", "\"Running first pass volumedetect filter for stream {}\"", ".", "format", "(", "self", ".", "stream_id", ")", ")", "filter_str", "=", "'[0:{}]volumedetect'", ".", "format", "(", "...
Use ffmpeg with volumedetect filter to get the mean volume of the input file.
[ "Use", "ffmpeg", "with", "volumedetect", "filter", "to", "get", "the", "mean", "volume", "of", "the", "input", "file", "." ]
python
train
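The two regexes from parse_volumedetect_stats applied to a representative chunk of volumedetect output (the log text is illustrative, not captured from a real run):

import re

output = ("[Parsed_volumedetect_0 @ 0x55d] mean_volume: -23.5 dB\n"
          "[Parsed_volumedetect_0 @ 0x55d] max_volume: -7.0 dB")
re.findall(r"mean_volume: ([\-\d\.]+) dB", output)  # -> ['-23.5']
re.findall(r"max_volume: ([\-\d\.]+) dB", output)   # -> ['-7.0']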
DMSC-Instrument-Data/lewis
src/lewis/devices/linkam_t95/interfaces/stream_interface.py
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/devices/linkam_t95/interfaces/stream_interface.py#L172-L190
def pump_command(self, param): """ Models "LNP Pump Commands" functionality of device. Switches between automatic or manual pump mode, and adjusts speed when in manual mode. :param param: 'a0' for auto, 'm0' for manual, [0-N] for speed. :return: """ lookup = [c for c in "0123456789:;<=>?@ABCDEFGHIJKLMN"] if param == "a0": self.device.pump_manual_mode = False elif param == "m0": self.device.pump_manual_mode = True elif param in lookup: self.device.manual_target_speed = lookup.index(param) return ""
[ "def", "pump_command", "(", "self", ",", "param", ")", ":", "lookup", "=", "[", "c", "for", "c", "in", "\"0123456789:;<=>?@ABCDEFGHIJKLMN\"", "]", "if", "param", "==", "\"a0\"", ":", "self", ".", "device", ".", "pump_manual_mode", "=", "False", "elif", "pa...
Models "LNP Pump Commands" functionality of device. Switches between automatic or manual pump mode, and adjusts speed when in manual mode. :param param: 'a0' for auto, 'm0' for manual, [0-N] for speed. :return:
[ "Models", "LNP", "Pump", "Commands", "functionality", "of", "device", "." ]
python
train
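How the manual speed lookup in pump_command resolves, reproduced stand-alone:

lookup = [c for c in "0123456789:;<=>?@ABCDEFGHIJKLMN"]
lookup.index('0')   # -> 0   (slowest manual pump speed)
lookup.index('N')   # -> 30  (fastest manual pump speed)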
odlgroup/odl
odl/operator/operator.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/operator/operator.py#L697-L748
def norm(self, estimate=False, **kwargs): """Return the operator norm of this operator. If this operator is non-linear, this should be the Lipschitz constant. Parameters ---------- estimate : bool If true, estimate the operator norm. By default, it is estimated using `power_method_opnorm`, which is only applicable for linear operators. Subclasses are allowed to ignore this parameter if they can provide an exact value. Other Parameters ---------------- kwargs : If ``estimate`` is True, pass these arguments to the `power_method_opnorm` call. Returns ------- norm : float Examples -------- Some operators know their own operator norm and do not need an estimate >>> spc = odl.rn(3) >>> id = odl.IdentityOperator(spc) >>> id.norm(True) 1.0 For others, there is no closed form expression and an estimate is needed: >>> spc = odl.uniform_discr(0, 1, 3) >>> grad = odl.Gradient(spc) >>> opnorm = grad.norm(estimate=True) """ if not estimate: raise NotImplementedError('`Operator.norm()` not implemented, use ' '`Operator.norm(estimate=True)` to ' 'obtain an estimate.') else: norm = getattr(self, '__norm', None) if norm is not None: return norm else: from odl.operator.oputils import power_method_opnorm self.__norm = power_method_opnorm(self, **kwargs) return self.__norm
[ "def", "norm", "(", "self", ",", "estimate", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "not", "estimate", ":", "raise", "NotImplementedError", "(", "'`Operator.norm()` not implemented, use '", "'`Operator.norm(estimate=True)` to '", "'obtain an estimate.'",...
Return the operator norm of this operator. If this operator is non-linear, this should be the Lipschitz constant. Parameters ---------- estimate : bool If true, estimate the operator norm. By default, it is estimated using `power_method_opnorm`, which is only applicable for linear operators. Subclasses are allowed to ignore this parameter if they can provide an exact value. Other Parameters ---------------- kwargs : If ``estimate`` is True, pass these arguments to the `power_method_opnorm` call. Returns ------- norm : float Examples -------- Some operators know their own operator norm and do not need an estimate >>> spc = odl.rn(3) >>> id = odl.IdentityOperator(spc) >>> id.norm(True) 1.0 For others, there is no closed form expression and an estimate is needed: >>> spc = odl.uniform_discr(0, 1, 3) >>> grad = odl.Gradient(spc) >>> opnorm = grad.norm(estimate=True)
[ "Return", "the", "operator", "norm", "of", "this", "operator", "." ]
python
train
nickoala/telepot
telepot/aio/delegate.py
https://github.com/nickoala/telepot/blob/3792fde251d0f1d5a6ca16c8ad1a71f89360c41d/telepot/aio/delegate.py#L47-L56
def call(corofunc, *args, **kwargs): """ :return: a delegator function that returns a coroutine object by calling ``corofunc(seed_tuple, *args, **kwargs)``. """ corofunc = _ensure_coroutine_function(corofunc) def f(seed_tuple): return corofunc(seed_tuple, *args, **kwargs) return f
[ "def", "call", "(", "corofunc", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "corofunc", "=", "_ensure_coroutine_function", "(", "corofunc", ")", "def", "f", "(", "seed_tuple", ")", ":", "return", "corofunc", "(", "seed_tuple", ",", "*", "args", ...
:return: a delegator function that returns a coroutine object by calling ``corofunc(seed_tuple, *args, **kwargs)``.
[ ":", "return", ":", "a", "delegator", "function", "that", "returns", "a", "coroutine", "object", "by", "calling", "corofunc", "(", "seed_tuple", "*", "args", "**", "kwargs", ")", "." ]
python
train
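A usage sketch for the call delegator with a hypothetical handler coroutine and seed tuple:

async def handler(seed_tuple, greeting):
    print(greeting, seed_tuple)

delegator = call(handler, 'hello')
coro = delegator(('bot', 'msg', 'seed'))  # a coroutine object, not yet awaited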
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_lowering_match/utils.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_lowering_match/utils.py#L15-L24
def convert_coerce_type_to_instanceof_filter(coerce_type_block): """Create an "INSTANCEOF" Filter block from a CoerceType block.""" coerce_type_target = get_only_element_from_collection(coerce_type_block.target_class) # INSTANCEOF requires the target class to be passed in as a string, # so we make the target class a string literal. new_predicate = BinaryComposition( u'INSTANCEOF', LocalField('@this'), Literal(coerce_type_target)) return Filter(new_predicate)
[ "def", "convert_coerce_type_to_instanceof_filter", "(", "coerce_type_block", ")", ":", "coerce_type_target", "=", "get_only_element_from_collection", "(", "coerce_type_block", ".", "target_class", ")", "# INSTANCEOF requires the target class to be passed in as a string,", "# so we make...
Create an "INSTANCEOF" Filter block from a CoerceType block.
[ "Create", "an", "INSTANCEOF", "Filter", "block", "from", "a", "CoerceType", "block", "." ]
python
train
jbasko/wr-profiles
wr_profiles/envvar_profile.py
https://github.com/jbasko/wr-profiles/blob/809c843e24108faa78c3ff77c038f6c27868191e/wr_profiles/envvar_profile.py#L401-L407
def activate(self, profile_name=NotSet): """ Sets <PROFILE_ROOT>_PROFILE environment variable to the name of the current profile. """ if profile_name is NotSet: profile_name = self.profile_name self._active_profile_name = profile_name
[ "def", "activate", "(", "self", ",", "profile_name", "=", "NotSet", ")", ":", "if", "profile_name", "is", "NotSet", ":", "profile_name", "=", "self", ".", "profile_name", "self", ".", "_active_profile_name", "=", "profile_name" ]
Sets <PROFILE_ROOT>_PROFILE environment variable to the name of the current profile.
[ "Sets", "<PROFILE_ROOT", ">", "_PROFILE", "environment", "variable", "to", "the", "name", "of", "the", "current", "profile", "." ]
python
train
ArchiveTeam/wpull
wpull/protocol/ftp/util.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/protocol/ftp/util.py#L86-L95
def reply_code_tuple(code: int) -> Tuple[int, int, int]: '''Return the reply code as a tuple. Args: code: The reply code. Returns: Each item in the tuple is the digit. ''' return code // 100, code // 10 % 10, code % 10
[ "def", "reply_code_tuple", "(", "code", ":", "int", ")", "->", "Tuple", "[", "int", ",", "int", ",", "int", "]", ":", "return", "code", "//", "100", ",", "code", "//", "10", "%", "10", ",", "code", "%", "10" ]
Return the reply code as a tuple. Args: code: The reply code. Returns: Each item in the tuple is the digit.
[ "Return", "the", "reply", "code", "as", "a", "tuple", "." ]
python
train
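Digit-extraction examples for reply_code_tuple, directly checkable from the arithmetic above:

reply_code_tuple(213)  # -> (2, 1, 3)
reply_code_tuple(550)  # -> (5, 5, 0)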