repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
IRC-SPHERE/HyperStream
hyperstream/workflow/workflow.py
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/workflow/workflow.py#L573-L605
def factorgraph_viz(d):
    """
    Map the dictionary into factorgraph-viz format.

    See https://github.com/mbforbes/factorgraph-viz

    :param d: The dictionary
    :return: The formatted dictionary
    """
    out = defaultdict(list)
    # Random-variable nodes come straight from the input's node list.
    for rv in d['nodes']:
        out['nodes'].append({'id': rv['id'], 'type': 'rv'})
    # Each factor becomes a 'fac' node plus links from its sources and to its sink.
    for fac in d['factors']:
        out['nodes'].append({'id': fac['id'], 'type': 'fac'})
        for src in fac['sources']:
            out['links'].append({'source': src, 'target': fac['id']})
        if fac['sink']:
            out['links'].append({'source': fac['id'], 'target': fac['sink']})
    return dict(out)
[ "def", "factorgraph_viz", "(", "d", ")", ":", "m", "=", "defaultdict", "(", "list", ")", "for", "node", "in", "d", "[", "'nodes'", "]", ":", "m", "[", "'nodes'", "]", ".", "append", "(", "dict", "(", "id", "=", "node", "[", "'id'", "]", ",", "t...
Map the dictionary into factorgraph-viz format. See https://github.com/mbforbes/factorgraph-viz :param d: The dictionary :return: The formatted dictionary
[ "Map", "the", "dictionary", "into", "factorgraph", "-", "viz", "format", ".", "See", "https", ":", "//", "github", ".", "com", "/", "mbforbes", "/", "factorgraph", "-", "viz" ]
python
train
Thermondo/django-heroku-connect
heroku_connect/db/router.py
https://github.com/Thermondo/django-heroku-connect/blob/f390e0fbf256ee79b30bb88f9a8c9576c6c8d9b5/heroku_connect/db/router.py#L26-L39
def db_for_write(self, model, **hints):
    """
    Prevent write actions on read-only tables.

    Raises:
        WriteNotSupportedError: If models.sf_access is ``read_only``.
    """
    # Models without an sf_access attribute are not Heroku Connect
    # managed and are always writable.
    try:
        access = model.sf_access
    except AttributeError:
        return None
    if access == READ_ONLY:
        raise WriteNotSupportedError("%r is a read-only model." % model)
    return None
[ "def", "db_for_write", "(", "self", ",", "model", ",", "*", "*", "hints", ")", ":", "try", ":", "if", "model", ".", "sf_access", "==", "READ_ONLY", ":", "raise", "WriteNotSupportedError", "(", "\"%r is a read-only model.\"", "%", "model", ")", "except", "Att...
Prevent write actions on read-only tables. Raises: WriteNotSupportedError: If models.sf_access is ``read_only``.
[ "Prevent", "write", "actions", "on", "read", "-", "only", "tables", "." ]
python
train
welchbj/sublemon
sublemon/runtime.py
https://github.com/welchbj/sublemon/blob/edbfd1ca2a0ce3de9470dfc88f8db1cadf4b6326/sublemon/runtime.py#L123-L138
def spawn(self, *cmds: str) -> List[SublemonSubprocess]:
    """Coroutine to spawn shell commands.

    If `max_concurrency` is reached during the attempt to spawn the
    specified subprocesses, excess subprocesses will block while
    attempting to acquire this server's semaphore.
    """
    # Spawning from a stopped server would schedule work on a dead loop.
    if not self._is_running:
        raise SublemonRuntimeError(
            'Attempted to spawn subprocesses from a non-started server')

    # Build all subprocess wrappers first, then schedule each one on the
    # event loop; the caller gets the handles back immediately.
    spawned = [SublemonSubprocess(self, one_cmd) for one_cmd in cmds]
    for proc in spawned:
        asyncio.ensure_future(proc.spawn())
    return spawned
[ "def", "spawn", "(", "self", ",", "*", "cmds", ":", "str", ")", "->", "List", "[", "SublemonSubprocess", "]", ":", "if", "not", "self", ".", "_is_running", ":", "raise", "SublemonRuntimeError", "(", "'Attempted to spawn subprocesses from a non-started server'", ")...
Coroutine to spawn shell commands. If `max_concurrency` is reached during the attempt to spawn the specified subprocesses, excess subprocesses will block while attempting to acquire this server's semaphore.
[ "Coroutine", "to", "spawn", "shell", "commands", "." ]
python
train
blue-yonder/tsfresh
tsfresh/feature_extraction/feature_calculators.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/feature_calculators.py#L170-L179
def set_property(key, value):
    """
    This method returns a decorator that sets the property key of the
    function to value
    """
    def _apply(target):
        # Attach the property to the decorated function itself.
        setattr(target, key, value)
        # For the special "fctype" key, also advertise the type in the docstring
        # (only when a docstring exists to append to).
        doc = target.__doc__
        if doc and key == "fctype":
            target.__doc__ = doc + "\n\n *This function is of type: " + value + "*\n"
        return target
    return _apply
[ "def", "set_property", "(", "key", ",", "value", ")", ":", "def", "decorate_func", "(", "func", ")", ":", "setattr", "(", "func", ",", "key", ",", "value", ")", "if", "func", ".", "__doc__", "and", "key", "==", "\"fctype\"", ":", "func", ".", "__doc_...
This method returns a decorator that sets the property key of the function to value
[ "This", "method", "returns", "a", "decorator", "that", "sets", "the", "property", "key", "of", "the", "function", "to", "value" ]
python
train
crccheck/cloudwatch-to-graphite
plumbum.py
https://github.com/crccheck/cloudwatch-to-graphite/blob/28a11ee56f7231cef6b6f8af142a8aab3d2eb5a6/plumbum.py#L157-L161
def list_rds(region, filter_by_kwargs):
    """List all RDS thingys."""
    # Connect to the region, fetch every DB instance, then filter.
    connection = boto.rds.connect_to_region(region)
    all_instances = connection.get_all_dbinstances()
    return lookup(all_instances, filter_by=filter_by_kwargs)
[ "def", "list_rds", "(", "region", ",", "filter_by_kwargs", ")", ":", "conn", "=", "boto", ".", "rds", ".", "connect_to_region", "(", "region", ")", "instances", "=", "conn", ".", "get_all_dbinstances", "(", ")", "return", "lookup", "(", "instances", ",", "...
List all RDS thingys.
[ "List", "all", "RDS", "thingys", "." ]
python
train
samuelcolvin/pydantic
pydantic/main.py
https://github.com/samuelcolvin/pydantic/blob/bff8a1789dfde2c38928cced6640887b53615aa3/pydantic/main.py#L296-L316
def json(
    self,
    *,
    include: 'SetStr' = None,
    exclude: 'SetStr' = None,
    by_alias: bool = False,
    skip_defaults: bool = False,
    encoder: Optional[Callable[[Any], Any]] = None,
    **dumps_kwargs: Any,
) -> str:
    """
    Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.

    `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.
    """
    # Fall back to the model's configured encoder when none is supplied.
    default = cast(Callable[[Any], Any], encoder or self._json_encoder)
    payload = self.dict(
        include=include, exclude=exclude, by_alias=by_alias, skip_defaults=skip_defaults
    )
    # NOTE: `json` here resolves to the stdlib module, not this method,
    # because the method name only shadows it as a class attribute.
    return json.dumps(payload, default=default, **dumps_kwargs)
[ "def", "json", "(", "self", ",", "*", ",", "include", ":", "'SetStr'", "=", "None", ",", "exclude", ":", "'SetStr'", "=", "None", ",", "by_alias", ":", "bool", "=", "False", ",", "skip_defaults", ":", "bool", "=", "False", ",", "encoder", ":", "Optio...
Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`. `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.
[ "Generate", "a", "JSON", "representation", "of", "the", "model", "include", "and", "exclude", "arguments", "as", "per", "dict", "()", "." ]
python
train
a1ezzz/wasp-general
wasp_general/datetime.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/datetime.py#L65-L84
def local_datetime(dt=None, utc_value=True):
    """ Convert UTC datetime and/or datetime without timezone information to local datetime with
    timezone information

    :param dt: datetime in UTC to convert. If is None, then system datetime value is used
    :param utc_value: whether dt is a datetime in UTC or in system timezone without timezone information
    :return: datetime for system (local) timezone with tz set
    """
    # TODO: rename utc_value to utc_tz or in_utc_tz
    if dt is None:
        # No input: current moment, already carrying the local timezone.
        return datetime.now(tz=local_tz())

    value = dt
    if value.utcoffset() is None:
        # Naive datetime: interpret it per the utc_value flag.
        # (Identity check on False is deliberate - only an explicit False
        # means "already local time".)
        if utc_value is False:
            return value.replace(tzinfo=local_tz())
        value = value.replace(tzinfo=timezone.utc)
    return value.astimezone(local_tz())
[ "def", "local_datetime", "(", "dt", "=", "None", ",", "utc_value", "=", "True", ")", ":", "# TODO: rename utc_value to utc_tz or in_utc_tz", "if", "dt", "is", "None", ":", "return", "datetime", ".", "now", "(", "tz", "=", "local_tz", "(", ")", ")", "result",...
Convert UTC datetime and/or datetime without timezone information to local datetime with timezone information :param dt: datetime in UTC to convert. If is None, then system datetime value is used :param utc_value: whether dt is a datetime in UTC or in system timezone without timezone information :return: datetime for system (local) timezone with tz set
[ "Convert", "UTC", "datetime", "and", "/", "or", "datetime", "without", "timezone", "information", "to", "local", "datetime", "with", "timezone", "information" ]
python
train
yjzhang/uncurl_python
uncurl/preprocessing.py
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/preprocessing.py#L69-L91
def cell_normalize(data):
    """
    Returns the data where the expression is normalized so that the total
    count per cell is equal.
    """
    if sparse.issparse(data):
        # Convert to CSC so columns (cells) are contiguous, then normalize in-place.
        data = sparse.csc_matrix(data.astype(float))
        sparse_cell_normalize(data.data, data.indices, data.indptr,
                              data.shape[1], data.shape[0])
        return data

    data_norm = data.astype(float)
    per_cell_counts = []
    for col in range(data_norm.shape[1]):
        # Column slices are views, so dividing in place rescales data_norm itself.
        column_view = data_norm[:, col]
        per_cell_counts.append(column_view.sum())
        column_view /= per_cell_counts[col]
    # Rescale so every cell sums to the median of the original totals.
    data_norm *= np.median(per_cell_counts)
    return data_norm
[ "def", "cell_normalize", "(", "data", ")", ":", "if", "sparse", ".", "issparse", "(", "data", ")", ":", "data", "=", "sparse", ".", "csc_matrix", "(", "data", ".", "astype", "(", "float", ")", ")", "# normalize in-place", "sparse_cell_normalize", "(", "dat...
Returns the data where the expression is normalized so that the total count per cell is equal.
[ "Returns", "the", "data", "where", "the", "expression", "is", "normalized", "so", "that", "the", "total", "count", "per", "cell", "is", "equal", "." ]
python
train
ArangoDB-Community/pyArango
pyArango/collection.py
https://github.com/ArangoDB-Community/pyArango/blob/dd72e5f6c540e5e148943d615ddf7553bb78ce0b/pyArango/collection.py#L302-L309
def createDocument_(self, initDict = None) :
    "create and returns a completely empty document or one populated with initDict"
    # Default to an empty payload when no initial values are supplied.
    initV = {} if initDict is None else initDict
    return self.documentClass(self, initV)
[ "def", "createDocument_", "(", "self", ",", "initDict", "=", "None", ")", ":", "if", "initDict", "is", "None", ":", "initV", "=", "{", "}", "else", ":", "initV", "=", "initDict", "return", "self", ".", "documentClass", "(", "self", ",", "initV", ")" ]
create and returns a completely empty document or one populated with initDict
[ "create", "and", "returns", "a", "completely", "empty", "document", "or", "one", "populated", "with", "initDict" ]
python
train
klen/muffin-metrics
muffin_metrics.py
https://github.com/klen/muffin-metrics/blob/b62fc25172e3e1e9fc6dc6c8da3170935ee69f01/muffin_metrics.py#L226-L232
def _send(self, *messages): """Send messages.""" if not self.transport: return False message = '\n'.join(messages) + '\n' self.transport.write(message.encode('ascii'))
[ "def", "_send", "(", "self", ",", "*", "messages", ")", ":", "if", "not", "self", ".", "transport", ":", "return", "False", "message", "=", "'\\n'", ".", "join", "(", "messages", ")", "+", "'\\n'", "self", ".", "transport", ".", "write", "(", "messag...
Send messages.
[ "Send", "messages", "." ]
python
train
upsight/doctor
doctor/docs/base.py
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/docs/base.py#L495-L505
def class_name_to_resource_name(class_name: str) -> str:
    """Converts a camel case class name to a resource name with spaces.

    >>> class_name_to_resource_name('FooBarObject')
    'Foo Bar Object'

    :param class_name: The name to convert.
    :returns: The resource name.
    """
    # First pass splits before capitalized words; second pass handles
    # lowercase/digit-to-uppercase transitions (e.g. acronym boundaries).
    first_pass = re.compile('(.)([A-Z][a-z]+)')
    second_pass = re.compile('([a-z0-9])([A-Z])')
    return second_pass.sub(r'\1 \2', first_pass.sub(r'\1 \2', class_name))
[ "def", "class_name_to_resource_name", "(", "class_name", ":", "str", ")", "->", "str", ":", "s", "=", "re", ".", "sub", "(", "'(.)([A-Z][a-z]+)'", ",", "r'\\1 \\2'", ",", "class_name", ")", "return", "re", ".", "sub", "(", "'([a-z0-9])([A-Z])'", ",", "r'\\1 ...
Converts a camel case class name to a resource name with spaces. >>> class_name_to_resource_name('FooBarObject') 'Foo Bar Object' :param class_name: The name to convert. :returns: The resource name.
[ "Converts", "a", "camel", "case", "class", "name", "to", "a", "resource", "name", "with", "spaces", "." ]
python
train
pyhys/minimalmodbus
minimalmodbus.py
https://github.com/pyhys/minimalmodbus/blob/e99f4d74c83258c6039073082955ac9bed3f2155/minimalmodbus.py#L802-L932
def _communicate(self, request, number_of_bytes_to_read):
    """Talk to the slave via a serial port.

    Args:
        request (str): The raw request that is to be sent to the slave.
        number_of_bytes_to_read (int): number of bytes to read

    Returns:
        The raw data (string) returned from the slave.

    Raises:
        TypeError, ValueError, IOError

    Note that the answer might have strange ASCII control signs, which
    makes it difficult to print it in the promt (messes up a bit).
    Use repr() to make the string printable (shows ASCII values for control signs.)

    Will block until reaching *number_of_bytes_to_read* or timeout.

    If the attribute :attr:`Instrument.debug` is :const:`True`, the communication
    details are printed.

    If the attribute :attr:`Instrument.close_port_after_each_call` is :const:`True`
    the serial port is closed after each call.

    Timing::

                Request from master (Master is writing)
                |
                |                            Response from slave (Master is reading)
                |                            |
        ----W----R----------------------------W-------R----------------------------------------
                 |                            |       |
                 |<----- Silent period ------>|       |
                                              |       |
                             Roundtrip time ->|-------|<-

    The resolution for Python's time.time() is lower on Windows than on Linux.
    It is about 16 ms on Windows according to
    http://stackoverflow.com/questions/157359/accurate-timestamping-in-python

    For Python3, the information sent to and from pySerial should be of the type
    bytes. This is taken care of automatically by MinimalModbus.
    """
    # Validate argument types/values before touching the serial port.
    _checkString(request, minlength=1, description='request')
    _checkInt(number_of_bytes_to_read)

    if self.debug:
        _print_out('\nMinimalModbus debug mode. Writing to instrument (expecting {} bytes back): {!r} ({})'. \
            format(number_of_bytes_to_read, request, _hexlify(request)))

    if self.close_port_after_each_call:
        self.serial.open()

    #self.serial.flushInput() TODO

    if sys.version_info[0] > 2:
        request = bytes(request, encoding='latin1')  # Convert types to make it Python3 compatible

    # Sleep to make sure 3.5 character times have passed
    # (the inter-message silent period required on the bus; timestamps are
    # tracked per port in the module-level _LATEST_READ_TIMES dict).
    minimum_silent_period = _calculate_minimum_silent_period(self.serial.baudrate)
    time_since_read = time.time() - _LATEST_READ_TIMES.get(self.serial.port, 0)

    if time_since_read < minimum_silent_period:
        sleep_time = minimum_silent_period - time_since_read

        if self.debug:
            template = 'MinimalModbus debug mode. Sleeping for {:.1f} ms. ' + \
                'Minimum silent period: {:.1f} ms, time since read: {:.1f} ms.'
            text = template.format(
                sleep_time * _SECONDS_TO_MILLISECONDS,
                minimum_silent_period * _SECONDS_TO_MILLISECONDS,
                time_since_read * _SECONDS_TO_MILLISECONDS)
            _print_out(text)

        time.sleep(sleep_time)

    elif self.debug:
        template = 'MinimalModbus debug mode. No sleep required before write. ' + \
            'Time since previous read: {:.1f} ms, minimum silent period: {:.2f} ms.'
        text = template.format(
            time_since_read * _SECONDS_TO_MILLISECONDS,
            minimum_silent_period * _SECONDS_TO_MILLISECONDS)
        _print_out(text)

    # Write request
    latest_write_time = time.time()
    self.serial.write(request)

    # Read and discard local echo
    # (some RS-485 adapters echo back everything written to the bus).
    if self.handle_local_echo:
        localEchoToDiscard = self.serial.read(len(request))
        if self.debug:
            template = 'MinimalModbus debug mode. Discarding this local echo: {!r} ({} bytes).'
            text = template.format(localEchoToDiscard, len(localEchoToDiscard))
            _print_out(text)
        if localEchoToDiscard != request:
            template = 'Local echo handling is enabled, but the local echo does not match the sent request. ' + \
                'Request: {!r} ({} bytes), local echo: {!r} ({} bytes).'
            text = template.format(request, len(request), localEchoToDiscard, len(localEchoToDiscard))
            raise IOError(text)

    # Read response (blocks until number_of_bytes_to_read bytes or timeout).
    answer = self.serial.read(number_of_bytes_to_read)
    # Record when this port was last read, for the silent-period check above.
    _LATEST_READ_TIMES[self.serial.port] = time.time()

    if self.close_port_after_each_call:
        self.serial.close()

    if sys.version_info[0] > 2:
        answer = str(answer, encoding='latin1')  # Convert types to make it Python3 compatible

    if self.debug:
        template = 'MinimalModbus debug mode. Response from instrument: {!r} ({}) ({} bytes), ' + \
            'roundtrip time: {:.1f} ms. Timeout setting: {:.1f} ms.\n'
        text = template.format(
            answer,
            _hexlify(answer),
            len(answer),
            (_LATEST_READ_TIMES.get(self.serial.port, 0) - latest_write_time) * _SECONDS_TO_MILLISECONDS,
            self.serial.timeout * _SECONDS_TO_MILLISECONDS)
        _print_out(text)

    # An empty answer means the slave never responded within the timeout.
    if len(answer) == 0:
        raise IOError('No communication with the instrument (no answer)')

    return answer
[ "def", "_communicate", "(", "self", ",", "request", ",", "number_of_bytes_to_read", ")", ":", "_checkString", "(", "request", ",", "minlength", "=", "1", ",", "description", "=", "'request'", ")", "_checkInt", "(", "number_of_bytes_to_read", ")", "if", "self", ...
Talk to the slave via a serial port. Args: request (str): The raw request that is to be sent to the slave. number_of_bytes_to_read (int): number of bytes to read Returns: The raw data (string) returned from the slave. Raises: TypeError, ValueError, IOError Note that the answer might have strange ASCII control signs, which makes it difficult to print it in the promt (messes up a bit). Use repr() to make the string printable (shows ASCII values for control signs.) Will block until reaching *number_of_bytes_to_read* or timeout. If the attribute :attr:`Instrument.debug` is :const:`True`, the communication details are printed. If the attribute :attr:`Instrument.close_port_after_each_call` is :const:`True` the serial port is closed after each call. Timing:: Request from master (Master is writing) | | Response from slave (Master is reading) | | ----W----R----------------------------W-------R---------------------------------------- | | | |<----- Silent period ------>| | | | Roundtrip time ---->|-------|<-- The resolution for Python's time.time() is lower on Windows than on Linux. It is about 16 ms on Windows according to http://stackoverflow.com/questions/157359/accurate-timestamping-in-python For Python3, the information sent to and from pySerial should be of the type bytes. This is taken care of automatically by MinimalModbus.
[ "Talk", "to", "the", "slave", "via", "a", "serial", "port", "." ]
python
train
gwastro/pycbc
pycbc/distributions/gaussian.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/distributions/gaussian.py#L162-L192
def rvs(self, size=1, param=None):
    """Gives a set of random values drawn from this distribution.

    Parameters
    ----------
    size : {1, int}
        The number of values to generate; default is 1.
    param : {None, string}
        If provided, will just return values for the given parameter.
        Otherwise, returns random values for each parameter.

    Returns
    -------
    structured array
        The random values in a numpy structured array. If a param was
        specified, the array will only have an element corresponding to the
        given parameter. Otherwise, the array will have an element for each
        parameter in self's params.
    """
    # One structured-array field per requested parameter.
    fields = [(param, float)] if param is not None else \
        [(p, float) for p in self.params]
    out = numpy.zeros(size, dtype=fields)
    for name, _ in fields:
        sigma = numpy.sqrt(self._var[name])
        mu = self._mean[name]
        lo, hi = self._bounds[name]
        # truncnorm takes its bounds in standard-deviation units about loc.
        out[name][:] = scipy.stats.truncnorm.rvs(
            (lo - mu) / sigma, (hi - mu) / sigma,
            loc=mu, scale=sigma, size=size)
    return out
[ "def", "rvs", "(", "self", ",", "size", "=", "1", ",", "param", "=", "None", ")", ":", "if", "param", "is", "not", "None", ":", "dtype", "=", "[", "(", "param", ",", "float", ")", "]", "else", ":", "dtype", "=", "[", "(", "p", ",", "float", ...
Gives a set of random values drawn from this distribution. Parameters ---------- size : {1, int} The number of values to generate; default is 1. param : {None, string} If provided, will just return values for the given parameter. Otherwise, returns random values for each parameter. Returns ------- structured array The random values in a numpy structured array. If a param was specified, the array will only have an element corresponding to the given parameter. Otherwise, the array will have an element for each parameter in self's params.
[ "Gives", "a", "set", "of", "random", "values", "drawn", "from", "this", "distribution", "." ]
python
train
dwavesystems/dimod
dimod/decorators.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/decorators.py#L141-L170
def bqm_structured(f):
    """Decorator to raise an error if the given bqm does not match the
    sampler's structure.

    Designed to be applied to :meth:`.Sampler.sample`. Expects the wrapped
    function or method to accept a :obj:`.BinaryQuadraticModel` as the second
    input and for the :class:`.Sampler` to also be :class:`.Structured`.
    """
    @wraps(f)
    def new_f(sampler, bqm, **kwargs):
        try:
            adjacency = sampler.structure.adjacency
        except AttributeError:
            # A Structured sampler without a usable structure is a bug in
            # the sampler; anything else simply isn't structured.
            if isinstance(sampler, Structured):
                raise RuntimeError("something is wrong with the structured sampler")
            raise TypeError("sampler does not have a structure property")

        # Every linear variable must be a node of the sampler's graph.
        if any(v not in adjacency for v in bqm.linear):
            # todo: better error message
            raise BinaryQuadraticModelStructureError("given bqm does not match the sampler's structure")

        # Every quadratic interaction must be an edge of the sampler's graph.
        if any(u not in adjacency[v] for u, v in bqm.quadratic):
            # todo: better error message
            raise BinaryQuadraticModelStructureError("given bqm does not match the sampler's structure")

        return f(sampler, bqm, **kwargs)
    return new_f
[ "def", "bqm_structured", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "new_f", "(", "sampler", ",", "bqm", ",", "*", "*", "kwargs", ")", ":", "try", ":", "structure", "=", "sampler", ".", "structure", "adjacency", "=", "structure", ".", ...
Decorator to raise an error if the given bqm does not match the sampler's structure. Designed to be applied to :meth:`.Sampler.sample`. Expects the wrapped function or method to accept a :obj:`.BinaryQuadraticModel` as the second input and for the :class:`.Sampler` to also be :class:`.Structured`.
[ "Decorator", "to", "raise", "an", "error", "if", "the", "given", "bqm", "does", "not", "match", "the", "sampler", "s", "structure", "." ]
python
train
a1ezzz/wasp-general
wasp_general/network/clients/webdav.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/network/clients/webdav.py#L157-L160
def make_directory(self, directory_name, *args, **kwargs):
    """ :meth:`.WNetworkClientProto.make_directory` method implementation
    """
    # New directory is created relative to the current session path.
    client = self.dav_client()
    target_path = self.join_path(self.session_path(), directory_name)
    client.mkdir(target_path)
[ "def", "make_directory", "(", "self", ",", "directory_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "dav_client", "(", ")", ".", "mkdir", "(", "self", ".", "join_path", "(", "self", ".", "session_path", "(", ")", ",", "dire...
:meth:`.WNetworkClientProto.make_directory` method implementation
[ ":", "meth", ":", ".", "WNetworkClientProto", ".", "make_directory", "method", "implementation" ]
python
train
spacetelescope/pysynphot
pysynphot/units.py
https://github.com/spacetelescope/pysynphot/blob/a125ff956f4d94beb157bd51899747a13234bb97/pysynphot/units.py#L27-L60
def Units(uname):
    """Generate a unit object.

    Parameters
    ----------
    uname : str
        Wavelength or flux unit name.

    Returns
    -------
    unit : `BaseUnit` or `None`
        Unit object. `None` means unitless.

    Raises
    ------
    ValueError
        Unknown unit name.

    """
    # Already a unit instance: pass through unchanged.
    if isinstance(uname, BaseUnit):
        return uname
    try:
        # A BaseUnit subclass: instantiate it.
        # NOTE: a class that is NOT a subclass falls through and returns
        # None implicitly, matching the original behavior.
        if issubclass(uname, BaseUnit):
            return uname()
    except TypeError:
        # Not a class at all; treat it as a unit-name string.
        try:
            return factory(uname)
        except KeyError:
            if uname == str(None):
                return None
            raise ValueError("Unknown units %s"%uname)
[ "def", "Units", "(", "uname", ")", ":", "if", "isinstance", "(", "uname", ",", "BaseUnit", ")", ":", "return", "uname", "else", ":", "try", ":", "if", "issubclass", "(", "uname", ",", "BaseUnit", ")", ":", "return", "uname", "(", ")", "except", "Type...
Generate a unit object. Parameters ---------- uname : str Wavelength or flux unit name. Returns ------- unit : `BaseUnit` or `None` Unit object. `None` means unitless. Raises ------ ValueError Unknown unit name.
[ "Generate", "a", "unit", "object", "." ]
python
train
aliyun/aliyun-odps-python-sdk
odps/df/expr/datetimes.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/df/expr/datetimes.py#L266-L279
def _strftime(expr, date_format):
    """
    Return formatted strings specified by date_format, which supports the same string format
    as the python standard library. Details of the string format can be found in python
    string format doc

    :param expr:
    :param date_format: date format string (e.g. "%Y-%m-%d")
    :type date_format: str
    :return:
    """
    # Build the Strftime datetime op with a string output type.
    op_kwargs = dict(output_type=types.string, _date_format=date_format)
    return datetime_op(expr, Strftime, **op_kwargs)
[ "def", "_strftime", "(", "expr", ",", "date_format", ")", ":", "return", "datetime_op", "(", "expr", ",", "Strftime", ",", "output_type", "=", "types", ".", "string", ",", "_date_format", "=", "date_format", ")" ]
Return formatted strings specified by date_format, which supports the same string format as the python standard library. Details of the string format can be found in python string format doc :param expr: :param date_format: date format string (e.g. “%Y-%m-%d”) :type date_format: str :return:
[ "Return", "formatted", "strings", "specified", "by", "date_format", "which", "supports", "the", "same", "string", "format", "as", "the", "python", "standard", "library", ".", "Details", "of", "the", "string", "format", "can", "be", "found", "in", "python", "st...
python
train
pandas-dev/pandas
pandas/core/dtypes/base.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/base.py#L75-L113
def is_dtype(cls, dtype):
    """Check if we match 'dtype'.

    Parameters
    ----------
    dtype : object
        The object to check.

    Returns
    -------
    is_dtype : bool

    Notes
    -----
    The default implementation is True if

    1. ``cls.construct_from_string(dtype)`` is an instance of
       ``cls``.
    2. ``dtype`` is an object and is an instance of ``cls``
    3. ``dtype`` has a ``dtype`` attribute, and any of the above
       conditions is true for ``dtype.dtype``.
    """
    # Unwrap anything carrying a .dtype attribute (arrays, Series, ...).
    candidate = getattr(dtype, 'dtype', dtype)

    if isinstance(candidate, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):
        # https://github.com/pandas-dev/pandas/issues/22960
        # avoid passing data to `construct_from_string`. This could
        # cause a FutureWarning from numpy about failing elementwise
        # comparison from, e.g., comparing DataFrame == 'category'.
        return False
    if candidate is None:
        return False
    if isinstance(candidate, cls):
        return True
    try:
        # A parseable string counts as a match; unparseable raises TypeError.
        return cls.construct_from_string(candidate) is not None
    except TypeError:
        return False
[ "def", "is_dtype", "(", "cls", ",", "dtype", ")", ":", "dtype", "=", "getattr", "(", "dtype", ",", "'dtype'", ",", "dtype", ")", "if", "isinstance", "(", "dtype", ",", "(", "ABCSeries", ",", "ABCIndexClass", ",", "ABCDataFrame", ",", "np", ".", "dtype"...
Check if we match 'dtype'. Parameters ---------- dtype : object The object to check. Returns ------- is_dtype : bool Notes ----- The default implementation is True if 1. ``cls.construct_from_string(dtype)`` is an instance of ``cls``. 2. ``dtype`` is an object and is an instance of ``cls`` 3. ``dtype`` has a ``dtype`` attribute, and any of the above conditions is true for ``dtype.dtype``.
[ "Check", "if", "we", "match", "dtype", "." ]
python
train
saltstack/salt
salt/states/ipmi.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/ipmi.py#L150-L257
def user_present(name, uid, password, channel=14, callback=False, link_auth=True, ipmi_msg=True, privilege_level='administrator', **kwargs): ''' Ensure IPMI user and user privileges. name name of user (limit 16 bytes) uid user id number (1 to 7) password user password (limit 16 bytes) channel ipmi channel defaults to 14 for auto callback User Restricted to Callback False = User Privilege Limit is determined by the User Privilege Limit parameter privilege_level, for both callback and non-callback connections. True = User Privilege Limit is determined by the privilege_level parameter for callback connections, but is restricted to Callback level for non-callback connections. Thus, a user can only initiate a Callback when they 'call in' to the BMC, but once the callback connection has been made, the user could potentially establish a session as an Operator. link_auth User Link authentication True/False user name and password information will be used for link authentication, e.g. PPP CHAP) for the given channel. Link authentication itself is a global setting for the channel and is enabled/disabled via the serial/modem configuration parameters. ipmi_msg User IPMI Messaging True/False user name and password information will be used for IPMI Messaging. In this case, 'IPMI Messaging' refers to the ability to execute generic IPMI commands that are not associated with a particular payload type. For example, if IPMI Messaging is disabled for a user, but that user is enabled for activating the SOL payload type, then IPMI commands associated with SOL and session management, such as Get SOL Configuration Parameters and Close Session are available, but generic IPMI commands such as Get SEL Time are unavailable.) 
ipmi_msg privilege_level * callback * user * operator * administrator * proprietary * no_access kwargs - api_host=localhost - api_user=admin - api_pass= - api_port=623 - api_kg=None ''' ret = {'name': name, 'result': False, 'comment': '', 'changes': {}} org_user = __salt__['ipmi.get_user'](uid=uid, channel=channel, **kwargs) change = False if org_user['access']['callback'] != callback: change = True if org_user['access']['link_auth'] != link_auth: change = True if org_user['access']['ipmi_msg'] != ipmi_msg: change = True if org_user['access']['privilege_level'] != privilege_level: change = True if __salt__['ipmi.set_user_password'](uid, mode='test_password', password=password, **kwargs) is False: change = True if change is False: ret['result'] = True ret['comment'] = 'user already present' return ret if __opts__['test']: ret['comment'] = 'would (re)create user' ret['result'] = None ret['changes'] = {'old': org_user, 'new': name} return ret __salt__['ipmi.ensure_user'](uid, name, password, channel, callback, link_auth, ipmi_msg, privilege_level, **kwargs) current_user = __salt__['ipmi.get_user'](uid=uid, channel=channel, **kwargs) ret['comment'] = '(re)created user' ret['result'] = True ret['changes'] = {'old': org_user, 'new': current_user} return ret
[ "def", "user_present", "(", "name", ",", "uid", ",", "password", ",", "channel", "=", "14", ",", "callback", "=", "False", ",", "link_auth", "=", "True", ",", "ipmi_msg", "=", "True", ",", "privilege_level", "=", "'administrator'", ",", "*", "*", "kwargs...
Ensure IPMI user and user privileges. name name of user (limit 16 bytes) uid user id number (1 to 7) password user password (limit 16 bytes) channel ipmi channel defaults to 14 for auto callback User Restricted to Callback False = User Privilege Limit is determined by the User Privilege Limit parameter privilege_level, for both callback and non-callback connections. True = User Privilege Limit is determined by the privilege_level parameter for callback connections, but is restricted to Callback level for non-callback connections. Thus, a user can only initiate a Callback when they 'call in' to the BMC, but once the callback connection has been made, the user could potentially establish a session as an Operator. link_auth User Link authentication True/False user name and password information will be used for link authentication, e.g. PPP CHAP) for the given channel. Link authentication itself is a global setting for the channel and is enabled/disabled via the serial/modem configuration parameters. ipmi_msg User IPMI Messaging True/False user name and password information will be used for IPMI Messaging. In this case, 'IPMI Messaging' refers to the ability to execute generic IPMI commands that are not associated with a particular payload type. For example, if IPMI Messaging is disabled for a user, but that user is enabled for activating the SOL payload type, then IPMI commands associated with SOL and session management, such as Get SOL Configuration Parameters and Close Session are available, but generic IPMI commands such as Get SEL Time are unavailable.) ipmi_msg privilege_level * callback * user * operator * administrator * proprietary * no_access kwargs - api_host=localhost - api_user=admin - api_pass= - api_port=623 - api_kg=None
[ "Ensure", "IPMI", "user", "and", "user", "privileges", "." ]
python
train
h2oai/h2o-3
h2o-py/h2o/model/model_base.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/model_base.py#L77-L89
def actual_params(self): """Dictionary of actual parameters of the model.""" params_to_select = {"model_id": "name", "response_column": "column_name", "training_frame": "name", "validation_frame": "name"} params = {} for p in self.parms: if p in params_to_select.keys(): params[p] = self.parms[p]["actual_value"].get(params_to_select[p], None) else: params[p] = self.parms[p]["actual_value"] return params
[ "def", "actual_params", "(", "self", ")", ":", "params_to_select", "=", "{", "\"model_id\"", ":", "\"name\"", ",", "\"response_column\"", ":", "\"column_name\"", ",", "\"training_frame\"", ":", "\"name\"", ",", "\"validation_frame\"", ":", "\"name\"", "}", "params",...
Dictionary of actual parameters of the model.
[ "Dictionary", "of", "actual", "parameters", "of", "the", "model", "." ]
python
test
pokerregion/poker
poker/website/pocketfives.py
https://github.com/pokerregion/poker/blob/2d8cf208fdf2b26bdc935972dcbe7a983a9e9768/poker/website/pocketfives.py#L31-L50
def get_ranked_players(): """Get the list of the first 100 ranked players.""" rankings_page = requests.get(RANKINGS_URL) root = etree.HTML(rankings_page.text) player_rows = root.xpath('//div[@id="ranked"]//tr') for row in player_rows[1:]: player_row = row.xpath('td[@class!="country"]//text()') yield _Player( name=player_row[1], country=row[1][0].get('title'), triple_crowns=player_row[3], monthly_win=player_row[4], biggest_cash=player_row[5], plb_score=player_row[6], biggest_score=player_row[7], average_score=player_row[8], previous_rank=player_row[9], )
[ "def", "get_ranked_players", "(", ")", ":", "rankings_page", "=", "requests", ".", "get", "(", "RANKINGS_URL", ")", "root", "=", "etree", ".", "HTML", "(", "rankings_page", ".", "text", ")", "player_rows", "=", "root", ".", "xpath", "(", "'//div[@id=\"ranked...
Get the list of the first 100 ranked players.
[ "Get", "the", "list", "of", "the", "first", "100", "ranked", "players", "." ]
python
train
sepandhaghighi/pycm
pycm/pycm_util.py
https://github.com/sepandhaghighi/pycm/blob/cb03258afd6a821d10acba73c965aaac174bedcd/pycm/pycm_util.py#L59-L73
def class_filter(classes, class_name): """ Filter classes by comparing two lists. :param classes: matrix classes :type classes: list :param class_name: sub set of classes :type class_name : list :return: filtered classes as list """ result_classes = classes if isinstance(class_name, list): if set(class_name) <= set(classes): result_classes = class_name return result_classes
[ "def", "class_filter", "(", "classes", ",", "class_name", ")", ":", "result_classes", "=", "classes", "if", "isinstance", "(", "class_name", ",", "list", ")", ":", "if", "set", "(", "class_name", ")", "<=", "set", "(", "classes", ")", ":", "result_classes"...
Filter classes by comparing two lists. :param classes: matrix classes :type classes: list :param class_name: sub set of classes :type class_name : list :return: filtered classes as list
[ "Filter", "classes", "by", "comparing", "two", "lists", "." ]
python
train
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L2105-L2173
def rename(self, old_file_path, new_file_path, force_replace=False): """Renames a FakeFile object at old_file_path to new_file_path, preserving all properties. Args: old_file_path: Path to filesystem object to rename. new_file_path: Path to where the filesystem object will live after this call. force_replace: If set and destination is an existing file, it will be replaced even under Windows if the user has permissions, otherwise replacement happens under Unix only. Raises: OSError: if old_file_path does not exist. OSError: if new_file_path is an existing directory (Windows, or Posix if old_file_path points to a regular file) OSError: if old_file_path is a directory and new_file_path a file OSError: if new_file_path is an existing file and force_replace not set (Windows only). OSError: if new_file_path is an existing file and could not be removed (Posix, or Windows with force_replace set). OSError: if dirname(new_file_path) does not exist. OSError: if the file would be moved to another filesystem (e.g. mount point). 
""" ends_with_sep = self.ends_with_path_separator(old_file_path) old_file_path = self.absnormpath(old_file_path) new_file_path = self.absnormpath(new_file_path) if not self.exists(old_file_path, check_link=True): self.raise_os_error(errno.ENOENT, old_file_path, 2) if ends_with_sep: self._handle_broken_link_with_trailing_sep(old_file_path) old_object = self.lresolve(old_file_path) if not self.is_windows_fs: self._handle_posix_dir_link_errors( new_file_path, old_file_path, ends_with_sep) if self.exists(new_file_path, check_link=True): new_file_path = self._rename_to_existing_path( force_replace, new_file_path, old_file_path, old_object, ends_with_sep) if not new_file_path: return old_dir, old_name = self.splitpath(old_file_path) new_dir, new_name = self.splitpath(new_file_path) if not self.exists(new_dir): self.raise_os_error(errno.ENOENT, new_dir) old_dir_object = self.resolve(old_dir) new_dir_object = self.resolve(new_dir) if old_dir_object.st_dev != new_dir_object.st_dev: self.raise_os_error(errno.EXDEV, old_file_path) if not S_ISDIR(new_dir_object.st_mode): self.raise_os_error( errno.EACCES if self.is_windows_fs else errno.ENOTDIR, new_file_path) if new_dir_object.has_parent_object(old_object): self.raise_os_error(errno.EINVAL, new_file_path) object_to_rename = old_dir_object.get_entry(old_name) old_dir_object.remove_entry(old_name, recursive=False) object_to_rename.name = new_name new_name = new_dir_object._normalized_entryname(new_name) if new_name in new_dir_object.contents: # in case of overwriting remove the old entry first new_dir_object.remove_entry(new_name) new_dir_object.add_entry(object_to_rename)
[ "def", "rename", "(", "self", ",", "old_file_path", ",", "new_file_path", ",", "force_replace", "=", "False", ")", ":", "ends_with_sep", "=", "self", ".", "ends_with_path_separator", "(", "old_file_path", ")", "old_file_path", "=", "self", ".", "absnormpath", "(...
Renames a FakeFile object at old_file_path to new_file_path, preserving all properties. Args: old_file_path: Path to filesystem object to rename. new_file_path: Path to where the filesystem object will live after this call. force_replace: If set and destination is an existing file, it will be replaced even under Windows if the user has permissions, otherwise replacement happens under Unix only. Raises: OSError: if old_file_path does not exist. OSError: if new_file_path is an existing directory (Windows, or Posix if old_file_path points to a regular file) OSError: if old_file_path is a directory and new_file_path a file OSError: if new_file_path is an existing file and force_replace not set (Windows only). OSError: if new_file_path is an existing file and could not be removed (Posix, or Windows with force_replace set). OSError: if dirname(new_file_path) does not exist. OSError: if the file would be moved to another filesystem (e.g. mount point).
[ "Renames", "a", "FakeFile", "object", "at", "old_file_path", "to", "new_file_path", "preserving", "all", "properties", "." ]
python
train
treycucco/pyebnf
pyebnf/compiler.py
https://github.com/treycucco/pyebnf/blob/3634ddabbe5d73508bcc20f4a591f86a46634e1d/pyebnf/compiler.py#L78-L82
def comments(self): """The AST comments.""" if self._comments is None: self._comments = [c for c in self.grammar.children if c.is_type(TokenType.comment)] return self._comments
[ "def", "comments", "(", "self", ")", ":", "if", "self", ".", "_comments", "is", "None", ":", "self", ".", "_comments", "=", "[", "c", "for", "c", "in", "self", ".", "grammar", ".", "children", "if", "c", ".", "is_type", "(", "TokenType", ".", "comm...
The AST comments.
[ "The", "AST", "comments", "." ]
python
test
LogicalDash/LiSE
ELiDE/ELiDE/card.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/ELiDE/ELiDE/card.py#L697-L726
def on_touch_up(self, touch): """If a card is being dragged, put it in the place it was just dropped and trigger a layout. """ if ( 'card' not in touch.ud or 'layout' not in touch.ud or touch.ud['layout'] != self ): return if hasattr(touch.ud['card'], '_topdecked'): self.canvas.after.remove(touch.ud['card']._topdecked) del touch.ud['card']._topdecked if None not in (self.insertion_deck, self.insertion_card): # need to sync to adapter.data?? card = touch.ud['card'] del card.parent.decks[card.deck][card.idx] for i in range(0, len(card.parent.decks[card.deck])): card.parent.decks[card.deck][i].idx = i deck = self.decks[self.insertion_deck] if self.insertion_card >= len(deck): deck.append(card) else: deck.insert(self.insertion_card, card) card.deck = self.insertion_deck card.idx = self.insertion_card self.decks[self.insertion_deck] = deck self.insertion_deck = self.insertion_card = None self._trigger_layout()
[ "def", "on_touch_up", "(", "self", ",", "touch", ")", ":", "if", "(", "'card'", "not", "in", "touch", ".", "ud", "or", "'layout'", "not", "in", "touch", ".", "ud", "or", "touch", ".", "ud", "[", "'layout'", "]", "!=", "self", ")", ":", "return", ...
If a card is being dragged, put it in the place it was just dropped and trigger a layout.
[ "If", "a", "card", "is", "being", "dragged", "put", "it", "in", "the", "place", "it", "was", "just", "dropped", "and", "trigger", "a", "layout", "." ]
python
train
heuer/cablemap
cablemap.core/cablemap/core/predicates.py
https://github.com/heuer/cablemap/blob/42066c8fc2972d237a2c35578e14525aaf705f38/cablemap.core/cablemap/core/predicates.py#L583-L592
def origin_canada(origin): """\ Returns if the origin is Canada. `origin` The origin to check. """ return origin in ( u'CALGARY', u'HALIFAX', u'MONTREAL', u'QUEBEC', u'OTTAWA', u'TORONTO', u'VANCOUVER')
[ "def", "origin_canada", "(", "origin", ")", ":", "return", "origin", "in", "(", "u'CALGARY'", ",", "u'HALIFAX'", ",", "u'MONTREAL'", ",", "u'QUEBEC'", ",", "u'OTTAWA'", ",", "u'TORONTO'", ",", "u'VANCOUVER'", ")" ]
\ Returns if the origin is Canada. `origin` The origin to check.
[ "\\", "Returns", "if", "the", "origin", "is", "Canada", "." ]
python
train
rootpy/rootpy
rootpy/tree/cut.py
https://github.com/rootpy/rootpy/blob/3926935e1f2100d8ba68070c2ab44055d4800f73/rootpy/tree/cut.py#L222-L248
def safe(self, parentheses=True): """ Returns a string representation with special characters replaced by safer characters for use in file names. """ if not self: return "" string = str(self) string = string.replace("**", "_pow_") string = string.replace("*", "_mul_") string = string.replace("/", "_div_") string = string.replace("==", "_eq_") string = string.replace("<=", "_leq_") string = string.replace(">=", "_geq_") string = string.replace("<", "_lt_") string = string.replace(">", "_gt_") string = string.replace("&&", "_and_") string = string.replace("||", "_or_") string = string.replace("!", "not_") if parentheses: string = string.replace("(", "L") string = string.replace(")", "R") else: string = string.replace("(", "") string = string.replace(")", "") string = string.replace(" ", "") return string
[ "def", "safe", "(", "self", ",", "parentheses", "=", "True", ")", ":", "if", "not", "self", ":", "return", "\"\"", "string", "=", "str", "(", "self", ")", "string", "=", "string", ".", "replace", "(", "\"**\"", ",", "\"_pow_\"", ")", "string", "=", ...
Returns a string representation with special characters replaced by safer characters for use in file names.
[ "Returns", "a", "string", "representation", "with", "special", "characters", "replaced", "by", "safer", "characters", "for", "use", "in", "file", "names", "." ]
python
train
benfb/dars
dars/__init__.py
https://github.com/benfb/dars/blob/66778de8314f7dcec50ef706abcea84a9b3d9c7e/dars/__init__.py#L27-L42
def findGap(song): """Return the position of silence in a song""" try: silence = pd.silence.detect_silence(song) except IOError: print("There isn't a song there!") maxlength = 0 for pair in silence: length = pair[1] - pair[0] if length >= maxlength: maxlength = length gap = pair return gap
[ "def", "findGap", "(", "song", ")", ":", "try", ":", "silence", "=", "pd", ".", "silence", ".", "detect_silence", "(", "song", ")", "except", "IOError", ":", "print", "(", "\"There isn't a song there!\"", ")", "maxlength", "=", "0", "for", "pair", "in", ...
Return the position of silence in a song
[ "Return", "the", "position", "of", "silence", "in", "a", "song" ]
python
train
eng-tools/sfsimodels
sfsimodels/models/foundations.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/foundations.py#L282-L293
def i_ll(self): """ Second moment of inertia around the length axis. :return: """ d_values = [] for i in range(self.n_pads_w): d_values.append(self.pad_position_w(i)) d_values = np.array(d_values) - self.width / 2 area_d_sqrd = sum(self.pad_area * d_values ** 2) * self.n_pads_l i_second = self.pad_i_ll * self.n_pads return area_d_sqrd + i_second
[ "def", "i_ll", "(", "self", ")", ":", "d_values", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "n_pads_w", ")", ":", "d_values", ".", "append", "(", "self", ".", "pad_position_w", "(", "i", ")", ")", "d_values", "=", "np", ".", "ar...
Second moment of inertia around the length axis. :return:
[ "Second", "moment", "of", "inertia", "around", "the", "length", "axis", ".", ":", "return", ":" ]
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/reftrack.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/reftrack.py#L1612-L1621
def fetch_import_ref_restriction(self,): """Fetch whether importing the reference is restricted :returns: True, if importing the reference is restricted :rtype: :class:`bool` :raises: None """ inter = self.get_refobjinter() restricted = self.status() not in (self.LOADED, self.UNLOADED) return restricted or inter.fetch_action_restriction(self, 'import_reference')
[ "def", "fetch_import_ref_restriction", "(", "self", ",", ")", ":", "inter", "=", "self", ".", "get_refobjinter", "(", ")", "restricted", "=", "self", ".", "status", "(", ")", "not", "in", "(", "self", ".", "LOADED", ",", "self", ".", "UNLOADED", ")", "...
Fetch whether importing the reference is restricted :returns: True, if importing the reference is restricted :rtype: :class:`bool` :raises: None
[ "Fetch", "whether", "importing", "the", "reference", "is", "restricted" ]
python
train
buildbot/buildbot
master/buildbot/steps/worker.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/steps/worker.py#L267-L275
def runRmFile(self, path, timeout=None, **kwargs): """ remove a file from the worker """ cmd_args = {'path': path, 'logEnviron': self.logEnviron} if timeout: cmd_args['timeout'] = timeout if self.workerVersionIsOlderThan('rmfile', '3.1'): cmd_args['dir'] = os.path.abspath(path) return self.runRemoteCommand('rmdir', cmd_args, **kwargs) return self.runRemoteCommand('rmfile', cmd_args, **kwargs)
[ "def", "runRmFile", "(", "self", ",", "path", ",", "timeout", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cmd_args", "=", "{", "'path'", ":", "path", ",", "'logEnviron'", ":", "self", ".", "logEnviron", "}", "if", "timeout", ":", "cmd_args", "["...
remove a file from the worker
[ "remove", "a", "file", "from", "the", "worker" ]
python
train
senaite/senaite.core
bika/lims/numbergenerator.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/numbergenerator.py#L43-L49
def get_storage_location(): """ get the portal with the plone.api """ location = api.portal.getSite() if location.get('bika_setup', False): location = location['bika_setup'] return location
[ "def", "get_storage_location", "(", ")", ":", "location", "=", "api", ".", "portal", ".", "getSite", "(", ")", "if", "location", ".", "get", "(", "'bika_setup'", ",", "False", ")", ":", "location", "=", "location", "[", "'bika_setup'", "]", "return", "lo...
get the portal with the plone.api
[ "get", "the", "portal", "with", "the", "plone", ".", "api" ]
python
train
mandiant/ioc_writer
ioc_writer/ioc_common.py
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_common.py#L628-L640
def make_processitem_portlist_portitem_remoteip(remote_ip, condition='is', negate=False): """ Create a node for ProcessItem/PortList/PortItem/remoteIP :return: A IndicatorItem represented as an Element node """ document = 'ProcessItem' search = 'ProcessItem/PortList/PortItem/remoteIP' content_type = 'IP' content = remote_ip ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content, negate=negate) return ii_node
[ "def", "make_processitem_portlist_portitem_remoteip", "(", "remote_ip", ",", "condition", "=", "'is'", ",", "negate", "=", "False", ")", ":", "document", "=", "'ProcessItem'", "search", "=", "'ProcessItem/PortList/PortItem/remoteIP'", "content_type", "=", "'IP'", "conte...
Create a node for ProcessItem/PortList/PortItem/remoteIP :return: A IndicatorItem represented as an Element node
[ "Create", "a", "node", "for", "ProcessItem", "/", "PortList", "/", "PortItem", "/", "remoteIP", ":", "return", ":", "A", "IndicatorItem", "represented", "as", "an", "Element", "node" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_xstp_ext.py#L4431-L4444
def get_stp_mst_detail_output_cist_port_link_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") cist = ET.SubElement(output, "cist") port = ET.SubElement(cist, "port") link_type = ET.SubElement(port, "link-type") link_type.text = kwargs.pop('link_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_stp_mst_detail_output_cist_port_link_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_stp_mst_detail", "=", "ET", ".", "Element", "(", "\"get_stp_mst_detail\"", ")", "config", "=...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
erikdejonge/arguments
arguments/__init__.py
https://github.com/erikdejonge/arguments/blob/fc222d3989d459343a81944cabb56854014335ed/arguments/__init__.py#L743-L750
def for_print(self): """ for_print """ s = "\033[34m" + self.get_object_info() + "\033[0m" s += "\n" s += self.as_string() return s
[ "def", "for_print", "(", "self", ")", ":", "s", "=", "\"\\033[34m\"", "+", "self", ".", "get_object_info", "(", ")", "+", "\"\\033[0m\"", "s", "+=", "\"\\n\"", "s", "+=", "self", ".", "as_string", "(", ")", "return", "s" ]
for_print
[ "for_print" ]
python
train
synw/dataswim
dataswim/db/infos.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/db/infos.py#L31-L44
def tables_(self) -> list: """Return a list of the existing tables in a database :return: list of the table names :rtype: list :example: ``tables = ds.tables_()`` """ if self._check_db() == False: return try: return self._tables() except Exception as e: self.err(e, "Can not print tables")
[ "def", "tables_", "(", "self", ")", "->", "list", ":", "if", "self", ".", "_check_db", "(", ")", "==", "False", ":", "return", "try", ":", "return", "self", ".", "_tables", "(", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", ...
Return a list of the existing tables in a database :return: list of the table names :rtype: list :example: ``tables = ds.tables_()``
[ "Return", "a", "list", "of", "the", "existing", "tables", "in", "a", "database" ]
python
train
dtmilano/AndroidViewClient
src/com/dtmilano/android/viewclient.py
https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L986-L998
def longTouch(self, duration=2000): ''' Long touches this C{View} @param duration: duration in ms ''' (x, y) = self.getCenter() if self.uiAutomatorHelper: self.uiAutomatorHelper.swipe(startX=x, startY=y, endX=x, endY=y, steps=200) else: # FIXME: get orientation self.device.longTouch(x, y, duration, orientation=-1)
[ "def", "longTouch", "(", "self", ",", "duration", "=", "2000", ")", ":", "(", "x", ",", "y", ")", "=", "self", ".", "getCenter", "(", ")", "if", "self", ".", "uiAutomatorHelper", ":", "self", ".", "uiAutomatorHelper", ".", "swipe", "(", "startX", "="...
Long touches this C{View} @param duration: duration in ms
[ "Long", "touches", "this", "C", "{", "View", "}" ]
python
train
aouyar/PyMunin
pysysinfo/diskio.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/diskio.py#L113-L137
def _initDMinfo(self): """Check files in /dev/mapper to initialize data structures for mappings between device-mapper devices, minor device numbers, VGs and LVs. """ self._mapLVtuple2dm = {} self._mapLVname2dm = {} self._vgTree = {} if self._dmMajorNum is None: self._initBlockMajorMap() for file in os.listdir(devmapperDir): mobj = re.match('([a-zA-Z0-9+_.\-]*[a-zA-Z0-9+_.])-([a-zA-Z0-9+_.][a-zA-Z0-9+_.\-]*)$', file) if mobj: path = os.path.join(devmapperDir, file) (major, minor) = self._getDevMajorMinor(path) if major == self._dmMajorNum: vg = mobj.group(1).replace('--', '-') lv = mobj.group(2).replace('--', '-') dmdev = "dm-%d" % minor self._mapLVtuple2dm[(vg,lv)] = dmdev self._mapLVname2dm[file] = dmdev if not vg in self._vgTree: self._vgTree[vg] = [] self._vgTree[vg].append(lv)
[ "def", "_initDMinfo", "(", "self", ")", ":", "self", ".", "_mapLVtuple2dm", "=", "{", "}", "self", ".", "_mapLVname2dm", "=", "{", "}", "self", ".", "_vgTree", "=", "{", "}", "if", "self", ".", "_dmMajorNum", "is", "None", ":", "self", ".", "_initBlo...
Check files in /dev/mapper to initialize data structures for mappings between device-mapper devices, minor device numbers, VGs and LVs.
[ "Check", "files", "in", "/", "dev", "/", "mapper", "to", "initialize", "data", "structures", "for", "mappings", "between", "device", "-", "mapper", "devices", "minor", "device", "numbers", "VGs", "and", "LVs", "." ]
python
train
automl/HpBandSter
hpbandster/core/base_iteration.py
https://github.com/automl/HpBandSter/blob/841db4b827f342e5eb7f725723ea6461ac52d45a/hpbandster/core/base_iteration.py#L105-L139
def register_result(self, job, skip_sanity_checks=False): """ function to register the result of a job This function is called from HB_master, don't call this from your script. """ if self.is_finished: raise RuntimeError("This HB iteration is finished, you can't register more results!") config_id = job.id config = job.kwargs['config'] budget = job.kwargs['budget'] timestamps = job.timestamps result = job.result exception = job.exception d = self.data[config_id] if not skip_sanity_checks: assert d.config == config, 'Configurations differ!' assert d.status == 'RUNNING', "Configuration wasn't scheduled for a run." assert d.budget == budget, 'Budgets differ (%f != %f)!'%(self.data[config_id]['budget'], budget) d.time_stamps[budget] = timestamps d.results[budget] = result if (not job.result is None) and np.isfinite(result['loss']): d.status = 'REVIEW' else: d.status = 'CRASHED' d.exceptions[budget] = exception self.num_running -= 1
[ "def", "register_result", "(", "self", ",", "job", ",", "skip_sanity_checks", "=", "False", ")", ":", "if", "self", ".", "is_finished", ":", "raise", "RuntimeError", "(", "\"This HB iteration is finished, you can't register more results!\"", ")", "config_id", "=", "jo...
function to register the result of a job This function is called from HB_master, don't call this from your script.
[ "function", "to", "register", "the", "result", "of", "a", "job" ]
python
train
markovmodel/PyEMMA
pyemma/coordinates/data/featurization/featurizer.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/coordinates/data/featurization/featurizer.py#L624-L649
def add_angles(self, indexes, deg=False, cossin=False, periodic=True): """ Adds the list of angles to the feature list Parameters ---------- indexes : np.ndarray, shape=(num_pairs, 3), dtype=int an array with triplets of atom indices deg : bool, optional, default = False If False (default), angles will be computed in radians. If True, angles will be computed in degrees. cossin : bool, optional, default = False If True, each angle will be returned as a pair of (sin(x), cos(x)). This is useful, if you calculate the mean (e.g TICA/PCA, clustering) in that space. periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention. """ from .angles import AngleFeature indexes = self._check_indices(indexes, pair_n=3) f = AngleFeature(self.topology, indexes, deg=deg, cossin=cossin, periodic=periodic) self.__add_feature(f)
[ "def", "add_angles", "(", "self", ",", "indexes", ",", "deg", "=", "False", ",", "cossin", "=", "False", ",", "periodic", "=", "True", ")", ":", "from", ".", "angles", "import", "AngleFeature", "indexes", "=", "self", ".", "_check_indices", "(", "indexes...
Adds the list of angles to the feature list Parameters ---------- indexes : np.ndarray, shape=(num_pairs, 3), dtype=int an array with triplets of atom indices deg : bool, optional, default = False If False (default), angles will be computed in radians. If True, angles will be computed in degrees. cossin : bool, optional, default = False If True, each angle will be returned as a pair of (sin(x), cos(x)). This is useful, if you calculate the mean (e.g TICA/PCA, clustering) in that space. periodic : bool, optional, default = True If `periodic` is True and the trajectory contains unitcell information, we will treat dihedrals that cross periodic images using the minimum image convention.
[ "Adds", "the", "list", "of", "angles", "to", "the", "feature", "list" ]
python
train
zeroSteiner/smoke-zephyr
smoke_zephyr/argparse_types.py
https://github.com/zeroSteiner/smoke-zephyr/blob/a6d2498aeacc72ee52e7806f783a4d83d537ffb2/smoke_zephyr/argparse_types.py#L110-L114
def log_level_type(arg): """An argparse type representing a logging level.""" if not arg.upper() in ('NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'): raise argparse.ArgumentTypeError("{0} is not a valid log level".format(repr(arg))) return getattr(logging, arg.upper())
[ "def", "log_level_type", "(", "arg", ")", ":", "if", "not", "arg", ".", "upper", "(", ")", "in", "(", "'NOTSET'", ",", "'DEBUG'", ",", "'INFO'", ",", "'WARNING'", ",", "'ERROR'", ",", "'CRITICAL'", ")", ":", "raise", "argparse", ".", "ArgumentTypeError",...
An argparse type representing a logging level.
[ "An", "argparse", "type", "representing", "a", "logging", "level", "." ]
python
train
rigetti/grove
grove/pyqaoa/numpartition_qaoa.py
https://github.com/rigetti/grove/blob/dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3/grove/pyqaoa/numpartition_qaoa.py#L25-L57
def numpart_qaoa(asset_list, A=1.0, minimizer_kwargs=None, steps=1): """ generate number partition driver and cost functions :param asset_list: list to binary partition :param A: (float) optional constant for level separation. Default=1. :param minimizer_kwargs: Arguments for the QAOA minimizer :param steps: (int) number of steps approximating the solution. """ cost_operators = [] ref_operators = [] for ii in range(len(asset_list)): for jj in range(ii + 1, len(asset_list)): cost_operators.append(PauliSum([PauliTerm("Z", ii, 2*asset_list[ii]) * PauliTerm("Z", jj, A*asset_list[jj])])) ref_operators.append(PauliSum([PauliTerm("X", ii, -1.0)])) cost_operators.append(PauliSum([PauliTerm("I", 0, len(asset_list))])) if minimizer_kwargs is None: minimizer_kwargs = {'method': 'Nelder-Mead', 'options': {'ftol': 1.0e-2, 'xtol': 1.0e-2, 'disp': True}} qc = get_qc(f"{len(asset_list)}q-qvm") qaoa_inst = QAOA(qc, list(range(len(asset_list))), steps=steps, cost_ham=cost_operators, ref_ham=ref_operators, store_basis=True, minimizer=minimize, minimizer_kwargs=minimizer_kwargs, vqe_options={'disp': print}) return qaoa_inst
[ "def", "numpart_qaoa", "(", "asset_list", ",", "A", "=", "1.0", ",", "minimizer_kwargs", "=", "None", ",", "steps", "=", "1", ")", ":", "cost_operators", "=", "[", "]", "ref_operators", "=", "[", "]", "for", "ii", "in", "range", "(", "len", "(", "ass...
generate number partition driver and cost functions :param asset_list: list to binary partition :param A: (float) optional constant for level separation. Default=1. :param minimizer_kwargs: Arguments for the QAOA minimizer :param steps: (int) number of steps approximating the solution.
[ "generate", "number", "partition", "driver", "and", "cost", "functions" ]
python
train
stephanepechard/projy
projy/cmdline.py
https://github.com/stephanepechard/projy/blob/3146b0e3c207b977e1b51fcb33138746dae83c23/projy/cmdline.py#L54-L91
def run_info(template): """ Print information about a specific template. """ template.project_name = 'TowelStuff' # fake project name, always the same name = template_name_from_class_name(template.__class__.__name__) term = TerminalView() term.print_info("Content of template {} with an example project " \ "named 'TowelStuff':".format(term.text_in_color(name, TERM_GREEN))) dir_name = None for file_info in sorted(template.files(), key=lambda dir: dir[0]): directory = file_name = template_name = '' if file_info[0]: directory = file_info[0] if file_info[1]: file_name = file_info[1] if file_info[2]: template_name = '\t\t - ' + file_info[2] if (directory != dir_name): term.print_info('\n\t' + term.text_in_color(directory + '/', TERM_PINK)) dir_name = directory term.print_info('\t\t' + term.text_in_color(file_name, TERM_YELLOW) + template_name) # print substitutions try: subs = template.substitutes().keys() if len(subs) > 0: subs.sort() term.print_info("\nSubstitutions of this template are: ") max_len = 0 for key in subs: if max_len < len(key): max_len = len(key) for key in subs: term.print_info(u"\t{0:{1}} -> {2}". format(key, max_len, template.substitutes()[key])) except AttributeError: pass
[ "def", "run_info", "(", "template", ")", ":", "template", ".", "project_name", "=", "'TowelStuff'", "# fake project name, always the same", "name", "=", "template_name_from_class_name", "(", "template", ".", "__class__", ".", "__name__", ")", "term", "=", "TerminalVie...
Print information about a specific template.
[ "Print", "information", "about", "a", "specific", "template", "." ]
python
train
projectatomic/atomic-reactor
atomic_reactor/util.py
https://github.com/projectatomic/atomic-reactor/blob/fd31c01b964097210bf169960d051e5f04019a80/atomic_reactor/util.py#L634-L641
def registry_hostname(registry): """ Strip a reference to a registry to just the hostname:port """ if registry.startswith('http:') or registry.startswith('https:'): return urlparse(registry).netloc else: return registry
[ "def", "registry_hostname", "(", "registry", ")", ":", "if", "registry", ".", "startswith", "(", "'http:'", ")", "or", "registry", ".", "startswith", "(", "'https:'", ")", ":", "return", "urlparse", "(", "registry", ")", ".", "netloc", "else", ":", "return...
Strip a reference to a registry to just the hostname:port
[ "Strip", "a", "reference", "to", "a", "registry", "to", "just", "the", "hostname", ":", "port" ]
python
train
vtkiorg/vtki
vtki/plotting.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/plotting.py#L1460-L1473
def remove_bounding_box(self, loc=None): """ Removes bounding box from the active renderer. Parameters ---------- loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer. """ self._active_renderer_index = self.loc_to_index(loc) renderer = self.renderers[self._active_renderer_index] renderer.remove_bounding_box()
[ "def", "remove_bounding_box", "(", "self", ",", "loc", "=", "None", ")", ":", "self", ".", "_active_renderer_index", "=", "self", ".", "loc_to_index", "(", "loc", ")", "renderer", "=", "self", ".", "renderers", "[", "self", ".", "_active_renderer_index", "]"...
Removes bounding box from the active renderer. Parameters ---------- loc : int, tuple, or list Index of the renderer to add the actor to. For example, ``loc=2`` or ``loc=(1, 1)``. If None, selects the last active Renderer.
[ "Removes", "bounding", "box", "from", "the", "active", "renderer", "." ]
python
train
HiPERCAM/hcam_widgets
hcam_widgets/misc.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/misc.py#L557-L570
def getRunNumber(g): """ Polls the data server to find the current run number. Throws exceptions if it can't determine it. """ if not g.cpars['hcam_server_on']: raise DriverError('getRunNumber error: servers are not active') url = g.cpars['hipercam_server'] + 'summary' response = urllib.request.urlopen(url, timeout=2) rs = ReadServer(response.read(), status_msg=True) if rs.ok: return rs.run else: raise DriverError('getRunNumber error: ' + str(rs.err))
[ "def", "getRunNumber", "(", "g", ")", ":", "if", "not", "g", ".", "cpars", "[", "'hcam_server_on'", "]", ":", "raise", "DriverError", "(", "'getRunNumber error: servers are not active'", ")", "url", "=", "g", ".", "cpars", "[", "'hipercam_server'", "]", "+", ...
Polls the data server to find the current run number. Throws exceptions if it can't determine it.
[ "Polls", "the", "data", "server", "to", "find", "the", "current", "run", "number", ".", "Throws", "exceptions", "if", "it", "can", "t", "determine", "it", "." ]
python
train
GNS3/gns3-server
gns3server/utils/asyncio/serial.py
https://github.com/GNS3/gns3-server/blob/a221678448fb5d24e977ef562f81d56aacc89ab1/gns3server/utils/asyncio/serial.py#L99-L110
def _asyncio_open_serial_windows(path): """ Open a windows named pipe :returns: An IO like object """ try: yield from wait_for_named_pipe_creation(path) except asyncio.TimeoutError: raise NodeError('Pipe file "{}" is missing'.format(path)) return WindowsPipe(path)
[ "def", "_asyncio_open_serial_windows", "(", "path", ")", ":", "try", ":", "yield", "from", "wait_for_named_pipe_creation", "(", "path", ")", "except", "asyncio", ".", "TimeoutError", ":", "raise", "NodeError", "(", "'Pipe file \"{}\" is missing'", ".", "format", "("...
Open a windows named pipe :returns: An IO like object
[ "Open", "a", "windows", "named", "pipe" ]
python
train
gwpy/gwpy
gwpy/io/kerberos.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/io/kerberos.py#L54-L177
def kinit(username=None, password=None, realm=None, exe=None, keytab=None, krb5ccname=None, verbose=None): """Initialise a kerberos (krb5) ticket. This allows authenticated connections to, amongst others, NDS2 services. Parameters ---------- username : `str`, optional name of user, will be prompted for if not given. password : `str`, optional cleartext password of user for given realm, will be prompted for if not given. realm : `str`, optional name of realm to authenticate against, read from keytab if available, defaults to ``'LIGO.ORG'``. exe : `str`, optional path to kinit executable. keytab : `str`, optional path to keytab file. If not given this will be read from the ``KRB5_KTNAME`` environment variable. See notes for more details. krb5ccname : `str`, optional path to Kerberos credentials cache. verbose : `bool`, optional print verbose output (if `True`), or not (`False)`; default is `True` if any user-prompting is needed, otherwise `False`. Notes ----- If a keytab is given, or is read from the ``KRB5_KTNAME`` environment variable, this will be used to guess the username and realm, if it contains only a single credential. 
Examples -------- Example 1: standard user input, with password prompt:: >>> kinit('albert.einstein') Password for albert.einstein@LIGO.ORG: Kerberos ticket generated for albert.einstein@LIGO.ORG Example 2: extract username and realm from keytab, and use that in authentication:: >>> kinit(keytab='~/.kerberos/ligo.org.keytab', verbose=True) Kerberos ticket generated for albert.einstein@LIGO.ORG """ # get kinit path if exe is None: exe = which('kinit') # get keytab if keytab is None: keytab = os.environ.get('KRB5_KTNAME', None) if keytab is None or not os.path.isfile(keytab): keytab = None if keytab: try: principals = parse_keytab(keytab) except KerberosError: pass else: # is there's only one entry in the keytab, use that if username is None and len(principals) == 1: username = principals[0][0] # or if the given username is in the keytab, find the realm if username in list(zip(*principals))[0]: idx = list(zip(*principals))[0].index(username) realm = principals[idx][1] # otherwise this keytab is useless, so remove it else: keytab = None # refuse to prompt if we can't get an answer # note: jupyter streams are not recognised as interactive # (isatty() returns False) so we have a special case here if not sys.stdout.isatty() and not _IPYTHON and ( username is None or (not keytab and password is None) ): raise KerberosError("cannot generate kerberos ticket in a " "non-interactive session, please manually create " "a ticket, or consider using a keytab file") # get credentials if realm is None: realm = 'LIGO.ORG' if username is None: verbose = True username = input("Please provide username for the {} kerberos " "realm: ".format(realm)) identity = '{}@{}'.format(username, realm) if not keytab and password is None: verbose = True password = getpass.getpass(prompt="Password for {}: ".format(identity)) # format kinit command if keytab: cmd = [exe, '-k', '-t', keytab, identity] else: cmd = [exe, identity] if krb5ccname: krbenv = {'KRB5CCNAME': krb5ccname} else: krbenv = None # 
execute command kget = subprocess.Popen(cmd, env=krbenv, stdout=subprocess.PIPE, stdin=subprocess.PIPE) if not keytab: kget.communicate(password.encode('utf-8')) kget.wait() retcode = kget.poll() if retcode: raise subprocess.CalledProcessError(kget.returncode, ' '.join(cmd)) if verbose: print("Kerberos ticket generated for {}".format(identity))
[ "def", "kinit", "(", "username", "=", "None", ",", "password", "=", "None", ",", "realm", "=", "None", ",", "exe", "=", "None", ",", "keytab", "=", "None", ",", "krb5ccname", "=", "None", ",", "verbose", "=", "None", ")", ":", "# get kinit path", "if...
Initialise a kerberos (krb5) ticket. This allows authenticated connections to, amongst others, NDS2 services. Parameters ---------- username : `str`, optional name of user, will be prompted for if not given. password : `str`, optional cleartext password of user for given realm, will be prompted for if not given. realm : `str`, optional name of realm to authenticate against, read from keytab if available, defaults to ``'LIGO.ORG'``. exe : `str`, optional path to kinit executable. keytab : `str`, optional path to keytab file. If not given this will be read from the ``KRB5_KTNAME`` environment variable. See notes for more details. krb5ccname : `str`, optional path to Kerberos credentials cache. verbose : `bool`, optional print verbose output (if `True`), or not (`False)`; default is `True` if any user-prompting is needed, otherwise `False`. Notes ----- If a keytab is given, or is read from the ``KRB5_KTNAME`` environment variable, this will be used to guess the username and realm, if it contains only a single credential. Examples -------- Example 1: standard user input, with password prompt:: >>> kinit('albert.einstein') Password for albert.einstein@LIGO.ORG: Kerberos ticket generated for albert.einstein@LIGO.ORG Example 2: extract username and realm from keytab, and use that in authentication:: >>> kinit(keytab='~/.kerberos/ligo.org.keytab', verbose=True) Kerberos ticket generated for albert.einstein@LIGO.ORG
[ "Initialise", "a", "kerberos", "(", "krb5", ")", "ticket", "." ]
python
train
Azure/msrestazure-for-python
msrestazure/polling/arm_polling.py
https://github.com/Azure/msrestazure-for-python/blob/5f99262305692525d03ca87d2c5356b05c5aa874/msrestazure/polling/arm_polling.py#L142-L156
def _is_empty(self, response): """Check if response body contains meaningful content. :rtype: bool :raises: DeserializationError if response body contains invalid json data. """ # Assume ClientResponse has "body", and otherwise it's a requests.Response content = response.text() if hasattr(response, "body") else response.text if not content: return True try: return not json.loads(content) except ValueError: raise DeserializationError( "Error occurred in deserializing the response body.")
[ "def", "_is_empty", "(", "self", ",", "response", ")", ":", "# Assume ClientResponse has \"body\", and otherwise it's a requests.Response", "content", "=", "response", ".", "text", "(", ")", "if", "hasattr", "(", "response", ",", "\"body\"", ")", "else", "response", ...
Check if response body contains meaningful content. :rtype: bool :raises: DeserializationError if response body contains invalid json data.
[ "Check", "if", "response", "body", "contains", "meaningful", "content", "." ]
python
train
sassoo/goldman
goldman/queryparams/sort.py
https://github.com/sassoo/goldman/blob/b72540c9ad06b5c68aadb1b4fa8cb0b716260bf2/goldman/queryparams/sort.py#L74-L84
def _validate_field(param, fields): """ Ensure the sortable field exists on the model """ if param.field not in fields: raise InvalidQueryParams(**{ 'detail': 'The sort query param value of "%s" is ' 'invalid. That field does not exist on the ' 'resource being requested.' % param.raw_field, 'links': LINK, 'parameter': PARAM, })
[ "def", "_validate_field", "(", "param", ",", "fields", ")", ":", "if", "param", ".", "field", "not", "in", "fields", ":", "raise", "InvalidQueryParams", "(", "*", "*", "{", "'detail'", ":", "'The sort query param value of \"%s\" is '", "'invalid. That field does not...
Ensure the sortable field exists on the model
[ "Ensure", "the", "sortable", "field", "exists", "on", "the", "model" ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/workflow_builder.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/workflow_builder.py#L183-L200
def validate_ignore_reuse(stages, ignore_reuse_stages): """ Checks if each stage ID specified in ignore_reuse_stages exists in the workflow definition. If ignore_reuse_stages contains only '*', the field is valid. """ if not isinstance(ignore_reuse_stages, list): raise WorkflowBuilderException('"IgnoreReuse must be a list of strings - stage IDs or "*"') ignore_reuse_set = set(ignore_reuse_stages) if '*' in ignore_reuse_set and ignore_reuse_set == 1: return stage_ids = set([stage.get('id') for stage in stages]) for ignored in ignore_reuse_set: if ignored not in stage_ids: raise WorkflowBuilderException( 'Stage with ID {} not found. Add a matching "id" for the stage you wish to set ignoreReuse for'.format(ignored))
[ "def", "validate_ignore_reuse", "(", "stages", ",", "ignore_reuse_stages", ")", ":", "if", "not", "isinstance", "(", "ignore_reuse_stages", ",", "list", ")", ":", "raise", "WorkflowBuilderException", "(", "'\"IgnoreReuse must be a list of strings - stage IDs or \"*\"'", ")"...
Checks if each stage ID specified in ignore_reuse_stages exists in the workflow definition. If ignore_reuse_stages contains only '*', the field is valid.
[ "Checks", "if", "each", "stage", "ID", "specified", "in", "ignore_reuse_stages", "exists", "in", "the", "workflow", "definition", ".", "If", "ignore_reuse_stages", "contains", "only", "*", "the", "field", "is", "valid", "." ]
python
train
scarface-4711/denonavr
denonavr/denonavr.py
https://github.com/scarface-4711/denonavr/blob/59a136e27b43cb1d1e140cf67705087b3aa377cd/denonavr/denonavr.py#L1640-L1655
def set_volume(self, volume): """ Set receiver volume via HTTP get command. Volume is send in a format like -50.0. Minimum is -80.0, maximum at 18.0 """ if volume < -80 or volume > 18: raise ValueError("Invalid volume") try: return bool(self.send_get_command( self._urls.command_set_volume % volume)) except requests.exceptions.RequestException: _LOGGER.error("Connection error: set volume command not sent.") return False
[ "def", "set_volume", "(", "self", ",", "volume", ")", ":", "if", "volume", "<", "-", "80", "or", "volume", ">", "18", ":", "raise", "ValueError", "(", "\"Invalid volume\"", ")", "try", ":", "return", "bool", "(", "self", ".", "send_get_command", "(", "...
Set receiver volume via HTTP get command. Volume is send in a format like -50.0. Minimum is -80.0, maximum at 18.0
[ "Set", "receiver", "volume", "via", "HTTP", "get", "command", "." ]
python
train
oasis-open/cti-stix-validator
stix2validator/output.py
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/output.py#L172-L188
def print_file_results(file_result): """Print the results of validating a file. Args: file_result: A FileValidationResults instance. """ print_results_header(file_result.filepath, file_result.is_valid) for object_result in file_result.object_results: if object_result.warnings: print_warning_results(object_result, 1) if object_result.errors: print_schema_results(object_result, 1) if file_result.fatal: print_fatal_results(file_result.fatal, 1)
[ "def", "print_file_results", "(", "file_result", ")", ":", "print_results_header", "(", "file_result", ".", "filepath", ",", "file_result", ".", "is_valid", ")", "for", "object_result", "in", "file_result", ".", "object_results", ":", "if", "object_result", ".", "...
Print the results of validating a file. Args: file_result: A FileValidationResults instance.
[ "Print", "the", "results", "of", "validating", "a", "file", "." ]
python
train
keans/lmnotify
lmnotify/lmnotify.py
https://github.com/keans/lmnotify/blob/b0a5282a582e5090852dc20fea8a135ca258d0d3/lmnotify/lmnotify.py#L443-L458
def set_bluetooth(self, active=None, name=None): """ allows to activate/deactivate bluetooth and change the name """ assert(active is not None or name is not None) log.debug("setting bluetooth state...") cmd, url = DEVICE_URLS["set_bluetooth"] json_data = {} if name is not None: json_data["name"] = name if active is not None: json_data["active"] = active return self._exec(cmd, url, json_data=json_data)
[ "def", "set_bluetooth", "(", "self", ",", "active", "=", "None", ",", "name", "=", "None", ")", ":", "assert", "(", "active", "is", "not", "None", "or", "name", "is", "not", "None", ")", "log", ".", "debug", "(", "\"setting bluetooth state...\"", ")", ...
allows to activate/deactivate bluetooth and change the name
[ "allows", "to", "activate", "/", "deactivate", "bluetooth", "and", "change", "the", "name" ]
python
train
googlefonts/glyphsLib
Lib/glyphsLib/parser.py
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/parser.py#L141-L153
def _parse_dict(self, text, i): """Parse a dictionary from source text starting at i.""" old_current_type = self.current_type new_type = self.current_type if new_type is None: # customparameter.value needs to be set from the found value new_type = dict elif type(new_type) == list: new_type = new_type[0] res = new_type() i = self._parse_dict_into_object(res, text, i) self.current_type = old_current_type return res, i
[ "def", "_parse_dict", "(", "self", ",", "text", ",", "i", ")", ":", "old_current_type", "=", "self", ".", "current_type", "new_type", "=", "self", ".", "current_type", "if", "new_type", "is", "None", ":", "# customparameter.value needs to be set from the found value...
Parse a dictionary from source text starting at i.
[ "Parse", "a", "dictionary", "from", "source", "text", "starting", "at", "i", "." ]
python
train
ranaroussi/pywallet
pywallet/utils/ethereum.py
https://github.com/ranaroussi/pywallet/blob/206ff224389c490d8798f660c9e79fe97ebb64cf/pywallet/utils/ethereum.py#L731-L745
def address(self, compressed=True, testnet=False): """ Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string """ version = '0x' return version + binascii.hexlify(self.keccak[12:]).decode('ascii')
[ "def", "address", "(", "self", ",", "compressed", "=", "True", ",", "testnet", "=", "False", ")", ":", "version", "=", "'0x'", "return", "version", "+", "binascii", ".", "hexlify", "(", "self", ".", "keccak", "[", "12", ":", "]", ")", ".", "decode", ...
Address property that returns the Base58Check encoded version of the HASH160. Args: compressed (bool): Whether or not the compressed key should be used. testnet (bool): Whether or not the key is intended for testnet usage. False indicates mainnet usage. Returns: bytes: Base58Check encoded string
[ "Address", "property", "that", "returns", "the", "Base58Check", "encoded", "version", "of", "the", "HASH160", "." ]
python
train
mardix/Mocha
mocha/utils.py
https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/utils.py#L67-L82
def is_username_valid(username): """ Check if a valid username. valid: oracle bill-gates steve.jobs micro_soft not valid Bill Gates - no space allowed me@yo.com - @ is not a valid character :param username: string :return: """ pattern = re.compile(r"^[a-zA-Z0-9_.-]+$") return bool(pattern.match(username))
[ "def", "is_username_valid", "(", "username", ")", ":", "pattern", "=", "re", ".", "compile", "(", "r\"^[a-zA-Z0-9_.-]+$\"", ")", "return", "bool", "(", "pattern", ".", "match", "(", "username", ")", ")" ]
Check if a valid username. valid: oracle bill-gates steve.jobs micro_soft not valid Bill Gates - no space allowed me@yo.com - @ is not a valid character :param username: string :return:
[ "Check", "if", "a", "valid", "username", ".", "valid", ":", "oracle", "bill", "-", "gates", "steve", ".", "jobs", "micro_soft", "not", "valid", "Bill", "Gates", "-", "no", "space", "allowed", "me" ]
python
train
asweigart/pyautogui
pyautogui/__init__.py
https://github.com/asweigart/pyautogui/blob/77524bd47334a89024013fd48e05151c3ac9289a/pyautogui/__init__.py#L210-L231
def _unpackXY(x, y): """If x is a sequence and y is None, returns x[0], y[0]. Else, returns x, y. On functions that receive a pair of x,y coordinates, they can be passed as separate arguments, or as a single two-element sequence. """ if isinstance(x, str): # x parameter is the string of an image filename to find and click on: x, y = center(locateOnScreen(x)) elif isinstance(x, collectionsSequence): if len(x) == 2: if y is None: x, y = x else: raise ValueError('When passing a sequence at the x argument, the y argument must not be passed (received {0}).'.format(repr(y))) else: raise ValueError('The supplied sequence must have exactly 2 elements ({0} were received).'.format(len(x))) else: pass # x and y are just number values return x, y
[ "def", "_unpackXY", "(", "x", ",", "y", ")", ":", "if", "isinstance", "(", "x", ",", "str", ")", ":", "# x parameter is the string of an image filename to find and click on:", "x", ",", "y", "=", "center", "(", "locateOnScreen", "(", "x", ")", ")", "elif", "...
If x is a sequence and y is None, returns x[0], y[0]. Else, returns x, y. On functions that receive a pair of x,y coordinates, they can be passed as separate arguments, or as a single two-element sequence.
[ "If", "x", "is", "a", "sequence", "and", "y", "is", "None", "returns", "x", "[", "0", "]", "y", "[", "0", "]", ".", "Else", "returns", "x", "y", "." ]
python
train
counsyl/django-pedant
pedant/decorators.py
https://github.com/counsyl/django-pedant/blob/9a9a77eacbceafebb150c80d81f78c9b19ed4d53/pedant/decorators.py#L257-L283
def log_template_errors(logger, log_level=logging.ERROR): """ Decorator to log template errors to the specified logger. @log_template_errors(logging.getLogger('mylogger'), logging.INFO) def my_view(*args): pass Will log template errors at INFO. The default log level is ERROR. """ if not (isinstance(log_level, int) and log_level in logging._levelNames): raise ValueError('Invalid log level %s' % log_level) decorators = [ _log_template_string_if_invalid(logger, log_level), _log_unicode_errors(logger, log_level), _always_strict_resolve, ] if django.VERSION < (1, 8): decorators.append(_patch_invalid_var_format_string) @decorator def function(f, *args, **kwargs): return reduce(__apply, decorators, f)(*args, **kwargs) return function
[ "def", "log_template_errors", "(", "logger", ",", "log_level", "=", "logging", ".", "ERROR", ")", ":", "if", "not", "(", "isinstance", "(", "log_level", ",", "int", ")", "and", "log_level", "in", "logging", ".", "_levelNames", ")", ":", "raise", "ValueErro...
Decorator to log template errors to the specified logger. @log_template_errors(logging.getLogger('mylogger'), logging.INFO) def my_view(*args): pass Will log template errors at INFO. The default log level is ERROR.
[ "Decorator", "to", "log", "template", "errors", "to", "the", "specified", "logger", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L1192-L1244
def mavlink_connection(device, baud=115200, source_system=255, planner_format=None, write=False, append=False, robust_parsing=True, notimestamps=False, input=True, dialect=None, autoreconnect=False, zero_time_base=False, retries=3, use_native=default_native): '''open a serial, UDP, TCP or file mavlink connection''' global mavfile_global if dialect is not None: set_dialect(dialect) if device.startswith('tcp:'): return mavtcp(device[4:], source_system=source_system, retries=retries, use_native=use_native) if device.startswith('tcpin:'): return mavtcpin(device[6:], source_system=source_system, retries=retries, use_native=use_native) if device.startswith('udpin:'): return mavudp(device[6:], input=True, source_system=source_system, use_native=use_native) if device.startswith('udpout:'): return mavudp(device[7:], input=False, source_system=source_system, use_native=use_native) if device.startswith('udpbcast:'): return mavudp(device[9:], input=False, source_system=source_system, use_native=use_native, broadcast=True) # For legacy purposes we accept the following syntax and let the caller to specify direction if device.startswith('udp:'): return mavudp(device[4:], input=input, source_system=source_system, use_native=use_native) if device.lower().endswith('.bin') or device.lower().endswith('.px4log'): # support dataflash logs from pymavlink import DFReader m = DFReader.DFReader_binary(device, zero_time_base=zero_time_base) mavfile_global = m return m if device.endswith('.log'): # support dataflash text logs from pymavlink import DFReader if DFReader.DFReader_is_text_log(device): m = DFReader.DFReader_text(device, zero_time_base=zero_time_base) mavfile_global = m return m # list of suffixes to prevent setting DOS paths as UDP sockets logsuffixes = ['mavlink', 'log', 'raw', 'tlog' ] suffix = device.split('.')[-1].lower() if device.find(':') != -1 and not suffix in logsuffixes: return mavudp(device, source_system=source_system, input=input, use_native=use_native) if 
os.path.isfile(device): if device.endswith(".elf") or device.find("/bin/") != -1: print("executing '%s'" % device) return mavchildexec(device, source_system=source_system, use_native=use_native) else: return mavlogfile(device, planner_format=planner_format, write=write, append=append, robust_parsing=robust_parsing, notimestamps=notimestamps, source_system=source_system, use_native=use_native) return mavserial(device, baud=baud, source_system=source_system, autoreconnect=autoreconnect, use_native=use_native)
[ "def", "mavlink_connection", "(", "device", ",", "baud", "=", "115200", ",", "source_system", "=", "255", ",", "planner_format", "=", "None", ",", "write", "=", "False", ",", "append", "=", "False", ",", "robust_parsing", "=", "True", ",", "notimestamps", ...
open a serial, UDP, TCP or file mavlink connection
[ "open", "a", "serial", "UDP", "TCP", "or", "file", "mavlink", "connection" ]
python
train
praekeltfoundation/seed-stage-based-messaging
subscriptions/tasks.py
https://github.com/praekeltfoundation/seed-stage-based-messaging/blob/6f0cacf0727ac2ed19877de214d58009c685b8fa/subscriptions/tasks.py#L337-L387
def post_send_process(context): """ Task to ensure subscription is bumped or converted """ if "error" in context: return context [deserialized_subscription] = serializers.deserialize( "json", context["subscription"] ) subscription = deserialized_subscription.object [messageset] = serializers.deserialize("json", context["messageset"]) messageset = messageset.object # Get set max set_max = messageset.messages.filter(lang=subscription.lang).count() logger.debug("set_max calculated - %s" % set_max) # Compare user position to max if subscription.next_sequence_number == set_max: with transaction.atomic(): # Mark current as completed logger.debug("marking current subscription as complete") subscription.completed = True subscription.active = False subscription.process_status = 2 # Completed deserialized_subscription.save( update_fields=("completed", "active", "process_status") ) # If next set defined create new subscription if messageset.next_set: logger.info("Creating new subscription for next set") newsub = Subscription.objects.create( identity=subscription.identity, lang=subscription.lang, messageset=messageset.next_set, schedule=messageset.next_set.default_schedule, ) logger.debug("Created Subscription <%s>" % newsub.id) else: # More in this set so increment by one logger.debug("incrementing next_sequence_number") subscription.next_sequence_number = F("next_sequence_number") + 1 logger.debug("setting process status back to 0") subscription.process_status = 0 logger.debug("saving subscription") deserialized_subscription.save( update_fields=("next_sequence_number", "process_status") ) # return response return "Subscription for %s updated" % str(subscription.id)
[ "def", "post_send_process", "(", "context", ")", ":", "if", "\"error\"", "in", "context", ":", "return", "context", "[", "deserialized_subscription", "]", "=", "serializers", ".", "deserialize", "(", "\"json\"", ",", "context", "[", "\"subscription\"", "]", ")",...
Task to ensure subscription is bumped or converted
[ "Task", "to", "ensure", "subscription", "is", "bumped", "or", "converted" ]
python
train
RaRe-Technologies/gensim-simserver
simserver/simserver.py
https://github.com/RaRe-Technologies/gensim-simserver/blob/e7e59e836ef6d9da019a8c6b218ef0bdd998b2da/simserver/simserver.py#L941-L947
def drop_index(self, keep_model=True): """Drop all indexed documents from the session. Optionally, drop model too.""" self.check_session() result = self.session.drop_index(keep_model) if self.autosession: self.commit() return result
[ "def", "drop_index", "(", "self", ",", "keep_model", "=", "True", ")", ":", "self", ".", "check_session", "(", ")", "result", "=", "self", ".", "session", ".", "drop_index", "(", "keep_model", ")", "if", "self", ".", "autosession", ":", "self", ".", "c...
Drop all indexed documents from the session. Optionally, drop model too.
[ "Drop", "all", "indexed", "documents", "from", "the", "session", ".", "Optionally", "drop", "model", "too", "." ]
python
train
ergoithz/browsepy
browsepy/manager.py
https://github.com/ergoithz/browsepy/blob/1612a930ef220fae507e1b152c531707e555bd92/browsepy/manager.py#L438-L453
def extract_plugin_arguments(self, plugin): ''' Given a plugin name, extracts its registered_arguments as an iterable of (args, kwargs) tuples. :param plugin: plugin name :type plugin: str :returns: iterable if (args, kwargs) tuples. :rtype: iterable ''' module = self.import_plugin(plugin) if hasattr(module, 'register_arguments'): manager = ArgumentPluginManager() module.register_arguments(manager) return manager._argparse_argkwargs return ()
[ "def", "extract_plugin_arguments", "(", "self", ",", "plugin", ")", ":", "module", "=", "self", ".", "import_plugin", "(", "plugin", ")", "if", "hasattr", "(", "module", ",", "'register_arguments'", ")", ":", "manager", "=", "ArgumentPluginManager", "(", ")", ...
Given a plugin name, extracts its registered_arguments as an iterable of (args, kwargs) tuples. :param plugin: plugin name :type plugin: str :returns: iterable if (args, kwargs) tuples. :rtype: iterable
[ "Given", "a", "plugin", "name", "extracts", "its", "registered_arguments", "as", "an", "iterable", "of", "(", "args", "kwargs", ")", "tuples", "." ]
python
train
markovmodel/msmtools
msmtools/analysis/sparse/stationary_vector.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/sparse/stationary_vector.py#L102-L125
def stationary_distribution_from_eigenvector(T, ncv=None): r"""Compute stationary distribution of stochastic matrix T. The stationary distribution is the left eigenvector corresponding to the 1 non-degenerate eigenvalue :math: `\lambda=1`. Input: ------ T : numpy array, shape(d,d) Transition matrix (stochastic matrix). ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- mu : numpy array, shape(d,) Vector of stationary probabilities. """ vals, vecs = scipy.sparse.linalg.eigs(T.transpose(), k=1, which='LR', ncv=ncv) nu = vecs[:, 0].real mu = nu / np.sum(nu) return mu
[ "def", "stationary_distribution_from_eigenvector", "(", "T", ",", "ncv", "=", "None", ")", ":", "vals", ",", "vecs", "=", "scipy", ".", "sparse", ".", "linalg", ".", "eigs", "(", "T", ".", "transpose", "(", ")", ",", "k", "=", "1", ",", "which", "=",...
r"""Compute stationary distribution of stochastic matrix T. The stationary distribution is the left eigenvector corresponding to the 1 non-degenerate eigenvalue :math: `\lambda=1`. Input: ------ T : numpy array, shape(d,d) Transition matrix (stochastic matrix). ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- mu : numpy array, shape(d,) Vector of stationary probabilities.
[ "r", "Compute", "stationary", "distribution", "of", "stochastic", "matrix", "T", "." ]
python
train
google/tangent
tangent/utils.py
https://github.com/google/tangent/blob/6533e83af09de7345d1b438512679992f080dcc9/tangent/utils.py#L106-L117
def unbroadcast(array, like): """Reverse the broadcasting operation. Args: array: An array. like: An array that could have been broadcasted to the shape of array. Returns: Tensor with certain dimensions summed to match the shape of `like`. """ unbroadcaster = unbroadcasters[type(array)] return unbroadcaster(array, like)
[ "def", "unbroadcast", "(", "array", ",", "like", ")", ":", "unbroadcaster", "=", "unbroadcasters", "[", "type", "(", "array", ")", "]", "return", "unbroadcaster", "(", "array", ",", "like", ")" ]
Reverse the broadcasting operation. Args: array: An array. like: An array that could have been broadcasted to the shape of array. Returns: Tensor with certain dimensions summed to match the shape of `like`.
[ "Reverse", "the", "broadcasting", "operation", "." ]
python
train
ramrod-project/database-brain
schema/brain/checks.py
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/checks.py#L86-L101
def successfuly_encodes(msg, raise_err=False): """ boolean response if a message contains correct information to serialize :param msg: <proto object> :param raise_err: <bool> :return: <bool> """ result = True try: msg.SerializeToString() except EncodeError as encode_error: if raise_err: raise encode_error result = False return result
[ "def", "successfuly_encodes", "(", "msg", ",", "raise_err", "=", "False", ")", ":", "result", "=", "True", "try", ":", "msg", ".", "SerializeToString", "(", ")", "except", "EncodeError", "as", "encode_error", ":", "if", "raise_err", ":", "raise", "encode_err...
boolean response if a message contains correct information to serialize :param msg: <proto object> :param raise_err: <bool> :return: <bool>
[ "boolean", "response", "if", "a", "message", "contains", "correct", "information", "to", "serialize" ]
python
train
portfors-lab/sparkle
sparkle/run/abstract_acquisition.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/abstract_acquisition.py#L64-L135
def set(self, **kwargs): """Sets an internal setting for acquistion, using keywords. Available parameters to set: :param acqtime: duration of recording (input) window (seconds) :type acqtime: float :param aifs: sample rate of the recording (input) operation (Hz) :type aifs: int :param aochan: AO (generation) channel name :type aochan: str :param aichan: AI (recording) channel name :type aichan: str :param nreps: number of repetitions for each unique stimulus :type nreps: int :param binsz: time bin duration for spike sorting (seconds) :type binsz: float :param caldb: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type caldb: float :param calv: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type calv: float :param datafile: a reference to an open file to save data to :type datafile: :class:`AcquisitionData<sparkle.data.dataobjects.AcquisitionData>` :param average: whether to average repetitions of a trace, saving only the averaged signal :type average: bool :param reject: whether to reject values higher than a defined threshold. 
Only used while average is true :type reject: bool :param rejectrate: the value to base artifact rejection on :type rejectrate: float """ self.player_lock.acquire() if 'acqtime' in kwargs: self.player.set_aidur(kwargs['acqtime']) if 'aifs' in kwargs: self.player.set_aifs(kwargs['aifs']) self.aifs = kwargs['aifs'] if 'aifs' in kwargs or 'acqtime' in kwargs: t = kwargs.get('acqtime', self.player.get_aidur()) npoints = t*float(kwargs.get('aifs', self.player.get_aifs())) self.aitimes = np.linspace(0, t, npoints) if 'trigger' in kwargs: self.player.set_trigger(kwargs['trigger']) self.player_lock.release() if 'aochan' in kwargs: self.aochan = kwargs['aochan'] if 'aichan' in kwargs: self.aichan = kwargs['aichan'] if 'binsz' in kwargs: self.binsz = kwargs['binsz'] if 'save' in kwargs: self.save_data = kwargs['save'] if 'caldb' in kwargs: self.caldb = kwargs['caldb'] if 'calv' in kwargs: self.calv = kwargs['calv'] if 'calf' in kwargs: self.calf = kwargs['calf'] if 'caldb' in kwargs or 'calv' in kwargs: self.update_reference_voltage() if 'datafile' in kwargs: self.datafile = kwargs['datafile'] if 'reprate' in kwargs: self.reprate = kwargs['reprate'] if 'save' in kwargs: self.save_data = kwargs['save'] if 'average' in kwargs: self.average = kwargs['average'] if 'reject' in kwargs: self.reject = kwargs['reject'] if 'rejectrate' in kwargs: self.rejectrate = kwargs['rejectrate']
[ "def", "set", "(", "self", ",", "*", "*", "kwargs", ")", ":", "self", ".", "player_lock", ".", "acquire", "(", ")", "if", "'acqtime'", "in", "kwargs", ":", "self", ".", "player", ".", "set_aidur", "(", "kwargs", "[", "'acqtime'", "]", ")", "if", "'...
Sets an internal setting for acquistion, using keywords. Available parameters to set: :param acqtime: duration of recording (input) window (seconds) :type acqtime: float :param aifs: sample rate of the recording (input) operation (Hz) :type aifs: int :param aochan: AO (generation) channel name :type aochan: str :param aichan: AI (recording) channel name :type aichan: str :param nreps: number of repetitions for each unique stimulus :type nreps: int :param binsz: time bin duration for spike sorting (seconds) :type binsz: float :param caldb: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type caldb: float :param calv: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type calv: float :param datafile: a reference to an open file to save data to :type datafile: :class:`AcquisitionData<sparkle.data.dataobjects.AcquisitionData>` :param average: whether to average repetitions of a trace, saving only the averaged signal :type average: bool :param reject: whether to reject values higher than a defined threshold. Only used while average is true :type reject: bool :param rejectrate: the value to base artifact rejection on :type rejectrate: float
[ "Sets", "an", "internal", "setting", "for", "acquistion", "using", "keywords", "." ]
python
train
facetoe/zenpy
zenpy/lib/api.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api.py#L734-L742
def group_memberships(self, user, include=None): """ Retrieve the group memberships for this user. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id """ return self._query_zendesk(self.endpoint.group_memberships, 'group_membership', id=user, include=include)
[ "def", "group_memberships", "(", "self", ",", "user", ",", "include", "=", "None", ")", ":", "return", "self", ".", "_query_zendesk", "(", "self", ".", "endpoint", ".", "group_memberships", ",", "'group_membership'", ",", "id", "=", "user", ",", "include", ...
Retrieve the group memberships for this user. :param include: list of objects to sideload. `Side-loading API Docs <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__. :param user: User object or id
[ "Retrieve", "the", "group", "memberships", "for", "this", "user", "." ]
python
train
pyviz/holoviews
holoviews/util/__init__.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/util/__init__.py#L411-L441
def _builder_reprs(cls, options, namespace=None, ns=None): """ Given a list of Option objects (such as those returned from OptsSpec.parse_options) or an %opts or %%opts magic string, return a list of corresponding option builder reprs. The namespace is typically given as 'hv' if fully qualified namespaces are desired. """ if isinstance(options, basestring): from .parser import OptsSpec if ns is None: try: ns = get_ipython().user_ns # noqa except: ns = globals() options = options.replace('%%opts','').replace('%opts','') options = OptsSpec.parse_options(options, ns=ns) reprs = [] ns = '{namespace}.'.format(namespace=namespace) if namespace else '' for option in options: kws = ', '.join('%s=%r' % (k,option.kwargs[k]) for k in sorted(option.kwargs)) if '.' in option.key: element = option.key.split('.')[0] spec = repr('.'.join(option.key.split('.')[1:])) + ', ' else: element = option.key spec = '' opts_format = '{ns}opts.{element}({spec}{kws})' reprs.append(opts_format.format(ns=ns, spec=spec, kws=kws, element=element)) return reprs
[ "def", "_builder_reprs", "(", "cls", ",", "options", ",", "namespace", "=", "None", ",", "ns", "=", "None", ")", ":", "if", "isinstance", "(", "options", ",", "basestring", ")", ":", "from", ".", "parser", "import", "OptsSpec", "if", "ns", "is", "None"...
Given a list of Option objects (such as those returned from OptsSpec.parse_options) or an %opts or %%opts magic string, return a list of corresponding option builder reprs. The namespace is typically given as 'hv' if fully qualified namespaces are desired.
[ "Given", "a", "list", "of", "Option", "objects", "(", "such", "as", "those", "returned", "from", "OptsSpec", ".", "parse_options", ")", "or", "an", "%opts", "or", "%%opts", "magic", "string", "return", "a", "list", "of", "corresponding", "option", "builder",...
python
train
edx/edx-enterprise
integrated_channels/xapi/management/commands/send_course_enrollments.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/integrated_channels/xapi/management/commands/send_course_enrollments.py#L111-L129
def send_xapi_statements(self, lrs_configuration, days): """ Send xAPI analytics data of the enterprise learners to the given LRS. Arguments: lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations of the LRS where to send xAPI learner analytics. days (int): Include course enrollment of this number of days. """ for course_enrollment in self.get_course_enrollments(lrs_configuration.enterprise_customer, days): try: send_course_enrollment_statement(lrs_configuration, course_enrollment) except ClientError: LOGGER.exception( 'Client error while sending course enrollment to xAPI for' ' enterprise customer {enterprise_customer}.'.format( enterprise_customer=lrs_configuration.enterprise_customer.name ) )
[ "def", "send_xapi_statements", "(", "self", ",", "lrs_configuration", ",", "days", ")", ":", "for", "course_enrollment", "in", "self", ".", "get_course_enrollments", "(", "lrs_configuration", ".", "enterprise_customer", ",", "days", ")", ":", "try", ":", "send_cou...
Send xAPI analytics data of the enterprise learners to the given LRS. Arguments: lrs_configuration (XAPILRSConfiguration): Configuration object containing LRS configurations of the LRS where to send xAPI learner analytics. days (int): Include course enrollment of this number of days.
[ "Send", "xAPI", "analytics", "data", "of", "the", "enterprise", "learners", "to", "the", "given", "LRS", "." ]
python
valid
XuShaohua/bcloud
bcloud/DownloadPage.py
https://github.com/XuShaohua/bcloud/blob/4b54e0fdccf2b3013285fef05c97354cfa31697b/bcloud/DownloadPage.py#L472-L487
def scan_tasks(self, ignore_shutdown=False): '''扫描所有下载任务, 并在需要时启动新的下载.''' for row in self.liststore: if len(self.workers.keys()) >= self.app.profile['concurr-download']: break if row[STATE_COL] == State.WAITING: self.start_worker(row) if not self.shutdown_button.get_active() or ignore_shutdown: return # Shutdown system after all tasks have finished for row in self.liststore: if (row[STATE_COL] not in (State.PAUSED, State.FINISHED, State.CANCELED)): return self.shutdown.shutdown()
[ "def", "scan_tasks", "(", "self", ",", "ignore_shutdown", "=", "False", ")", ":", "for", "row", "in", "self", ".", "liststore", ":", "if", "len", "(", "self", ".", "workers", ".", "keys", "(", ")", ")", ">=", "self", ".", "app", ".", "profile", "["...
扫描所有下载任务, 并在需要时启动新的下载.
[ "扫描所有下载任务", "并在需要时启动新的下载", "." ]
python
train
GoogleCloudPlatform/datastore-ndb-python
ndb/key.py
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/key.py#L574-L591
def get_async(self, **ctx_options): """Return a Future whose result is the entity for this Key. If no such entity exists, a Future is still returned, and the Future's eventual return result be None. """ from . import model, tasklets ctx = tasklets.get_context() cls = model.Model._kind_map.get(self.kind()) if cls: cls._pre_get_hook(self) fut = ctx.get(self, **ctx_options) if cls: post_hook = cls._post_get_hook if not cls._is_default_hook(model.Model._default_post_get_hook, post_hook): fut.add_immediate_callback(post_hook, self, fut) return fut
[ "def", "get_async", "(", "self", ",", "*", "*", "ctx_options", ")", ":", "from", ".", "import", "model", ",", "tasklets", "ctx", "=", "tasklets", ".", "get_context", "(", ")", "cls", "=", "model", ".", "Model", ".", "_kind_map", ".", "get", "(", "sel...
Return a Future whose result is the entity for this Key. If no such entity exists, a Future is still returned, and the Future's eventual return result be None.
[ "Return", "a", "Future", "whose", "result", "is", "the", "entity", "for", "this", "Key", "." ]
python
train
rapidpro/expressions
python/temba_expressions/evaluator.py
https://github.com/rapidpro/expressions/blob/b03d91ec58fc328960bce90ecb5fa49dcf467627/python/temba_expressions/evaluator.py#L359-L370
def visitFunctionCall(self, ctx): """ expression : fnname LPAREN parameters? RPAREN """ func_name = ctx.fnname().getText() if ctx.parameters() is not None: parameters = self.visit(ctx.parameters()) else: parameters = [] return self._functions.invoke_function(self._eval_context, func_name, parameters)
[ "def", "visitFunctionCall", "(", "self", ",", "ctx", ")", ":", "func_name", "=", "ctx", ".", "fnname", "(", ")", ".", "getText", "(", ")", "if", "ctx", ".", "parameters", "(", ")", "is", "not", "None", ":", "parameters", "=", "self", ".", "visit", ...
expression : fnname LPAREN parameters? RPAREN
[ "expression", ":", "fnname", "LPAREN", "parameters?", "RPAREN" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datamodel/obo_parser.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/obo_parser.py#L221-L248
def _init_optional_attrs(self, optional_attrs): """Prepare to store data from user-desired optional fields. Not loading these optional fields by default saves in space and speed. But allow the possibility for saving these fields, if the user desires, Including: comment consider def is_class_level is_metadata_tag is_transitive relationship replaced_by subset synonym transitive_over xref """ # Written by DV Klopfenstein # Required attributes are always loaded. All others are optionally loaded. self.attrs_req = ['id', 'alt_id', 'name', 'namespace', 'is_a', 'is_obsolete'] self.attrs_scalar = ['comment', 'defn', 'is_class_level', 'is_metadata_tag', 'is_transitive', 'transitive_over'] self.attrs_nested = frozenset(['relationship']) # Allow user to specify either: 'def' or 'defn' # 'def' is an obo field name, but 'defn' is legal Python attribute name fnc = lambda aopt: aopt if aopt != "defn" else "def" if optional_attrs is None: optional_attrs = [] elif isinstance(optional_attrs, str): optional_attrs = [fnc(optional_attrs)] if optional_attrs not in self.attrs_req else [] elif isinstance(optional_attrs, list) or isinstance(optional_attrs, set): optional_attrs = set([fnc(f) for f in optional_attrs if f not in self.attrs_req]) else: raise Exception("optional_attrs arg MUST BE A str, list, or set.") self.optional_attrs = optional_attrs
[ "def", "_init_optional_attrs", "(", "self", ",", "optional_attrs", ")", ":", "# Written by DV Klopfenstein", "# Required attributes are always loaded. All others are optionally loaded.", "self", ".", "attrs_req", "=", "[", "'id'", ",", "'alt_id'", ",", "'name'", ",", "'name...
Prepare to store data from user-desired optional fields. Not loading these optional fields by default saves in space and speed. But allow the possibility for saving these fields, if the user desires, Including: comment consider def is_class_level is_metadata_tag is_transitive relationship replaced_by subset synonym transitive_over xref
[ "Prepare", "to", "store", "data", "from", "user", "-", "desired", "optional", "fields", "." ]
python
train
yougov/elastic2-doc-manager
mongo_connector/doc_managers/elastic2_doc_manager.py
https://github.com/yougov/elastic2-doc-manager/blob/ad92138d1fd6656bb2e71cb5cc840f9ba0109c49/mongo_connector/doc_managers/elastic2_doc_manager.py#L458-L464
def _stream_search(self, *args, **kwargs): """Helper method for iterating over ES search results.""" for hit in scan( self.elastic, query=kwargs.pop("body", None), scroll="10m", **kwargs ): hit["_source"]["_id"] = hit["_id"] yield hit["_source"]
[ "def", "_stream_search", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "hit", "in", "scan", "(", "self", ".", "elastic", ",", "query", "=", "kwargs", ".", "pop", "(", "\"body\"", ",", "None", ")", ",", "scroll", "=", "\...
Helper method for iterating over ES search results.
[ "Helper", "method", "for", "iterating", "over", "ES", "search", "results", "." ]
python
train
vanheeringen-lab/gimmemotifs
gimmemotifs/fasta.py
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/fasta.py#L46-L69
def get_random(self, n, l=None): """ Return n random sequences from this Fasta object """ random_f = Fasta() if l: ids = self.ids[:] random.shuffle(ids) i = 0 while (i < n) and (len(ids) > 0): seq_id = ids.pop() if (len(self[seq_id]) >= l): start = random.randint(0, len(self[seq_id]) - l) random_f["random%s" % (i + 1)] = self[seq_id][start:start+l] i += 1 if len(random_f) != n: sys.stderr.write("Not enough sequences of required length") return else: return random_f else: choice = random.sample(self.ids, n) for i in range(n): random_f[choice[i]] = self[choice[i]] return random_f
[ "def", "get_random", "(", "self", ",", "n", ",", "l", "=", "None", ")", ":", "random_f", "=", "Fasta", "(", ")", "if", "l", ":", "ids", "=", "self", ".", "ids", "[", ":", "]", "random", ".", "shuffle", "(", "ids", ")", "i", "=", "0", "while",...
Return n random sequences from this Fasta object
[ "Return", "n", "random", "sequences", "from", "this", "Fasta", "object" ]
python
train
heitzmann/gdspy
examples/photonics.py
https://github.com/heitzmann/gdspy/blob/2c8d1313248c544e2066d19095b7ad7158c79bc9/examples/photonics.py#L14-L69
def waveguide(path, points, finish, bend_radius, number_of_points=0.01, direction=None, layer=0, datatype=0): ''' Easy waveguide creation tool with absolute positioning. path : starting `gdspy.Path` points : coordinates along which the waveguide will travel finish : end point of the waveguide bend_radius : radius of the turns in the waveguide number_of_points : same as in `path.turn` direction : starting direction layer : GDSII layer number datatype : GDSII datatype number Return `path`. ''' if direction is not None: path.direction = direction axis = 0 if path.direction[1] == 'x' else 1 points.append(finish[(axis + len(points)) % 2]) n = len(points) if points[0] > (path.x, path.y)[axis]: path.direction = ['+x', '+y'][axis] else: path.direction = ['-x', '-y'][axis] for i in range(n): path.segment( abs(points[i] - (path.x, path.y)[axis]) - bend_radius, layer=layer, datatype=datatype) axis = 1 - axis if i < n - 1: goto = points[i + 1] else: goto = finish[axis] if (goto > (path.x, path.y)[axis]) ^ ((path.direction[0] == '+') ^ (path.direction[1] == 'x')): bend = 'l' else: bend = 'r' path.turn( bend_radius, bend, number_of_points=number_of_points, layer=layer, datatype=datatype) return path.segment( abs(finish[axis] - (path.x, path.y)[axis]), layer=layer, datatype=datatype)
[ "def", "waveguide", "(", "path", ",", "points", ",", "finish", ",", "bend_radius", ",", "number_of_points", "=", "0.01", ",", "direction", "=", "None", ",", "layer", "=", "0", ",", "datatype", "=", "0", ")", ":", "if", "direction", "is", "not", "None",...
Easy waveguide creation tool with absolute positioning. path : starting `gdspy.Path` points : coordinates along which the waveguide will travel finish : end point of the waveguide bend_radius : radius of the turns in the waveguide number_of_points : same as in `path.turn` direction : starting direction layer : GDSII layer number datatype : GDSII datatype number Return `path`.
[ "Easy", "waveguide", "creation", "tool", "with", "absolute", "positioning", "." ]
python
train
trustar/trustar-python
trustar/report_client.py
https://github.com/trustar/trustar-python/blob/707d51adc58d68aed7de12a4ca37949cb75cf122/trustar/report_client.py#L223-L240
def get_correlated_report_ids(self, indicators): """ DEPRECATED! Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators. :param indicators: A list of indicator values to retrieve correlated reports for. :return: The list of IDs of reports that correlated. Example: >>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"]) >>> print(report_ids) ["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"] """ params = {'indicators': indicators} resp = self._client.get("reports/correlate", params=params) return resp.json()
[ "def", "get_correlated_report_ids", "(", "self", ",", "indicators", ")", ":", "params", "=", "{", "'indicators'", ":", "indicators", "}", "resp", "=", "self", ".", "_client", ".", "get", "(", "\"reports/correlate\"", ",", "params", "=", "params", ")", "retur...
DEPRECATED! Retrieves a list of the IDs of all TruSTAR reports that contain the searched indicators. :param indicators: A list of indicator values to retrieve correlated reports for. :return: The list of IDs of reports that correlated. Example: >>> report_ids = ts.get_correlated_report_ids(["wannacry", "www.evil.com"]) >>> print(report_ids) ["e3bc6921-e2c8-42eb-829e-eea8da2d3f36", "4d04804f-ff82-4a0b-8586-c42aef2f6f73"]
[ "DEPRECATED!", "Retrieves", "a", "list", "of", "the", "IDs", "of", "all", "TruSTAR", "reports", "that", "contain", "the", "searched", "indicators", "." ]
python
train
awslabs/sockeye
sockeye/training.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/training.py#L813-L873
def _step(self, model: TrainingModel, batch: mx.io.DataBatch, checkpoint_interval: int, metric_train: mx.metric.EvalMetric, metric_loss: Optional[mx.metric.EvalMetric] = None): """ Performs an update to model given a batch and updates metrics. """ if model.monitor is not None: model.monitor.tic() #################### # Forward & Backward #################### model.run_forward_backward(batch, metric_train) # If using an extended optimizer, provide extra state information about the current batch optimizer = model.optimizer if metric_loss is not None and isinstance(optimizer, SockeyeOptimizer): # Loss for this batch metric_loss.reset() metric_loss.update(batch.label, model.module.get_outputs()) [(_, m_val)] = metric_loss.get_name_value() batch_state = BatchState(metric_val=m_val) optimizer.pre_update_batch(batch_state) ######## # UPDATE ######## if self.update_interval == 1 or self.state.batches % self.update_interval == 0: # Gradient rescaling gradient_norm = None if self.state.updates > 0 and (self.state.updates + 1) % checkpoint_interval == 0: # compute values for logging to metrics (before rescaling...) gradient_norm = self.state.gradient_norm = model.get_global_gradient_norm() self.state.gradients = model.get_gradients() # note: C.GRADIENT_CLIPPING_TYPE_ABS is handled by the mxnet optimizer directly if self.optimizer_config.gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_NORM: if gradient_norm is None: gradient_norm = model.get_global_gradient_norm() # clip gradients if gradient_norm > self.optimizer_config.gradient_clipping_threshold: ratio = self.optimizer_config.gradient_clipping_threshold / gradient_norm model.rescale_gradients(ratio) model.update() if self.update_interval > 1: model.zero_gradients() self.state.updates += 1 if model.monitor is not None: results = model.monitor.toc() if results: for _, k, v in results: logger.info('Monitor: Batch [{:d}] {:s} {:s}'.format(self.state.updates, k, v))
[ "def", "_step", "(", "self", ",", "model", ":", "TrainingModel", ",", "batch", ":", "mx", ".", "io", ".", "DataBatch", ",", "checkpoint_interval", ":", "int", ",", "metric_train", ":", "mx", ".", "metric", ".", "EvalMetric", ",", "metric_loss", ":", "Opt...
Performs an update to model given a batch and updates metrics.
[ "Performs", "an", "update", "to", "model", "given", "a", "batch", "and", "updates", "metrics", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/qos_mpls/map_apply/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/qos_mpls/map_apply/__init__.py#L166-L187
def _set_apply_dscp_exp_map_name(self, v, load=False): """ Setter method for apply_dscp_exp_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_dscp_exp_map_name (container) If this variable is read-only (config: false) in the source YANG file, then _set_apply_dscp_exp_map_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_apply_dscp_exp_map_name() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=apply_dscp_exp_map_name.apply_dscp_exp_map_name, is_container='container', presence=False, yang_name="apply-dscp-exp-map-name", rest_name="dscp-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply dscp exp map', u'cli-sequence-commands': None, u'alt-name': u'dscp-exp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """apply_dscp_exp_map_name must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=apply_dscp_exp_map_name.apply_dscp_exp_map_name, is_container='container', presence=False, yang_name="apply-dscp-exp-map-name", rest_name="dscp-exp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply dscp exp map', u'cli-sequence-commands': None, u'alt-name': u'dscp-exp', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='container', is_config=True)""", }) self.__apply_dscp_exp_map_name = t if hasattr(self, '_set'): self._set()
[ "def", "_set_apply_dscp_exp_map_name", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ...
Setter method for apply_dscp_exp_map_name, mapped from YANG variable /qos_mpls/map_apply/apply_dscp_exp_map_name (container) If this variable is read-only (config: false) in the source YANG file, then _set_apply_dscp_exp_map_name is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_apply_dscp_exp_map_name() directly.
[ "Setter", "method", "for", "apply_dscp_exp_map_name", "mapped", "from", "YANG", "variable", "/", "qos_mpls", "/", "map_apply", "/", "apply_dscp_exp_map_name", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false"...
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L9299-L9314
def pckfrm(pck, ids): """ Find the set of reference frame class ID codes of all frames in a specified binary PCK file. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pckfrm_c.html :param pck: Name of PCK file. :type pck: str :param ids: Set of frame class ID codes of frames in PCK file. :type ids: SpiceCell """ pck = stypes.stringToCharP(pck) assert isinstance(ids, stypes.SpiceCell) assert ids.dtype == 2 libspice.pckfrm_c(pck, ctypes.byref(ids))
[ "def", "pckfrm", "(", "pck", ",", "ids", ")", ":", "pck", "=", "stypes", ".", "stringToCharP", "(", "pck", ")", "assert", "isinstance", "(", "ids", ",", "stypes", ".", "SpiceCell", ")", "assert", "ids", ".", "dtype", "==", "2", "libspice", ".", "pckf...
Find the set of reference frame class ID codes of all frames in a specified binary PCK file. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pckfrm_c.html :param pck: Name of PCK file. :type pck: str :param ids: Set of frame class ID codes of frames in PCK file. :type ids: SpiceCell
[ "Find", "the", "set", "of", "reference", "frame", "class", "ID", "codes", "of", "all", "frames", "in", "a", "specified", "binary", "PCK", "file", "." ]
python
train
apache/incubator-heron
heron/tools/tracker/src/python/javaobj.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/javaobj.py#L269-L344
def do_classdesc(self, parent=None, ident=0): """do_classdesc""" # TC_CLASSDESC className serialVersionUID newHandle classDescInfo # classDescInfo: # classDescFlags fields classAnnotation superClassDesc # classDescFlags: # (byte) // Defined in Terminal Symbols and Constants # fields: # (short)<count> fieldDesc[count] # fieldDesc: # primitiveDesc # objectDesc # primitiveDesc: # prim_typecode fieldName # objectDesc: # obj_typecode fieldName className1 clazz = JavaClass() log_debug("[classdesc]", ident) ba = self._readString() clazz.name = ba log_debug("Class name: %s" % ba, ident) (serialVersionUID, newHandle, classDescFlags) = self._readStruct(">LLB") clazz.serialVersionUID = serialVersionUID clazz.flags = classDescFlags self._add_reference(clazz) log_debug("Serial: 0x%X newHandle: 0x%X.\ classDescFlags: 0x%X" % (serialVersionUID, newHandle, classDescFlags), ident) (length, ) = self._readStruct(">H") log_debug("Fields num: 0x%X" % length, ident) clazz.fields_names = [] clazz.fields_types = [] for _ in range(length): (typecode, ) = self._readStruct(">B") field_name = self._readString() field_type = None field_type = self._convert_char_to_type(typecode) if field_type == self.TYPE_ARRAY: _, field_type = self._read_and_exec_opcode( ident=ident+1, expect=[self.TC_STRING, self.TC_REFERENCE]) assert isinstance(field_type, str) # if field_type is not None: # field_type = "array of " + field_type # else: # field_type = "array of None" elif field_type == self.TYPE_OBJECT: _, field_type = self._read_and_exec_opcode( ident=ident+1, expect=[self.TC_STRING, self.TC_REFERENCE]) assert isinstance(field_type, str) log_debug("FieldName: 0x%X" % typecode + " " + str(field_name) + " " + str(field_type), ident) assert field_name is not None assert field_type is not None clazz.fields_names.append(field_name) clazz.fields_types.append(field_type) # pylint: disable=protected-access if parent: parent.__fields = clazz.fields_names parent.__types = clazz.fields_types # classAnnotation (opid, 
) = self._readStruct(">B") log_debug("OpCode: 0x%X" % opid, ident) if opid != self.TC_ENDBLOCKDATA: raise NotImplementedError("classAnnotation isn't implemented yet") # superClassDesc _, superclassdesc = self._read_and_exec_opcode( ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_NULL, self.TC_REFERENCE]) log_debug(str(superclassdesc), ident) clazz.superclass = superclassdesc return clazz
[ "def", "do_classdesc", "(", "self", ",", "parent", "=", "None", ",", "ident", "=", "0", ")", ":", "# TC_CLASSDESC className serialVersionUID newHandle classDescInfo", "# classDescInfo:", "# classDescFlags fields classAnnotation superClassDesc", "# classDescFlags:", "# (byte) ...
do_classdesc
[ "do_classdesc" ]
python
valid
jldantas/libmft
libmft/attribute.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L1439-L1449
def _entry_allocated_bitmap(self, entry_number): """Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise. """ index, offset = divmod(entry_number, 8) return bool(self._bitmap[index] & (1 << offset))
[ "def", "_entry_allocated_bitmap", "(", "self", ",", "entry_number", ")", ":", "index", ",", "offset", "=", "divmod", "(", "entry_number", ",", "8", ")", "return", "bool", "(", "self", ".", "_bitmap", "[", "index", "]", "&", "(", "1", "<<", "offset", ")...
Checks if a particular index is allocated. Args: entry_number (int): Index to verify Returns: bool: True if it is allocated, False otherwise.
[ "Checks", "if", "a", "particular", "index", "is", "allocated", "." ]
python
train
mediawiki-utilities/python-mwreverts
mwreverts/historical_dict.py
https://github.com/mediawiki-utilities/python-mwreverts/blob/d379ac941e14e235ad82a48bd445a3dfa6cc022e/mwreverts/historical_dict.py#L28-L52
def insert(self, key, value): '''Adds a new key-value pair. Returns any discarded values.''' # Add to history and catch expectorate if len(self.history) == self.maxsize: expectorate = self.history[0] else: expectorate = None self.history.append((key, value)) # Add to the appropriate list of values if key in self: super().__getitem__(key).append(value) else: super().__setitem__(key, [value]) # Clean up old values if expectorate is not None: old_key, old_value = expectorate super().__getitem__(old_key).pop(0) if len(super().__getitem__(old_key)) == 0: super().__delitem__(old_key) return (old_key, old_value)
[ "def", "insert", "(", "self", ",", "key", ",", "value", ")", ":", "# Add to history and catch expectorate", "if", "len", "(", "self", ".", "history", ")", "==", "self", ".", "maxsize", ":", "expectorate", "=", "self", ".", "history", "[", "0", "]", "else...
Adds a new key-value pair. Returns any discarded values.
[ "Adds", "a", "new", "key", "-", "value", "pair", ".", "Returns", "any", "discarded", "values", "." ]
python
train
janpipek/physt
physt/plotting/matplotlib.py
https://github.com/janpipek/physt/blob/6dd441b073514e7728235f50b2352d56aacf38d4/physt/plotting/matplotlib.py#L961-L984
def _add_ticks(ax: Axes, h1: Histogram1D, kwargs: dict): """Customize ticks for an axis (1D histogram). Parameters ---------- ticks: {"center", "edge"}, optional Position of the ticks tick_handler: Callable[[Histogram1D, float, float], Tuple[List[float], List[str]]] ... """ ticks = kwargs.pop("ticks", None) tick_handler = kwargs.pop("tick_handler", None) if tick_handler: if ticks: raise ValueError("Cannot specify both tick and tick_handler") ticks, labels = tick_handler(h1, *ax.get_xlim()) ax.set_xticks(ticks) ax.set_xticklabels(labels) if ticks == "center": ax.set_xticks(h1.bin_centers) if ticks == "edge": ax.set_xticks(h1.bin_left_edges)
[ "def", "_add_ticks", "(", "ax", ":", "Axes", ",", "h1", ":", "Histogram1D", ",", "kwargs", ":", "dict", ")", ":", "ticks", "=", "kwargs", ".", "pop", "(", "\"ticks\"", ",", "None", ")", "tick_handler", "=", "kwargs", ".", "pop", "(", "\"tick_handler\""...
Customize ticks for an axis (1D histogram). Parameters ---------- ticks: {"center", "edge"}, optional Position of the ticks tick_handler: Callable[[Histogram1D, float, float], Tuple[List[float], List[str]]] ...
[ "Customize", "ticks", "for", "an", "axis", "(", "1D", "histogram", ")", "." ]
python
train
Cognexa/cxflow
cxflow/hooks/compute_stats.py
https://github.com/Cognexa/cxflow/blob/dd609e6b0bd854424a8f86781dd77801a13038f9/cxflow/hooks/compute_stats.py#L76-L90
def _compute_aggregation(aggregation: str, data: Iterable[Any]): """ Compute the specified aggregation on the given data. :param aggregation: the name of an arbitrary NumPy function (e.g., mean, max, median, nanmean, ...) or one of :py:attr:`EXTRA_AGGREGATIONS`. :param data: data to be aggregated :raise ValueError: if the specified aggregation is not supported or found in NumPy """ ComputeStats._raise_check_aggregation(aggregation) if aggregation == 'nanfraction': return np.sum(np.isnan(data)) / len(data) if aggregation == 'nancount': return int(np.sum(np.isnan(data))) return getattr(np, aggregation)(data)
[ "def", "_compute_aggregation", "(", "aggregation", ":", "str", ",", "data", ":", "Iterable", "[", "Any", "]", ")", ":", "ComputeStats", ".", "_raise_check_aggregation", "(", "aggregation", ")", "if", "aggregation", "==", "'nanfraction'", ":", "return", "np", "...
Compute the specified aggregation on the given data. :param aggregation: the name of an arbitrary NumPy function (e.g., mean, max, median, nanmean, ...) or one of :py:attr:`EXTRA_AGGREGATIONS`. :param data: data to be aggregated :raise ValueError: if the specified aggregation is not supported or found in NumPy
[ "Compute", "the", "specified", "aggregation", "on", "the", "given", "data", "." ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L9554-L9563
def remove_description_by_type(self, type_p): """Delete all records which are equal to the passed type from the list in type_p of type :class:`VirtualSystemDescriptionType` """ if not isinstance(type_p, VirtualSystemDescriptionType): raise TypeError("type_p can only be an instance of type VirtualSystemDescriptionType") self._call("removeDescriptionByType", in_p=[type_p])
[ "def", "remove_description_by_type", "(", "self", ",", "type_p", ")", ":", "if", "not", "isinstance", "(", "type_p", ",", "VirtualSystemDescriptionType", ")", ":", "raise", "TypeError", "(", "\"type_p can only be an instance of type VirtualSystemDescriptionType\"", ")", "...
Delete all records which are equal to the passed type from the list in type_p of type :class:`VirtualSystemDescriptionType`
[ "Delete", "all", "records", "which", "are", "equal", "to", "the", "passed", "type", "from", "the", "list" ]
python
train
onnx/onnx-mxnet
onnx_mxnet/common.py
https://github.com/onnx/onnx-mxnet/blob/b602d75c5a01f5ed8f68b11150a06374f058a86b/onnx_mxnet/common.py#L137-L142
def _required_attr(self, attr, key): """Wrapper for getting required attributes.""" assert isinstance(attr, dict) if key not in attr: raise AttributeError("Required attribute {} not found.".format(key)) return attr[key]
[ "def", "_required_attr", "(", "self", ",", "attr", ",", "key", ")", ":", "assert", "isinstance", "(", "attr", ",", "dict", ")", "if", "key", "not", "in", "attr", ":", "raise", "AttributeError", "(", "\"Required attribute {} not found.\"", ".", "format", "(",...
Wrapper for getting required attributes.
[ "Wrapper", "for", "getting", "required", "attributes", "." ]
python
train
justanr/Flask-Transfer
flask_transfer/transfer.py
https://github.com/justanr/Flask-Transfer/blob/075ba9edb8c8d0ea47619cc763394bbb717c2ead/flask_transfer/transfer.py#L15-L25
def _make_destination_callable(dest): """Creates a callable out of the destination. If it's already callable, the destination is returned. Instead, if the object is a string or a writable object, it's wrapped in a closure to be used later. """ if callable(dest): return dest elif hasattr(dest, 'write') or isinstance(dest, string_types): return _use_filehandle_to_save(dest) else: raise TypeError("Destination must be a string, writable or callable object.")
[ "def", "_make_destination_callable", "(", "dest", ")", ":", "if", "callable", "(", "dest", ")", ":", "return", "dest", "elif", "hasattr", "(", "dest", ",", "'write'", ")", "or", "isinstance", "(", "dest", ",", "string_types", ")", ":", "return", "_use_file...
Creates a callable out of the destination. If it's already callable, the destination is returned. Instead, if the object is a string or a writable object, it's wrapped in a closure to be used later.
[ "Creates", "a", "callable", "out", "of", "the", "destination", ".", "If", "it", "s", "already", "callable", "the", "destination", "is", "returned", ".", "Instead", "if", "the", "object", "is", "a", "string", "or", "a", "writable", "object", "it", "s", "w...
python
train
etcher-be/epab
epab/utils/_repo.py
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/utils/_repo.py#L544-L561
def is_dirty(self, untracked=False) -> bool: """ Checks if the current repository contains uncommitted or untracked changes Returns: true if the repository is clean """ result = False if not self.index_is_empty(): LOGGER.error('index is not empty') result = True changed_files = self.changed_files() if bool(changed_files): LOGGER.error(f'Repo has %s modified files: %s', len(changed_files), changed_files) result = True if untracked: result = result or bool(self.untracked_files()) return result
[ "def", "is_dirty", "(", "self", ",", "untracked", "=", "False", ")", "->", "bool", ":", "result", "=", "False", "if", "not", "self", ".", "index_is_empty", "(", ")", ":", "LOGGER", ".", "error", "(", "'index is not empty'", ")", "result", "=", "True", ...
Checks if the current repository contains uncommitted or untracked changes Returns: true if the repository is clean
[ "Checks", "if", "the", "current", "repository", "contains", "uncommitted", "or", "untracked", "changes" ]
python
train
spacetelescope/drizzlepac
drizzlepac/hlautils/astrometric_utils.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/hlautils/astrometric_utils.py#L891-L932
def within_footprint(img, wcs, x, y): """Determine whether input x, y fall in the science area of the image. Parameters ---------- img : ndarray ndarray of image where non-science areas are marked with value of NaN. wcs : `stwcs.wcsutil.HSTWCS` HSTWCS or WCS object with naxis terms defined. x, y : ndarray arrays of x, y positions for sources to be checked. Returns ------- x, y : ndarray New arrays which have been trimmed of all sources that fall outside the science areas of the image """ # start with limits of WCS shape if hasattr(wcs, 'naxis1'): naxis1 = wcs.naxis1 naxis2 = wcs.naxis2 elif hasattr(wcs, 'pixel_shape'): naxis1, naxis2 = wcs.pixel_shape else: naxis1 = wcs._naxis1 naxis2 = wcs._naxis2 maskx = np.bitwise_or(x < 0, x > naxis1) masky = np.bitwise_or(y < 0, y > naxis2) mask = ~np.bitwise_or(maskx, masky) x = x[mask] y = y[mask] # Now, confirm that these points fall within actual science area of WCS img_mask = create_image_footprint(img, wcs, border=1.0) inmask = np.where(img_mask[y.astype(np.int32), x.astype(np.int32)])[0] x = x[inmask] y = y[inmask] return x, y
[ "def", "within_footprint", "(", "img", ",", "wcs", ",", "x", ",", "y", ")", ":", "# start with limits of WCS shape", "if", "hasattr", "(", "wcs", ",", "'naxis1'", ")", ":", "naxis1", "=", "wcs", ".", "naxis1", "naxis2", "=", "wcs", ".", "naxis2", "elif",...
Determine whether input x, y fall in the science area of the image. Parameters ---------- img : ndarray ndarray of image where non-science areas are marked with value of NaN. wcs : `stwcs.wcsutil.HSTWCS` HSTWCS or WCS object with naxis terms defined. x, y : ndarray arrays of x, y positions for sources to be checked. Returns ------- x, y : ndarray New arrays which have been trimmed of all sources that fall outside the science areas of the image
[ "Determine", "whether", "input", "x", "y", "fall", "in", "the", "science", "area", "of", "the", "image", "." ]
python
train
pywbem/pywbem
pywbem/tupleparse.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/tupleparse.py#L570-L596
def parse_value_instancewithpath(self, tup_tree): """ The VALUE.INSTANCEWITHPATH is used to define a value that comprises a single CIMInstance with additional information that defines the absolute path to that object. :: <!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH, INSTANCE)> """ self.check_node(tup_tree, 'VALUE.INSTANCEWITHPATH') k = kids(tup_tree) if len(k) != 2: raise CIMXMLParseError( _format("Element {0!A} has invalid number of child elements " "{1!A} (expecting two child elements " "(INSTANCEPATH, INSTANCE))", name(tup_tree), k), conn_id=self.conn_id) inst_path = self.parse_instancepath(k[0]) instance = self.parse_instance(k[1]) instance.path = inst_path return instance
[ "def", "parse_value_instancewithpath", "(", "self", ",", "tup_tree", ")", ":", "self", ".", "check_node", "(", "tup_tree", ",", "'VALUE.INSTANCEWITHPATH'", ")", "k", "=", "kids", "(", "tup_tree", ")", "if", "len", "(", "k", ")", "!=", "2", ":", "raise", ...
The VALUE.INSTANCEWITHPATH is used to define a value that comprises a single CIMInstance with additional information that defines the absolute path to that object. :: <!ELEMENT VALUE.INSTANCEWITHPATH (INSTANCEPATH, INSTANCE)>
[ "The", "VALUE", ".", "INSTANCEWITHPATH", "is", "used", "to", "define", "a", "value", "that", "comprises", "a", "single", "CIMInstance", "with", "additional", "information", "that", "defines", "the", "absolute", "path", "to", "that", "object", "." ]
python
train
DataONEorg/d1_python
utilities/src/d1_util/generate_data_package_from_stream.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/utilities/src/d1_util/generate_data_package_from_stream.py#L73-L99
def pids2ore(in_stream, fmt='xml', base_url='https://cn.dataone.org/cn'): """read pids from in_stream and generate a resource map. first pid is the ore_pid second is the sci meta pid remainder are data pids """ pids = [] for line in in_stream: pid = line.strip() if len(pid) > 0: if not pid.startswith("# "): pids.append(pid) if (len(pids)) < 2: raise ValueError("Insufficient identifiers provided.") logging.info("Read %d identifiers", len(pids)) ore = ResourceMap(base_url=base_url) logging.info("ORE PID = %s", pids[0]) ore.initialize(pids[0]) logging.info("Metadata PID = %s", pids[1]) ore.addMetadataDocument(pids[1]) ore.addDataDocuments(pids[2:], pids[1]) return ore.serialize_to_display(doc_format=fmt)
[ "def", "pids2ore", "(", "in_stream", ",", "fmt", "=", "'xml'", ",", "base_url", "=", "'https://cn.dataone.org/cn'", ")", ":", "pids", "=", "[", "]", "for", "line", "in", "in_stream", ":", "pid", "=", "line", ".", "strip", "(", ")", "if", "len", "(", ...
read pids from in_stream and generate a resource map. first pid is the ore_pid second is the sci meta pid remainder are data pids
[ "read", "pids", "from", "in_stream", "and", "generate", "a", "resource", "map", "." ]
python
train
bodylabs/lace
lace/topology.py
https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/topology.py#L76-L103
def vertex_indices_in_segments(self, segments, ret_face_indices=False): ''' Given a list of segment names, return an array of vertex indices for all the vertices in those faces. Args: segments: a list of segment names, ret_face_indices: if it is `True`, returns face indices ''' import numpy as np import warnings face_indices = np.array([]) vertex_indices = np.array([]) if self.segm is not None: try: segments = [self.segm[name] for name in segments] except KeyError as e: raise ValueError('Unknown segments {}. Consier using Mesh.clean_segments on segments'.format(e.args[0])) face_indices = np.unique(np.concatenate(segments)) vertex_indices = np.unique(np.ravel(self.f[face_indices])) else: warnings.warn('self.segm is None, will return empty array') if ret_face_indices: return vertex_indices, face_indices else: return vertex_indices
[ "def", "vertex_indices_in_segments", "(", "self", ",", "segments", ",", "ret_face_indices", "=", "False", ")", ":", "import", "numpy", "as", "np", "import", "warnings", "face_indices", "=", "np", ".", "array", "(", "[", "]", ")", "vertex_indices", "=", "np",...
Given a list of segment names, return an array of vertex indices for all the vertices in those faces. Args: segments: a list of segment names, ret_face_indices: if it is `True`, returns face indices
[ "Given", "a", "list", "of", "segment", "names", "return", "an", "array", "of", "vertex", "indices", "for", "all", "the", "vertices", "in", "those", "faces", "." ]
python
train
carpyncho/feets
feets/preprocess.py
https://github.com/carpyncho/feets/blob/53bdfb73b53845561914fc1f756e0c2377b9b76b/feets/preprocess.py#L44-L73
def remove_noise(time, magnitude, error, error_limit=3, std_limit=5): """Points within 'std_limit' standard deviations from the mean and with errors greater than 'error_limit' times the error mean are considered as noise and thus are eliminated. """ data, mjd = magnitude, time data_len = len(mjd) error_mean = np.mean(error) error_tolerance = error_limit * (error_mean or 1) data_mean = np.mean(data) data_std = np.std(data) mjd_out, data_out, error_out = [], [], [] for i in range(data_len): is_not_noise = ( error[i] < error_tolerance and (np.absolute(data[i] - data_mean) / data_std) < std_limit) if is_not_noise: mjd_out.append(mjd[i]) data_out.append(data[i]) error_out.append(error[i]) data_out = np.asarray(data_out) mjd_out = np.asarray(mjd_out) error_out = np.asarray(error_out) return mjd_out, data_out, error_out
[ "def", "remove_noise", "(", "time", ",", "magnitude", ",", "error", ",", "error_limit", "=", "3", ",", "std_limit", "=", "5", ")", ":", "data", ",", "mjd", "=", "magnitude", ",", "time", "data_len", "=", "len", "(", "mjd", ")", "error_mean", "=", "np...
Points within 'std_limit' standard deviations from the mean and with errors greater than 'error_limit' times the error mean are considered as noise and thus are eliminated.
[ "Points", "within", "std_limit", "standard", "deviations", "from", "the", "mean", "and", "with", "errors", "greater", "than", "error_limit", "times", "the", "error", "mean", "are", "considered", "as", "noise", "and", "thus", "are", "eliminated", "." ]
python
train
shaypal5/pdutil
pdutil/iter/iter.py
https://github.com/shaypal5/pdutil/blob/231059634643af2558d22070f89767410978cf56/pdutil/iter/iter.py#L4-L32
def sub_dfs_by_size(df, size): """Get a generator yielding consecutive sub-dataframes of the given size. Arguments --------- df : pandas.DataFrame The dataframe for which to get sub-dataframes. size : int The size of each sub-dataframe. Returns ------- generator A generator yielding consecutive sub-dataframe of the given size. Example ------- >>> import pandas as pd; import pdutil; >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]] >>> df = pd.DataFrame(data, columns=['age', 'name']) >>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf) age name 0 23 Jen 1 42 Ray age name 2 15 Fin """ for i in range(0, len(df), size): yield (df.iloc[i:i + size])
[ "def", "sub_dfs_by_size", "(", "df", ",", "size", ")", ":", "for", "i", "in", "range", "(", "0", ",", "len", "(", "df", ")", ",", "size", ")", ":", "yield", "(", "df", ".", "iloc", "[", "i", ":", "i", "+", "size", "]", ")" ]
Get a generator yielding consecutive sub-dataframes of the given size. Arguments --------- df : pandas.DataFrame The dataframe for which to get sub-dataframes. size : int The size of each sub-dataframe. Returns ------- generator A generator yielding consecutive sub-dataframe of the given size. Example ------- >>> import pandas as pd; import pdutil; >>> data = [[23, "Jen"], [42, "Ray"], [15, "Fin"]] >>> df = pd.DataFrame(data, columns=['age', 'name']) >>> for subdf in pdutil.iter.sub_dfs_by_size(df, 2): print(subdf) age name 0 23 Jen 1 42 Ray age name 2 15 Fin
[ "Get", "a", "generator", "yielding", "consecutive", "sub", "-", "dataframes", "of", "the", "given", "size", "." ]
python
train
PyCQA/pylint
pylint/config.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/config.py#L796-L800
def help(self, level=0): """return the usage string for available options """ self.cmdline_parser.formatter.output_level = level with _patch_optparse(): return self.cmdline_parser.format_help()
[ "def", "help", "(", "self", ",", "level", "=", "0", ")", ":", "self", ".", "cmdline_parser", ".", "formatter", ".", "output_level", "=", "level", "with", "_patch_optparse", "(", ")", ":", "return", "self", ".", "cmdline_parser", ".", "format_help", "(", ...
return the usage string for available options
[ "return", "the", "usage", "string", "for", "available", "options" ]
python
test
spyder-ide/spyder
spyder/plugins/ipythonconsole/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/ipythonconsole/plugin.py#L754-L759
def edit_file(self, filename, line): """Handle %edit magic petitions.""" if encoding.is_text_file(filename): # The default line number sent by ipykernel is always the last # one, but we prefer to use the first. self.edit_goto.emit(filename, 1, '')
[ "def", "edit_file", "(", "self", ",", "filename", ",", "line", ")", ":", "if", "encoding", ".", "is_text_file", "(", "filename", ")", ":", "# The default line number sent by ipykernel is always the last\r", "# one, but we prefer to use the first.\r", "self", ".", "edit_go...
Handle %edit magic petitions.
[ "Handle", "%edit", "magic", "petitions", "." ]
python
train
ergoithz/unicategories
unicategories/tools.py
https://github.com/ergoithz/unicategories/blob/70ade9fa3662ac3fc62fb2648a29a360a4d82025/unicategories/tools.py#L133-L161
def generate(categorize=unicodedata.category, group_class=RangeGroup): ''' Generate a dict of RangeGroups for each unicode character category, including general ones. :param categorize: category function, defaults to unicodedata.category. :type categorize: callable :param group_class: class for range groups, defaults to RangeGroup :type group_class: type :returns: dictionary of categories and range groups :rtype: dict of RangeGroup ''' categories = collections.defaultdict(list) last_category = None last_range = None for c in range(sys.maxunicode + 1): category = categorize(chr(c)) if category != last_category: last_category = category last_range = [c, c + 1] categories[last_category].append(last_range) else: last_range[1] += 1 categories = {k: group_class(v) for k, v in categories.items()} categories.update({ k: merge(*map(categories.__getitem__, g)) for k, g in itertools.groupby(sorted(categories), key=lambda k: k[0]) }) return categories
[ "def", "generate", "(", "categorize", "=", "unicodedata", ".", "category", ",", "group_class", "=", "RangeGroup", ")", ":", "categories", "=", "collections", ".", "defaultdict", "(", "list", ")", "last_category", "=", "None", "last_range", "=", "None", "for", ...
Generate a dict of RangeGroups for each unicode character category, including general ones. :param categorize: category function, defaults to unicodedata.category. :type categorize: callable :param group_class: class for range groups, defaults to RangeGroup :type group_class: type :returns: dictionary of categories and range groups :rtype: dict of RangeGroup
[ "Generate", "a", "dict", "of", "RangeGroups", "for", "each", "unicode", "character", "category", "including", "general", "ones", "." ]
python
train
dmwm/DBS
Client/src/python/dbs/apis/dbsClient.py
https://github.com/dmwm/DBS/blob/9619bafce3783b3e77f0415f8f9a258e33dd1e6f/Client/src/python/dbs/apis/dbsClient.py#L679-L758
def listDatasets(self, **kwargs): """ API to list dataset(s) in DBS * You can use ANY combination of these parameters in this API * In absence of parameters, all valid datasets known to the DBS instance will be returned :param dataset: Full dataset (path) of the dataset :type dataset: str :param parent_dataset: Full dataset (path) of the dataset :type parent_dataset: str :param release_version: cmssw version :type release_version: str :param pset_hash: pset hash :type pset_hash: str :param app_name: Application name (generally it is cmsRun) :type app_name: str :param output_module_label: output_module_label :type output_module_label: str :param processing_version: Processing Version :type processing_version: str :param acquisition_era_name: Acquisition Era :type acquisition_era_name: str :param run_num: Specify a specific run number or range: Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...] :type run_num: int,list,str :param physics_group_name: List only dataset having physics_group_name attribute :type physics_group_name: str :param logical_file_name: List dataset containing the logical_file_name :type logical_file_name: str :param primary_ds_name: Primary Dataset Name :type primary_ds_name: str :param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA) :type primary_ds_type: str :param processed_ds_name: List datasets having this processed dataset name :type processed_ds_name: str :param data_tier_name: Data Tier :type data_tier_name: str :param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.) 
:type dataset_access_type: str :param prep_id: prep_id :type prep_id: str :param create_by: Creator of the dataset :type create_by: str :param last_modified_by: Last modifier of the dataset :type last_modified_by: str :param min_cdate: Lower limit for the creation date (unixtime) (Optional) :type min_cdate: int, str :param max_cdate: Upper limit for the creation date (unixtime) (Optional) :type max_cdate: int, str :param min_ldate: Lower limit for the last modification date (unixtime) (Optional) :type min_ldate: int, str :param max_ldate: Upper limit for the last modification date (unixtime) (Optional) :type max_ldate: int, str :param cdate: creation date (unixtime) (Optional) :type cdate: int, str :param ldate: last modification date (unixtime) (Optional) :type ldate: int, str :param detail: List all details of a dataset :type detail: bool :param dataset_id: DB primary key of datasets table. :type dataset_id: int, str :returns: List of dictionaries containing the following keys (dataset). If the detail option is used. 
The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type) :rtype: list of dicts """ validParameters = ['dataset', 'parent_dataset', 'is_dataset_valid', 'release_version', 'pset_hash', 'app_name', 'output_module_label', 'processing_version', 'acquisition_era_name', 'run_num', 'physics_group_name', 'logical_file_name', 'primary_ds_name', 'primary_ds_type', 'processed_ds_name', 'data_tier_name', 'dataset_access_type', 'prep_id', 'create_by', 'last_modified_by', 'min_cdate', 'max_cdate', 'min_ldate', 'max_ldate', 'cdate', 'ldate', 'detail', 'dataset_id'] #set defaults if 'detail' not in kwargs.keys(): kwargs['detail'] = False checkInputParameter(method="listDatasets", parameters=kwargs.keys(), validParameters=validParameters) return self.__callServer("datasets", params=kwargs)
[ "def", "listDatasets", "(", "self", ",", "*", "*", "kwargs", ")", ":", "validParameters", "=", "[", "'dataset'", ",", "'parent_dataset'", ",", "'is_dataset_valid'", ",", "'release_version'", ",", "'pset_hash'", ",", "'app_name'", ",", "'output_module_label'", ",",...
API to list dataset(s) in DBS * You can use ANY combination of these parameters in this API * In absence of parameters, all valid datasets known to the DBS instance will be returned :param dataset: Full dataset (path) of the dataset :type dataset: str :param parent_dataset: Full dataset (path) of the dataset :type parent_dataset: str :param release_version: cmssw version :type release_version: str :param pset_hash: pset hash :type pset_hash: str :param app_name: Application name (generally it is cmsRun) :type app_name: str :param output_module_label: output_module_label :type output_module_label: str :param processing_version: Processing Version :type processing_version: str :param acquisition_era_name: Acquisition Era :type acquisition_era_name: str :param run_num: Specify a specific run number or range: Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...] :type run_num: int,list,str :param physics_group_name: List only dataset having physics_group_name attribute :type physics_group_name: str :param logical_file_name: List dataset containing the logical_file_name :type logical_file_name: str :param primary_ds_name: Primary Dataset Name :type primary_ds_name: str :param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA) :type primary_ds_type: str :param processed_ds_name: List datasets having this processed dataset name :type processed_ds_name: str :param data_tier_name: Data Tier :type data_tier_name: str :param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.) 
:type dataset_access_type: str :param prep_id: prep_id :type prep_id: str :param create_by: Creator of the dataset :type create_by: str :param last_modified_by: Last modifier of the dataset :type last_modified_by: str :param min_cdate: Lower limit for the creation date (unixtime) (Optional) :type min_cdate: int, str :param max_cdate: Upper limit for the creation date (unixtime) (Optional) :type max_cdate: int, str :param min_ldate: Lower limit for the last modification date (unixtime) (Optional) :type min_ldate: int, str :param max_ldate: Upper limit for the last modification date (unixtime) (Optional) :type max_ldate: int, str :param cdate: creation date (unixtime) (Optional) :type cdate: int, str :param ldate: last modification date (unixtime) (Optional) :type ldate: int, str :param detail: List all details of a dataset :type detail: bool :param dataset_id: DB primary key of datasets table. :type dataset_id: int, str :returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type) :rtype: list of dicts
[ "API", "to", "list", "dataset", "(", "s", ")", "in", "DBS", "*", "You", "can", "use", "ANY", "combination", "of", "these", "parameters", "in", "this", "API", "*", "In", "absence", "of", "parameters", "all", "valid", "datasets", "known", "to", "the", "D...
python
train