repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
neighbordog/deviantart
deviantart/api.py
https://github.com/neighbordog/deviantart/blob/5612f1d5e2139a48c9d793d7fd19cde7e162d7b1/deviantart/api.py#L962-L1009
def update_user(self, user_is_artist="", artist_level="", artist_specialty="", real_name="", tagline="", countryid="", website="", bio=""):
    """Update the user's profile information.

    :param user_is_artist: Is the user an artist?
    :param artist_level: If the user is an artist, what level are they
    :param artist_specialty: If the user is an artist, what is their specialty
    :param real_name: The user's real name
    :param tagline: The user's tagline
    :param countryid: The user's location
    :param website: The user's personal website
    :param bio: The user's bio
    :return: the API's 'success' flag
    :raises DeviantartError: if not authenticated via the Authorization
        Code grant type
    """
    # BUG FIX: the original used `is not` to compare strings, which tests
    # object identity rather than equality -- use != for value comparison.
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")

    # Only send fields the caller actually provided; empty strings are
    # skipped, matching the original per-field truthiness checks.
    fields = {
        "user_is_artist": user_is_artist,
        "artist_level": artist_level,
        "artist_specialty": artist_specialty,
        "real_name": real_name,
        "tagline": tagline,
        "countryid": countryid,
        "website": website,
        "bio": bio,
    }
    post_data = {name: value for name, value in fields.items() if value}

    response = self._req('/user/profile/update', post_data=post_data)
    return response['success']
[ "def", "update_user", "(", "self", ",", "user_is_artist", "=", "\"\"", ",", "artist_level", "=", "\"\"", ",", "artist_specialty", "=", "\"\"", ",", "real_name", "=", "\"\"", ",", "tagline", "=", "\"\"", ",", "countryid", "=", "\"\"", ",", "website", "=", ...
Update the users profile information :param user_is_artist: Is the user an artist? :param artist_level: If the user is an artist, what level are they :param artist_specialty: If the user is an artist, what is their specialty :param real_name: The users real name :param tagline: The users tagline :param countryid: The users location :param website: The users personal website :param bio: The users bio
[ "Update", "the", "users", "profile", "information" ]
python
train
vtkiorg/vtki
vtki/renderer.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/renderer.py#L748-L762
def enable_eye_dome_lighting(self):
    """Enable eye dome lighting (EDL)"""
    # Already enabled -- nothing to do.
    if hasattr(self, 'edl_pass'):
        return self

    # Standard VTK render steps that the EDL shading pass delegates the
    # unshaded rendering to.
    render_steps = vtk.vtkRenderStepsPass()

    # The EDL pass post-processes the image produced by the delegate pass.
    self.edl_pass = vtk.vtkEDLShading()
    self.edl_pass.SetDelegatePass(render_steps)

    # Install the custom render-pass pipeline on the OpenGL renderer.
    self.glrenderer = vtk.vtkOpenGLRenderer.SafeDownCast(self)
    self.glrenderer.SetPass(self.edl_pass)
    return self.glrenderer
[ "def", "enable_eye_dome_lighting", "(", "self", ")", ":", "if", "hasattr", "(", "self", ",", "'edl_pass'", ")", ":", "return", "self", "# create the basic VTK render steps", "basic_passes", "=", "vtk", ".", "vtkRenderStepsPass", "(", ")", "# blur the resulting image",...
Enable eye dome lighting (EDL)
[ "Enable", "eye", "dome", "lighting", "(", "EDL", ")" ]
python
train
JohannesBuchner/regulargrid
regulargrid/interpn.py
https://github.com/JohannesBuchner/regulargrid/blob/8803d8367c6b413d7a70ec474c67cc45ad8c00c8/regulargrid/interpn.py#L26-L46
def npinterpn(*args, **kw):
    """Interpolation on N-D.

    ai = interpn(x, y, z, ..., a, xi, yi, zi, ...)

    where the arrays x, y, z, ... define a rectangular grid and
    a.shape == (len(x), len(y), len(z), ...) are the values to
    interpolate at xi, yi, zi, ...

    :param method: (keyword only) interpolation kind forwarded to
        ``interp``; defaults to 'cubic'.
    :raises ValueError: for unknown keyword arguments or a wrong number
        of positional arguments.
    """
    method = kw.pop('method', 'cubic')
    if kw:
        # BUG FIX: the original format string had no %s placeholder, so
        # the % operator raised a TypeError instead of reporting which
        # keywords were unknown.
        raise ValueError("Unknown arguments: %s" % list(kw.keys()))
    nd = (len(args) - 1) // 2
    if len(args) != 2 * nd + 1:
        raise ValueError("Wrong number of arguments")
    q = args[:nd]       # grid axes
    qi = args[nd + 1:]  # query coordinates
    a = args[nd]        # values on the grid
    # Interpolate along one axis at a time.
    for j in range(nd):
        a = interp(q[j], a, axis=j, kind=method)(qi[j])
    return a
[ "def", "npinterpn", "(", "*", "args", ",", "*", "*", "kw", ")", ":", "method", "=", "kw", ".", "pop", "(", "'method'", ",", "'cubic'", ")", "if", "kw", ":", "raise", "ValueError", "(", "\"Unknown arguments: \"", "%", "kw", ".", "keys", "(", ")", ")...
Interpolation on N-D. ai = interpn(x, y, z, ..., a, xi, yi, zi, ...) where the arrays x, y, z, ... define a rectangular grid and a.shape == (len(x), len(y), len(z), ...) are the values interpolate at xi, yi, zi, ...
[ "Interpolation", "on", "N", "-", "D", "." ]
python
train
BoGoEngine/bogo-python
bogo/core.py
https://github.com/BoGoEngine/bogo-python/blob/9b85329a408ded4cead3539cecba12984d5d7650/bogo/core.py#L289-L329
def _get_transformation_list(key, im, fallback_sequence): """ Return the list of transformations inferred from the entered key. The map between transform types and keys is given by module bogo_config (if exists) or by variable simple_telex_im if entered key is not in im, return "+key", meaning appending the entered key to current text """ # if key in im: # lkey = key # else: # lkey = key.lower() lkey = key.lower() if lkey in im: if isinstance(im[lkey], list): trans_list = im[lkey] else: trans_list = [im[lkey]] for i, trans in enumerate(trans_list): if trans[0] == '<' and key.isalpha(): trans_list[i] = trans[0] + \ utils.change_case(trans[1], int(key.isupper())) if trans_list == ['_']: if len(fallback_sequence) >= 2: # TODO Use takewhile()/dropwhile() to process the last IM keypress # instead of assuming it's the last key in fallback_sequence. t = list(map(lambda x: "_" + x, _get_transformation_list(fallback_sequence[-2], im, fallback_sequence[:-1]))) # print(t) trans_list = t # else: # trans_list = ['+' + key] return trans_list else: return ['+' + key]
[ "def", "_get_transformation_list", "(", "key", ",", "im", ",", "fallback_sequence", ")", ":", "# if key in im:", "# lkey = key", "# else:", "# lkey = key.lower()", "lkey", "=", "key", ".", "lower", "(", ")", "if", "lkey", "in", "im", ":", "if", "isinstan...
Return the list of transformations inferred from the entered key. The map between transform types and keys is given by module bogo_config (if exists) or by variable simple_telex_im if entered key is not in im, return "+key", meaning appending the entered key to current text
[ "Return", "the", "list", "of", "transformations", "inferred", "from", "the", "entered", "key", ".", "The", "map", "between", "transform", "types", "and", "keys", "is", "given", "by", "module", "bogo_config", "(", "if", "exists", ")", "or", "by", "variable", ...
python
train
pip-services3-python/pip-services3-commons-python
pip_services3_commons/data/AnyValueArray.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/AnyValueArray.py#L82-L97
def get_as_array(self, index):
    """
    Converts array element into an AnyValueArray or returns empty
    AnyValueArray if conversion is not possible.

    :param index: an index of element to get; when None, a plain list
        copy of all elements is returned instead.

    :return: AnyValueArray value of the element, or -- for index=None --
        a list of all elements. NOTE(review): the None branch returns a
        plain list, not an AnyValueArray as the summary suggests; confirm
        against callers before unifying.
    """
    # Idiom fix: compare against None with `is`, not `==`.
    if index is None:
        # Shallow copy of every element, preserved from the original.
        return list(self)
    value = self[index]
    return AnyValueArray.from_value(value)
[ "def", "get_as_array", "(", "self", ",", "index", ")", ":", "if", "index", "==", "None", ":", "array", "=", "[", "]", "for", "value", "in", "self", ":", "array", ".", "append", "(", "value", ")", "return", "array", "else", ":", "value", "=", "self"...
Converts array element into an AnyValueArray or returns empty AnyValueArray if conversion is not possible. :param index: an index of element to get. :return: AnyValueArray value of the element or empty AnyValueArray if conversion is not supported.
[ "Converts", "array", "element", "into", "an", "AnyValueArray", "or", "returns", "empty", "AnyValueArray", "if", "conversion", "is", "not", "possible", "." ]
python
train
praekeltfoundation/marathon-acme
marathon_acme/cli.py
https://github.com/praekeltfoundation/marathon-acme/blob/b1b71e3dde0ba30e575089280658bd32890e3325/marathon_acme/cli.py#L271-L283
def init_logging(log_level):
    """
    Initialise the logging by adding an observer to the global log publisher.

    :param str log_level: The minimum log level to log messages for.
    """
    # Filter out anything below the requested level...
    predicate = LogLevelFilterPredicate(LogLevel.levelWithName(log_level))
    # ...but always silence the chatty HTTP client factory below 'warn'.
    predicate.setLogLevelForNamespace(
        'twisted.web.client._HTTP11ClientFactory', LogLevel.warn)

    observer = FilteringLogObserver(
        textFileLogObserver(sys.stdout), [predicate])
    globalLogPublisher.addObserver(observer)
[ "def", "init_logging", "(", "log_level", ")", ":", "log_level_filter", "=", "LogLevelFilterPredicate", "(", "LogLevel", ".", "levelWithName", "(", "log_level", ")", ")", "log_level_filter", ".", "setLogLevelForNamespace", "(", "'twisted.web.client._HTTP11ClientFactory'", ...
Initialise the logging by adding an observer to the global log publisher. :param str log_level: The minimum log level to log messages for.
[ "Initialise", "the", "logging", "by", "adding", "an", "observer", "to", "the", "global", "log", "publisher", "." ]
python
valid
ray-project/ray
python/ray/log_monitor.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/log_monitor.py#L81-L88
def close_all_files(self):
    """Close all open files (so that we can open more)."""
    # Drain the open list front-to-back, releasing each file handle and
    # moving the record onto the closed list.
    while self.open_file_infos:
        info = self.open_file_infos.pop(0)
        info.file_handle.close()
        info.file_handle = None
        self.closed_file_infos.append(info)
    self.can_open_more_files = True
[ "def", "close_all_files", "(", "self", ")", ":", "while", "len", "(", "self", ".", "open_file_infos", ")", ">", "0", ":", "file_info", "=", "self", ".", "open_file_infos", ".", "pop", "(", "0", ")", "file_info", ".", "file_handle", ".", "close", "(", "...
Close all open files (so that we can open more).
[ "Close", "all", "open", "files", "(", "so", "that", "we", "can", "open", "more", ")", "." ]
python
train
aiogram/aiogram
aiogram/utils/callback_data.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/utils/callback_data.py#L44-L81
def new(self, *args, **kwargs) -> str:
    """
    Generate callback data

    :param args: positional values for the declared parts, in order
    :param kwargs: values for the declared parts, by name
    :return: the serialized callback data string
    """
    positional = list(args)
    parts = [self.prefix]
    for name in self._part_names:
        # Prefer a keyword value; fall back to the next positional one.
        value = kwargs.pop(name, None)
        if value is None:
            if not positional:
                raise ValueError(f"Value for '{name}' is not passed!")
            value = positional.pop(0)
        if value is not None and not isinstance(value, str):
            value = str(value)
        if not value:
            raise ValueError(f"Value for part {name} can't be empty!'")
        if self.sep in value:
            raise ValueError(f"Symbol defined as separator can't be used in values of parts")
        parts.append(value)
    # Every supplied argument must have been consumed by some part.
    if positional or kwargs:
        raise TypeError('Too many arguments is passed!')
    callback_data = self.sep.join(parts)
    # Telegram limits callback data to 64 bytes.
    if len(callback_data) > 64:
        raise ValueError('Resulted callback data is too long!')
    return callback_data
[ "def", "new", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "->", "str", ":", "args", "=", "list", "(", "args", ")", "data", "=", "[", "self", ".", "prefix", "]", "for", "part", "in", "self", ".", "_part_names", ":", "value", "="...
Generate callback data :param args: :param kwargs: :return:
[ "Generate", "callback", "data" ]
python
train
pandas-dev/pandas
pandas/core/internals/concat.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/internals/concat.py#L395-L421
def trim_join_unit(join_unit, length):
    """
    Reduce join_unit's shape along item axis to length.

    Extra items that didn't fit are returned as a separate block
    (a new JoinUnit); `join_unit` is trimmed in place.
    """
    if 0 not in join_unit.indexers:
        # No reindexer on the item axis: split by slicing the block itself.
        extra_indexers = join_unit.indexers

        if join_unit.block is None:
            extra_block = None
        else:
            extra_block = join_unit.block.getitem_block(slice(length, None))
            join_unit.block = join_unit.block.getitem_block(slice(length))
    else:
        # An item-axis indexer exists: split the indexer instead of the
        # block; both units keep referencing the same (unsliced) block.
        extra_block = join_unit.block

        extra_indexers = copy.copy(join_unit.indexers)
        extra_indexers[0] = extra_indexers[0][length:]
        join_unit.indexers[0] = join_unit.indexers[0][:length]

    # First dimension is split at `length`; trailing dims are unchanged.
    extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
    join_unit.shape = (length,) + join_unit.shape[1:]

    return JoinUnit(block=extra_block, indexers=extra_indexers,
                    shape=extra_shape)
[ "def", "trim_join_unit", "(", "join_unit", ",", "length", ")", ":", "if", "0", "not", "in", "join_unit", ".", "indexers", ":", "extra_indexers", "=", "join_unit", ".", "indexers", "if", "join_unit", ".", "block", "is", "None", ":", "extra_block", "=", "Non...
Reduce join_unit's shape along item axis to length. Extra items that didn't fit are returned as a separate block.
[ "Reduce", "join_unit", "s", "shape", "along", "item", "axis", "to", "length", "." ]
python
train
TheOstrichIO/ostrichlib
ostrich/utils/proc.py
https://github.com/TheOstrichIO/ostrichlib/blob/ed97634ccbfb8b5042e61fbd0ac9a27aef281bcb/ostrich/utils/proc.py#L122-L126
def check_returncode(self):
    """Raise CalledProcessError if the exit code is non-zero."""
    # A zero (or falsy) return code means success -- nothing to do.
    if not self.returncode:
        return
    raise CalledProcessError(self.returncode, self.args,
                             self.stdout, self.stderr)
[ "def", "check_returncode", "(", "self", ")", ":", "if", "self", ".", "returncode", ":", "raise", "CalledProcessError", "(", "self", ".", "returncode", ",", "self", ".", "args", ",", "self", ".", "stdout", ",", "self", ".", "stderr", ")" ]
Raise CalledProcessError if the exit code is non-zero.
[ "Raise", "CalledProcessError", "if", "the", "exit", "code", "is", "non", "-", "zero", "." ]
python
train
osrg/ryu
ryu/services/protocols/bgp/utils/validation.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/utils/validation.py#L39-L51
def is_valid_ip_prefix(prefix, bits):
    """Returns True if *prefix* is a valid IPv4 or IPv6 address prefix.

    *prefix* should be a number between 0 to *bits* length.

    :param prefix: prefix length; an int or anything convertible to int.
    :param bits: the maximum prefix length (32 for IPv4, 128 for IPv6).
    :return: True if 0 <= int(prefix) <= bits, else False.
    """
    try:
        # Prefix should be a number (numeric strings are accepted too)
        prefix = int(prefix)
    except (ValueError, TypeError):
        # ROBUSTNESS: int(None) or int([]) raises TypeError, not
        # ValueError; treat any unconvertible value as invalid instead
        # of letting the exception escape.
        return False

    # Prefix should be a number between 0 to *bits*
    return 0 <= prefix <= bits
[ "def", "is_valid_ip_prefix", "(", "prefix", ",", "bits", ")", ":", "try", ":", "# Prefix should be a number", "prefix", "=", "int", "(", "prefix", ")", "except", "ValueError", ":", "return", "False", "# Prefix should be a number between 0 to *bits*", "return", "0", ...
Returns True if *prefix* is a valid IPv4 or IPv6 address prefix. *prefix* should be a number between 0 to *bits* length.
[ "Returns", "True", "if", "*", "prefix", "*", "is", "a", "valid", "IPv4", "or", "IPv6", "address", "prefix", "." ]
python
train
matousc89/padasip
padasip/misc/error_evaluation.py
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/misc/error_evaluation.py#L221-L246
def get_mean_error(x1, x2=-1, function="MSE"):
    """
    This function returns desired mean error. Options are: MSE, MAE, RMSE

    **Args:**

    * `x1` - first data series or error (1d array)

    **Kwargs:**

    * `x2` - second series (1d array) if first series was not error directly,\\
        then this should be the second series

    * `function` - which error measure to use: "MSE", "MAE" or "RMSE"

    **Returns:**

    * `e` - mean error value (float) obtained directly from `x1`, \\
        or as a difference of `x1` and `x2`
    """
    # Early-return dispatch instead of an if/elif chain.
    if function == "MSE":
        return MSE(x1, x2)
    if function == "MAE":
        return MAE(x1, x2)
    if function == "RMSE":
        return RMSE(x1, x2)
    raise ValueError('The provided error function is not known')
[ "def", "get_mean_error", "(", "x1", ",", "x2", "=", "-", "1", ",", "function", "=", "\"MSE\"", ")", ":", "if", "function", "==", "\"MSE\"", ":", "return", "MSE", "(", "x1", ",", "x2", ")", "elif", "function", "==", "\"MAE\"", ":", "return", "MAE", ...
This function returns desired mean error. Options are: MSE, MAE, RMSE **Args:** * `x1` - first data series or error (1d array) **Kwargs:** * `x2` - second series (1d array) if first series was not error directly,\\ then this should be the second series **Returns:** * `e` - mean error value (float) obtained directly from `x1`, \\ or as a difference of `x1` and `x2`
[ "This", "function", "returns", "desired", "mean", "error", ".", "Options", "are", ":", "MSE", "MAE", "RMSE", "**", "Args", ":", "**" ]
python
train
pinterest/pymemcache
pymemcache/client/base.py
https://github.com/pinterest/pymemcache/blob/f3a348f4ce2248cce8b398e93e08d984fb9100e5/pymemcache/client/base.py#L382-L399
def append(self, key, value, expire=0, noreply=None):
    """
    The memcached "append" command.

    Args:
      key: str, see class docs for details.
      value: str, see class docs for details.
      expire: optional int, number of seconds until the item is expired
              from the cache, or zero for no expiry (the default).
      noreply: optional bool, True to not wait for the reply (defaults to
               self.default_noreply).

    Returns:
      True.
    """
    # Fall back to the client-wide noreply setting when unspecified.
    effective_noreply = self.default_noreply if noreply is None else noreply
    results = self._store_cmd(b'append', {key: value}, expire,
                              effective_noreply)
    return results[key]
[ "def", "append", "(", "self", ",", "key", ",", "value", ",", "expire", "=", "0", ",", "noreply", "=", "None", ")", ":", "if", "noreply", "is", "None", ":", "noreply", "=", "self", ".", "default_noreply", "return", "self", ".", "_store_cmd", "(", "b'a...
The memcached "append" command. Args: key: str, see class docs for details. value: str, see class docs for details. expire: optional int, number of seconds until the item is expired from the cache, or zero for no expiry (the default). noreply: optional bool, True to not wait for the reply (defaults to self.default_noreply). Returns: True.
[ "The", "memcached", "append", "command", "." ]
python
train
swharden/SWHLab
swhlab/common.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/common.py#L97-L108
def timeit(timer=None):
    """simple timer. returns a time object, or a string."""
    # No timer given: start one by returning the current timestamp.
    if timer is None:
        return time.time()
    # Timer given: report elapsed time, scaled to a readable unit.
    elapsed = time.time() - timer
    if elapsed < 1:
        return "%.02f ms" % (elapsed * 1000.0)
    if elapsed < 60:
        return "%.02f s" % (elapsed)
    return "%.02f min" % (elapsed / 60.0)
[ "def", "timeit", "(", "timer", "=", "None", ")", ":", "if", "timer", "is", "None", ":", "return", "time", ".", "time", "(", ")", "else", ":", "took", "=", "time", ".", "time", "(", ")", "-", "timer", "if", "took", "<", "1", ":", "return", "\"%....
simple timer. returns a time object, or a string.
[ "simple", "timer", ".", "returns", "a", "time", "object", "or", "a", "string", "." ]
python
valid
projectatomic/osbs-client
osbs/core.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/core.py#L326-L334
def get_all_build_configs_by_labels(self, label_selectors):
    """
    Returns all builds matching a given set of label selectors. It is up to the
    calling function to filter the results.
    """
    # Render the (field, value) pairs as a comma-joined k8s label selector.
    selector = ','.join('%s=%s' % (field, value)
                        for field, value in label_selectors)
    url = self._build_url("buildconfigs/", labelSelector=selector)
    return self._get(url).json()['items']
[ "def", "get_all_build_configs_by_labels", "(", "self", ",", "label_selectors", ")", ":", "labels", "=", "[", "'%s=%s'", "%", "(", "field", ",", "value", ")", "for", "field", ",", "value", "in", "label_selectors", "]", "labels", "=", "','", ".", "join", "("...
Returns all builds matching a given set of label selectors. It is up to the calling function to filter the results.
[ "Returns", "all", "builds", "matching", "a", "given", "set", "of", "label", "selectors", ".", "It", "is", "up", "to", "the", "calling", "function", "to", "filter", "the", "results", "." ]
python
train
singularityhub/singularity-cli
spython/instance/cmd/iutils.py
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/instance/cmd/iutils.py#L9-L33
def parse_table(table_string, header, remove_rows=1):
    '''parse a table to json from a string, where a header is expected by
       default. Return a jsonified table.

       Parameters
       ==========
       table_string: the string table, ideally with a header
       header: header of expected table, must match dimension (number columns)
       remove_rows: an integer to indicate a number of rows to remove from top
                    the default is 1 assuming we don't want the header
    '''
    rows = [line for line in table_string.split('\n') if line]
    rows = rows[remove_rows:]

    # Parse into a list of dicts, one per remaining row.
    parsed = []
    for row in rows:
        # This assumes no white spaces in each entry, which should be the case
        cells = [cell for cell in row.split(' ') if cell]
        # Idiom fix: pair header names with cells directly via zip instead
        # of indexing with range(len(...)).
        parsed.append(dict(zip(header, cells)))
    return parsed
[ "def", "parse_table", "(", "table_string", ",", "header", ",", "remove_rows", "=", "1", ")", ":", "rows", "=", "[", "x", "for", "x", "in", "table_string", ".", "split", "(", "'\\n'", ")", "if", "x", "]", "rows", "=", "rows", "[", "0", "+", "remove_...
parse a table to json from a string, where a header is expected by default. Return a jsonified table. Parameters ========== table_string: the string table, ideally with a header header: header of expected table, must match dimension (number columns) remove_rows: an integer to indicate a number of rows to remove from top the default is 1 assuming we don't want the header
[ "parse", "a", "table", "to", "json", "from", "a", "string", "where", "a", "header", "is", "expected", "by", "default", ".", "Return", "a", "jsonified", "table", "." ]
python
train
iotile/coretools
iotileemulate/iotile/emulate/reference/controller_features/config_database.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileemulate/iotile/emulate/reference/controller_features/config_database.py#L109-L122
def compact(self):
    """Remove all invalid config entries."""
    # Collect the invalid entries and the data space they occupy.
    reclaimed = 0
    invalid_positions = []
    for pos, entry in enumerate(self.entries):
        if not entry.valid:
            invalid_positions.append(pos)
            reclaimed += entry.data_space()

    # Delete from the back so earlier indices stay valid.
    for pos in reversed(invalid_positions):
        del self.entries[pos]

    self.data_index -= reclaimed
[ "def", "compact", "(", "self", ")", ":", "saved_length", "=", "0", "to_remove", "=", "[", "]", "for", "i", ",", "entry", "in", "enumerate", "(", "self", ".", "entries", ")", ":", "if", "not", "entry", ".", "valid", ":", "to_remove", ".", "append", ...
Remove all invalid config entries.
[ "Remove", "all", "invalid", "config", "entries", "." ]
python
train
beetbox/audioread
audioread/macca.py
https://github.com/beetbox/audioread/blob/c8bedf7880f13a7b7488b108aaf245d648674818/audioread/macca.py#L281-L297
def setup(self, bitdepth=16):
    """Set the client format parameters, specifying the desired PCM
    audio data format to be read from the file. Must be called before
    reading from the file.
    """
    source_fmt = self.get_file_format()
    client_fmt = copy.copy(source_fmt)
    # Request signed, packed integer PCM at the given bit depth.
    client_fmt.mFormatID = AUDIO_ID_PCM
    client_fmt.mFormatFlags = PCM_IS_SIGNED_INT | PCM_IS_PACKED
    client_fmt.mBitsPerChannel = bitdepth
    client_fmt.mBytesPerPacket = (
        source_fmt.mChannelsPerFrame * client_fmt.mBitsPerChannel // 8
    )
    # One frame per packet, so packet size == frame size.
    client_fmt.mFramesPerPacket = 1
    client_fmt.mBytesPerFrame = client_fmt.mBytesPerPacket
    self.set_client_format(client_fmt)
[ "def", "setup", "(", "self", ",", "bitdepth", "=", "16", ")", ":", "fmt", "=", "self", ".", "get_file_format", "(", ")", "newfmt", "=", "copy", ".", "copy", "(", "fmt", ")", "newfmt", ".", "mFormatID", "=", "AUDIO_ID_PCM", "newfmt", ".", "mFormatFlags"...
Set the client format parameters, specifying the desired PCM audio data format to be read from the file. Must be called before reading from the file.
[ "Set", "the", "client", "format", "parameters", "specifying", "the", "desired", "PCM", "audio", "data", "format", "to", "be", "read", "from", "the", "file", ".", "Must", "be", "called", "before", "reading", "from", "the", "file", "." ]
python
train
shoebot/shoebot
lib/photobot/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/photobot/__init__.py#L486-L524
def mask(self):
    """Masks the layer below with this layer.

    Commits the current layer to the alpha channel of the previous layer.
    Primarily, mask() is useful when using gradient layers as masks on
    images below. For example:

    canvas.layer("image.jpg")
    canvas.gradient()
    canvas.layer(2).flip()
    canvas.layer(2).mask()

    Adds a white-to-black linear gradient to the alpha channel of
    image.jpg, making it evolve from opaque on the left to transparent
    on the right.
    """
    # Need at least two layers, and a layer below this one, to mask onto.
    if len(self.canvas.layers) < 2: return
    i = self.index()
    if i == 0: return

    layer = self.canvas.layers[i-1]
    alpha = Image.new("L", layer.img.size, 0)

    # Make a composite of the mask layer in grayscale
    # and its own alpha channel.
    mask = self.canvas.layers[i]
    flat = ImageChops.darker(mask.img.convert("L"), mask.img.split()[3])
    alpha.paste(flat, (mask.x, mask.y))
    # Combine with the target layer's existing alpha via darker(), so the
    # mask can only make pixels more transparent, never less.
    alpha = ImageChops.darker(alpha, layer.img.split()[3])
    layer.img.putalpha(alpha)

    # The mask layer itself is consumed by the operation.
    self.delete()
[ "def", "mask", "(", "self", ")", ":", "if", "len", "(", "self", ".", "canvas", ".", "layers", ")", "<", "2", ":", "return", "i", "=", "self", ".", "index", "(", ")", "if", "i", "==", "0", ":", "return", "layer", "=", "self", ".", "canvas", "....
Masks the layer below with this layer. Commits the current layer to the alpha channel of the previous layer. Primarily, mask() is useful when using gradient layers as masks on images below. For example: canvas.layer("image.jpg") canvas.gradient() canvas.layer(2).flip() canvas.layer(2).mask() Adds a white-to-black linear gradient to the alpha channel of image.jpg, making it evolve from opaque on the left to transparent on the right.
[ "Masks", "the", "layer", "below", "with", "this", "layer", ".", "Commits", "the", "current", "layer", "to", "the", "alpha", "channel", "of", "the", "previous", "layer", ".", "Primarily", "mask", "()", "is", "useful", "when", "using", "gradient", "layers", ...
python
valid
SCIP-Interfaces/PySCIPOpt
examples/unfinished/flp_nonlinear.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/unfinished/flp_nonlinear.py#L22-L73
def flp_nonlinear_mselect(I,J,d,M,f,c,K):
    """flp_nonlinear_mselect -- use multiple selection model
    Parameters:
        - I: set of customers
        - J: set of facilities
        - d[i]: demand for customer i
        - M[j]: capacity of facility j
        - f[j]: fixed cost for using a facility in point j
        - c[i,j]: unit cost of servicing demand point i from facility j
        - K: number of linear pieces for approximation of non-linear cost function
    Returns a model, ready to be solved.
    """
    # Per facility j: K+1 equidistant breakpoints a[j] on [0, M[j]] and the
    # concave cost values b[j] = f[j]*sqrt(a) sampled at those breakpoints.
    a,b = {},{}
    for j in J:
        U = M[j]
        L = 0  # NOTE(review): lower bound of the range; currently unused below.
        width = U/float(K)
        a[j] = [k*width for k in range(K+1)]
        b[j] = [f[j]*math.sqrt(value) for value in a[j]]

    model = Model("nonlinear flp -- piecewise linear version with multiple selection")

    # x[i,j]: amount of customer i's demand served from facility j.
    x = {}
    for j in J:
        for i in I:
            x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j

    # total volume transported from plant j, corresponding (linearized) cost, selection variable:
    X,F,z = {},{},{}
    for j in J:
        # add constraints for linking piecewise linear part:
        X[j],F[j],z[j] = mult_selection(model,a[j],b[j])
        X[j].ub = M[j]
        # for i in I:
        #     model.addCons(
        #         x[i,j] <= \
        #         quicksum(min(d[i],a[j][k+1]) * z[j][k] for k in range(K)),\
        #         "Strong(%s,%s)"%(i,j))

    # constraints for customer's demand satisfaction
    for i in I:
        model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)

    # Flow conservation: per-customer shipments from j sum to facility volume.
    for j in J:
        model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)

    # Objective: linearized facility costs plus transportation costs.
    model.setObjective(quicksum(F[j] for j in J) +\
                       quicksum(c[i,j]*x[i,j] for j in J for i in I),\
                       "minimize")

    model.data = x,X,F
    return model
[ "def", "flp_nonlinear_mselect", "(", "I", ",", "J", ",", "d", ",", "M", ",", "f", ",", "c", ",", "K", ")", ":", "a", ",", "b", "=", "{", "}", ",", "{", "}", "for", "j", "in", "J", ":", "U", "=", "M", "[", "j", "]", "L", "=", "0", "wid...
flp_nonlinear_mselect -- use multiple selection model Parameters: - I: set of customers - J: set of facilities - d[i]: demand for customer i - M[j]: capacity of facility j - f[j]: fixed cost for using a facility in point j - c[i,j]: unit cost of servicing demand point i from facility j - K: number of linear pieces for approximation of non-linear cost function Returns a model, ready to be solved.
[ "flp_nonlinear_mselect", "--", "use", "multiple", "selection", "model", "Parameters", ":", "-", "I", ":", "set", "of", "customers", "-", "J", ":", "set", "of", "facilities", "-", "d", "[", "i", "]", ":", "demand", "for", "customer", "i", "-", "M", "[",...
python
train
fastai/fastai
fastai/callback.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/callback.py#L358-L360
def annealing_linear(start:Number, end:Number, pct:float)->Number:
    "Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
    # Walk `pct` of the way along the span from start to end.
    span = end - start
    return start + pct * span
[ "def", "annealing_linear", "(", "start", ":", "Number", ",", "end", ":", "Number", ",", "pct", ":", "float", ")", "->", "Number", ":", "return", "start", "+", "pct", "*", "(", "end", "-", "start", ")" ]
Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0.
[ "Linearly", "anneal", "from", "start", "to", "end", "as", "pct", "goes", "from", "0", ".", "0", "to", "1", ".", "0", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/managers.py#L2860-L2877
def get_assessment_taken_bank_session(self, proxy):
    """Gets the session for retrieving taken assessments to bank mappings.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentTakenBankSession) - an
            ``AssessmentTakenBankSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_assessment_taken_bank()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_taken_bank()`` is ``true``.*
    """
    # Guard: the session is only available when the capability is supported.
    supported = self.supports_assessment_taken_bank()
    if not supported:
        raise errors.Unimplemented()
    # pylint: disable=no-member
    return sessions.AssessmentTakenBankSession(proxy=proxy, runtime=self._runtime)
[ "def", "get_assessment_taken_bank_session", "(", "self", ",", "proxy", ")", ":", "if", "not", "self", ".", "supports_assessment_taken_bank", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "# pylint: disable=no-member", "return", "sessions", ".", ...
Gets the session for retrieving taken assessments to bank mappings. arg: proxy (osid.proxy.Proxy): a proxy return: (osid.assessment.AssessmentTakenBankSession) - an ``AssessmentTakenBankSession`` raise: NullArgument - ``proxy`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_taken_bank()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_taken_bank()`` is ``true``.*
[ "Gets", "the", "session", "for", "retrieving", "taken", "assessments", "to", "bank", "mappings", "." ]
python
train
cmbruns/pyopenvr
src/openvr/__init__.py
https://github.com/cmbruns/pyopenvr/blob/68395d26bb3df6ab1f0f059c38d441f962938be6/src/openvr/__init__.py#L4888-L4894
def getOverlayTransformTrackedDeviceComponent(self, ulOverlayHandle, pchComponentName, unComponentNameSize):
    """Gets the transform information when the overlay is rendering on a component."""
    fn = self.function_table.getOverlayTransformTrackedDeviceComponent
    # Out-parameter filled in by the native call.
    device_index = TrackedDeviceIndex_t()
    result = fn(ulOverlayHandle, byref(device_index),
                pchComponentName, unComponentNameSize)
    return result, device_index
[ "def", "getOverlayTransformTrackedDeviceComponent", "(", "self", ",", "ulOverlayHandle", ",", "pchComponentName", ",", "unComponentNameSize", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayTransformTrackedDeviceComponent", "punDeviceIndex", "=", "Tracke...
Gets the transform information when the overlay is rendering on a component.
[ "Gets", "the", "transform", "information", "when", "the", "overlay", "is", "rendering", "on", "a", "component", "." ]
python
train
globus/globus-cli
globus_cli/parsing/shared_options.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/parsing/shared_options.py#L664-L753
def server_add_and_update_opts(*args, **kwargs): """ shared collection of options for `globus transfer endpoint server add` and `globus transfer endpoint server update`. Accepts a toggle to know if it's being used as `add` or `update`. usage: >>> @server_add_and_update_opts >>> def command_func(subject, port, scheme, hostname): >>> ... or >>> @server_add_and_update_opts(add=True) >>> def command_func(subject, port, scheme, hostname): >>> ... """ def port_range_callback(ctx, param, value): if not value: return None value = value.lower().strip() if value == "unspecified": return None, None if value == "unrestricted": return 1024, 65535 try: lower, upper = map(int, value.split("-")) except ValueError: # too many/few values from split or non-integer(s) raise click.BadParameter( "must specify as 'unspecified', " "'unrestricted', or as range separated " "by a hyphen (e.g. '50000-51000')" ) if not 1024 <= lower <= 65535 or not 1024 <= upper <= 65535: raise click.BadParameter("must be within the 1024-65535 range") return (lower, upper) if lower <= upper else (upper, lower) def inner_decorator(f, add=False): f = click.option("--hostname", required=add, help="Server Hostname.")(f) default_scheme = "gsiftp" if add else None f = click.option( "--scheme", help="Scheme for the Server.", type=CaseInsensitiveChoice(("gsiftp", "ftp")), default=default_scheme, show_default=add, )(f) default_port = 2811 if add else None f = click.option( "--port", help="Port for Globus control channel connections.", type=int, default=default_port, show_default=add, )(f) f = click.option( "--subject", help=( "Subject of the X509 Certificate of the server. When " "unspecified, the CN must match the server hostname." 
), )(f) for adjective, our_preposition, their_preposition in [ ("incoming", "to", "from"), ("outgoing", "from", "to"), ]: f = click.option( "--{}-data-ports".format(adjective), callback=port_range_callback, help="Indicate to firewall administrators at other sites how to " "allow {} traffic {} this server {} their own. Specify as " "either 'unspecified', 'unrestricted', or as range of " "ports separated by a hyphen (e.g. '50000-51000') within " "the 1024-65535 range.".format( adjective, our_preposition, their_preposition ), )(f) return f return detect_and_decorate(inner_decorator, args, kwargs)
[ "def", "server_add_and_update_opts", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "port_range_callback", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "not", "value", ":", "return", "None", "value", "=", "value", ".", "lower", ...
shared collection of options for `globus transfer endpoint server add` and `globus transfer endpoint server update`. Accepts a toggle to know if it's being used as `add` or `update`. usage: >>> @server_add_and_update_opts >>> def command_func(subject, port, scheme, hostname): >>> ... or >>> @server_add_and_update_opts(add=True) >>> def command_func(subject, port, scheme, hostname): >>> ...
[ "shared", "collection", "of", "options", "for", "globus", "transfer", "endpoint", "server", "add", "and", "globus", "transfer", "endpoint", "server", "update", ".", "Accepts", "a", "toggle", "to", "know", "if", "it", "s", "being", "used", "as", "add", "or", ...
python
train
singingwolfboy/flask-dance
flask_dance/utils.py
https://github.com/singingwolfboy/flask-dance/blob/87d45328bbdaff833559a6d3da71461fe4579592/flask_dance/utils.py#L82-L92
def timestamp_from_datetime(dt): """ Given a datetime, in UTC, return a float that represents the timestamp for that datetime. http://stackoverflow.com/questions/8777753/converting-datetime-date-to-utc-timestamp-in-python#8778548 """ dt = dt.replace(tzinfo=utc) if hasattr(dt, "timestamp") and callable(dt.timestamp): return dt.replace(tzinfo=utc).timestamp() return (dt - datetime(1970, 1, 1, tzinfo=utc)).total_seconds()
[ "def", "timestamp_from_datetime", "(", "dt", ")", ":", "dt", "=", "dt", ".", "replace", "(", "tzinfo", "=", "utc", ")", "if", "hasattr", "(", "dt", ",", "\"timestamp\"", ")", "and", "callable", "(", "dt", ".", "timestamp", ")", ":", "return", "dt", "...
Given a datetime, in UTC, return a float that represents the timestamp for that datetime. http://stackoverflow.com/questions/8777753/converting-datetime-date-to-utc-timestamp-in-python#8778548
[ "Given", "a", "datetime", "in", "UTC", "return", "a", "float", "that", "represents", "the", "timestamp", "for", "that", "datetime", "." ]
python
train
thunder-project/thunder
thunder/series/series.py
https://github.com/thunder-project/thunder/blob/967ff8f3e7c2fabe1705743d95eb2746d4329786/thunder/series/series.py#L914-L943
def convolve(self, signal, mode='full'): """ Convolve series data against another signal. Parameters ---------- signal : array Signal to convolve with (must be 1D) mode : str, optional, default='full' Mode of convolution, options are 'full', 'same', and 'valid' """ from numpy import convolve s = asarray(signal) n = size(self.index) m = size(s) # use expected lengths to make a new index if mode == 'same': newmax = max(n, m) elif mode == 'valid': newmax = max(m, n) - min(m, n) + 1 else: newmax = n+m-1 newindex = arange(0, newmax) return self.map(lambda x: convolve(x, signal, mode), index=newindex)
[ "def", "convolve", "(", "self", ",", "signal", ",", "mode", "=", "'full'", ")", ":", "from", "numpy", "import", "convolve", "s", "=", "asarray", "(", "signal", ")", "n", "=", "size", "(", "self", ".", "index", ")", "m", "=", "size", "(", "s", ")"...
Convolve series data against another signal. Parameters ---------- signal : array Signal to convolve with (must be 1D) mode : str, optional, default='full' Mode of convolution, options are 'full', 'same', and 'valid'
[ "Convolve", "series", "data", "against", "another", "signal", "." ]
python
train
pallets/werkzeug
src/werkzeug/http.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/http.py#L1086-L1219
def dump_cookie( key, value="", max_age=None, expires=None, path="/", domain=None, secure=False, httponly=False, charset="utf-8", sync_expires=True, max_size=4093, samesite=None, ): """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The parameters are the same as in the cookie Morsel object in the Python standard library but it accepts unicode data, too. On Python 3 the return value of this function will be a unicode string, on Python 2 it will be a native string. In both cases the return value is usually restricted to ascii as the vast majority of values are properly escaped, but that is no guarantee. If a unicode string is returned it's tunneled through latin1 as required by PEP 3333. The return value is not ASCII safe if the key contains unicode characters. This is technically against the specification but happens in the wild. It's strongly recommended to not use non-ASCII values for the keys. :param max_age: should be a number of seconds, or `None` (default) if the cookie should last only as long as the client's browser session. Additionally `timedelta` objects are accepted, too. :param expires: should be a `datetime` object or unix timestamp. :param path: limits the cookie to a given path, per default it will span the whole domain. :param domain: Use this if you want to set a cross-domain cookie. For example, ``domain=".example.com"`` will set a cookie that is readable by the domain ``www.example.com``, ``foo.example.com`` etc. Otherwise, a cookie will only be readable by the domain that set it. :param secure: The cookie will only be available via HTTPS :param httponly: disallow JavaScript to access the cookie. This is an extension to the cookie standard and probably not supported by all browsers. :param charset: the encoding for unicode values. :param sync_expires: automatically set expires if max_age is defined but expires not. :param max_size: Warn if the final header value exceeds this size. 
The default, 4093, should be safely `supported by most browsers <cookie_>`_. Set to 0 to disable this check. :param samesite: Limits the scope of the cookie such that it will only be attached to requests if those requests are "same-site". .. _`cookie`: http://browsercookielimits.squawky.net/ """ key = to_bytes(key, charset) value = to_bytes(value, charset) if path is not None: path = iri_to_uri(path, charset) domain = _make_cookie_domain(domain) if isinstance(max_age, timedelta): max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds if expires is not None: if not isinstance(expires, string_types): expires = cookie_date(expires) elif max_age is not None and sync_expires: expires = to_bytes(cookie_date(time() + max_age)) samesite = samesite.title() if samesite else None if samesite not in ("Strict", "Lax", None): raise ValueError("invalid SameSite value; must be 'Strict', 'Lax' or None") buf = [key + b"=" + _cookie_quote(value)] # XXX: In theory all of these parameters that are not marked with `None` # should be quoted. Because stdlib did not quote it before I did not # want to introduce quoting there now. for k, v, q in ( (b"Domain", domain, True), (b"Expires", expires, False), (b"Max-Age", max_age, False), (b"Secure", secure, None), (b"HttpOnly", httponly, None), (b"Path", path, False), (b"SameSite", samesite, False), ): if q is None: if v: buf.append(k) continue if v is None: continue tmp = bytearray(k) if not isinstance(v, (bytes, bytearray)): v = to_bytes(text_type(v), charset) if q: v = _cookie_quote(v) tmp += b"=" + v buf.append(bytes(tmp)) # The return value will be an incorrectly encoded latin1 header on # Python 3 for consistency with the headers object and a bytestring # on Python 2 because that's how the API makes more sense. rv = b"; ".join(buf) if not PY2: rv = rv.decode("latin1") # Warn if the final value of the cookie is less than the limit. If the # cookie is too large, then it may be silently ignored, which can be quite # hard to debug. 
cookie_size = len(rv) if max_size and cookie_size > max_size: value_size = len(value) warnings.warn( 'The "{key}" cookie is too large: the value was {value_size} bytes' " but the header required {extra_size} extra bytes. The final size" " was {cookie_size} bytes but the limit is {max_size} bytes." " Browsers may silently ignore cookies larger than this.".format( key=key, value_size=value_size, extra_size=cookie_size - value_size, cookie_size=cookie_size, max_size=max_size, ), stacklevel=2, ) return rv
[ "def", "dump_cookie", "(", "key", ",", "value", "=", "\"\"", ",", "max_age", "=", "None", ",", "expires", "=", "None", ",", "path", "=", "\"/\"", ",", "domain", "=", "None", ",", "secure", "=", "False", ",", "httponly", "=", "False", ",", "charset", ...
Creates a new Set-Cookie header without the ``Set-Cookie`` prefix The parameters are the same as in the cookie Morsel object in the Python standard library but it accepts unicode data, too. On Python 3 the return value of this function will be a unicode string, on Python 2 it will be a native string. In both cases the return value is usually restricted to ascii as the vast majority of values are properly escaped, but that is no guarantee. If a unicode string is returned it's tunneled through latin1 as required by PEP 3333. The return value is not ASCII safe if the key contains unicode characters. This is technically against the specification but happens in the wild. It's strongly recommended to not use non-ASCII values for the keys. :param max_age: should be a number of seconds, or `None` (default) if the cookie should last only as long as the client's browser session. Additionally `timedelta` objects are accepted, too. :param expires: should be a `datetime` object or unix timestamp. :param path: limits the cookie to a given path, per default it will span the whole domain. :param domain: Use this if you want to set a cross-domain cookie. For example, ``domain=".example.com"`` will set a cookie that is readable by the domain ``www.example.com``, ``foo.example.com`` etc. Otherwise, a cookie will only be readable by the domain that set it. :param secure: The cookie will only be available via HTTPS :param httponly: disallow JavaScript to access the cookie. This is an extension to the cookie standard and probably not supported by all browsers. :param charset: the encoding for unicode values. :param sync_expires: automatically set expires if max_age is defined but expires not. :param max_size: Warn if the final header value exceeds this size. The default, 4093, should be safely `supported by most browsers <cookie_>`_. Set to 0 to disable this check. 
:param samesite: Limits the scope of the cookie such that it will only be attached to requests if those requests are "same-site". .. _`cookie`: http://browsercookielimits.squawky.net/
[ "Creates", "a", "new", "Set", "-", "Cookie", "header", "without", "the", "Set", "-", "Cookie", "prefix", "The", "parameters", "are", "the", "same", "as", "in", "the", "cookie", "Morsel", "object", "in", "the", "Python", "standard", "library", "but", "it", ...
python
train
kata198/indexedredis
IndexedRedis/fields/foreign.py
https://github.com/kata198/indexedredis/blob/f9c85adcf5218dac25acb06eedc63fc2950816fa/IndexedRedis/fields/foreign.py#L231-L240
def isFetched(self): ''' isFetched - @see ForeignLinkData.isFetched ''' if not self.obj: return False if not self.pk or None in self.obj: return False return not bool(self.obj is None)
[ "def", "isFetched", "(", "self", ")", ":", "if", "not", "self", ".", "obj", ":", "return", "False", "if", "not", "self", ".", "pk", "or", "None", "in", "self", ".", "obj", ":", "return", "False", "return", "not", "bool", "(", "self", ".", "obj", ...
isFetched - @see ForeignLinkData.isFetched
[ "isFetched", "-" ]
python
valid
vtkiorg/vtki
vtki/common.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L460-L494
def _cell_scalar(self, name=None): """ Returns the cell scalars of a vtk object Parameters ---------- name : str Name of cell scalars to retrive. Returns ------- scalars : np.ndarray Numpy array of scalars """ if name is None: # use active scalar array field, name = self.active_scalar_info if field != CELL_DATA_FIELD: raise RuntimeError('Must specify an array to fetch.') vtkarr = self.GetCellData().GetArray(name) if vtkarr is None: raise AssertionError('({}) is not a cell scalar'.format(name)) # numpy does not support bit array data types if isinstance(vtkarr, vtk.vtkBitArray): vtkarr = vtk_bit_array_to_char(vtkarr) if name not in self._cell_bool_array_names: self._cell_bool_array_names.append(name) array = vtk_to_numpy(vtkarr) if array.dtype == np.uint8 and name in self._cell_bool_array_names: array = array.view(np.bool) return array
[ "def", "_cell_scalar", "(", "self", ",", "name", "=", "None", ")", ":", "if", "name", "is", "None", ":", "# use active scalar array", "field", ",", "name", "=", "self", ".", "active_scalar_info", "if", "field", "!=", "CELL_DATA_FIELD", ":", "raise", "Runtime...
Returns the cell scalars of a vtk object Parameters ---------- name : str Name of cell scalars to retrive. Returns ------- scalars : np.ndarray Numpy array of scalars
[ "Returns", "the", "cell", "scalars", "of", "a", "vtk", "object" ]
python
train
tommikaikkonen/prettyprinter
prettyprinter/__init__.py
https://github.com/tommikaikkonen/prettyprinter/blob/6b405884b8085eaf867e81c02b7b662b463ac5a0/prettyprinter/__init__.py#L110-L139
def pformat( object, indent=_UNSET_SENTINEL, width=_UNSET_SENTINEL, depth=_UNSET_SENTINEL, *, ribbon_width=_UNSET_SENTINEL, max_seq_len=_UNSET_SENTINEL, compact=_UNSET_SENTINEL, sort_dict_keys=_UNSET_SENTINEL ): """ Returns a pretty printed representation of the object as a ``str``. Accepts the same parameters as :func:`~prettyprinter.pprint`. The output is not colored. """ sdocs = python_to_sdocs( object, **_merge_defaults( indent=indent, width=width, depth=depth, ribbon_width=ribbon_width, max_seq_len=max_seq_len, sort_dict_keys=sort_dict_keys, ) ) stream = StringIO() default_render_to_stream(stream, sdocs) return stream.getvalue()
[ "def", "pformat", "(", "object", ",", "indent", "=", "_UNSET_SENTINEL", ",", "width", "=", "_UNSET_SENTINEL", ",", "depth", "=", "_UNSET_SENTINEL", ",", "*", ",", "ribbon_width", "=", "_UNSET_SENTINEL", ",", "max_seq_len", "=", "_UNSET_SENTINEL", ",", "compact",...
Returns a pretty printed representation of the object as a ``str``. Accepts the same parameters as :func:`~prettyprinter.pprint`. The output is not colored.
[ "Returns", "a", "pretty", "printed", "representation", "of", "the", "object", "as", "a", "str", ".", "Accepts", "the", "same", "parameters", "as", ":", "func", ":", "~prettyprinter", ".", "pprint", ".", "The", "output", "is", "not", "colored", "." ]
python
train
hydpy-dev/hydpy
hydpy/core/auxfiletools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/auxfiletools.py#L595-L611
def get_filename(self, variable): """Return the auxiliary file name the given variable is allocated to or |None| if the given variable is not allocated to any auxiliary file name. >>> from hydpy import dummies >>> eqb = dummies.v2af.eqb[0] >>> dummies.v2af.get_filename(eqb) 'file1' >>> eqb += 500.0 >>> dummies.v2af.get_filename(eqb) """ fn2var = self._type2filename2variable.get(type(variable), {}) for (fn_, var) in fn2var.items(): if var == variable: return fn_ return None
[ "def", "get_filename", "(", "self", ",", "variable", ")", ":", "fn2var", "=", "self", ".", "_type2filename2variable", ".", "get", "(", "type", "(", "variable", ")", ",", "{", "}", ")", "for", "(", "fn_", ",", "var", ")", "in", "fn2var", ".", "items",...
Return the auxiliary file name the given variable is allocated to or |None| if the given variable is not allocated to any auxiliary file name. >>> from hydpy import dummies >>> eqb = dummies.v2af.eqb[0] >>> dummies.v2af.get_filename(eqb) 'file1' >>> eqb += 500.0 >>> dummies.v2af.get_filename(eqb)
[ "Return", "the", "auxiliary", "file", "name", "the", "given", "variable", "is", "allocated", "to", "or", "|None|", "if", "the", "given", "variable", "is", "not", "allocated", "to", "any", "auxiliary", "file", "name", "." ]
python
train
CityOfZion/neo-python
neo/Wallets/NEP5Token.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Wallets/NEP5Token.py#L255-L276
def Mint(self, wallet, mint_to_addr, attachment_args, invoke_attrs=None): """ Call the "mintTokens" function of the smart contract. Args: wallet (neo.Wallets.Wallet): a wallet instance. mint_to_addr (str): public address of the account to mint the tokens to. attachment_args: (list): a list of arguments used to attach neo and/or gas to an invoke, eg ['--attach-gas=10.0','--attach-neo=3'] invoke_attrs: (list): a list of TransactionAttributes to be attached to the mint transaction Returns: tuple: InvocationTransaction: the transaction. int: the transaction fee. list: the neo VM evaluation stack results. """ invoke_args = [self.ScriptHash.ToString(), 'mintTokens', []] invoke_args = invoke_args + attachment_args tx, fee, results, num_ops, engine_success = TestInvokeContract(wallet, invoke_args, None, True, from_addr=mint_to_addr, invoke_attrs=invoke_attrs) return tx, fee, results
[ "def", "Mint", "(", "self", ",", "wallet", ",", "mint_to_addr", ",", "attachment_args", ",", "invoke_attrs", "=", "None", ")", ":", "invoke_args", "=", "[", "self", ".", "ScriptHash", ".", "ToString", "(", ")", ",", "'mintTokens'", ",", "[", "]", "]", ...
Call the "mintTokens" function of the smart contract. Args: wallet (neo.Wallets.Wallet): a wallet instance. mint_to_addr (str): public address of the account to mint the tokens to. attachment_args: (list): a list of arguments used to attach neo and/or gas to an invoke, eg ['--attach-gas=10.0','--attach-neo=3'] invoke_attrs: (list): a list of TransactionAttributes to be attached to the mint transaction Returns: tuple: InvocationTransaction: the transaction. int: the transaction fee. list: the neo VM evaluation stack results.
[ "Call", "the", "mintTokens", "function", "of", "the", "smart", "contract", "." ]
python
train
Yelp/kafka-utils
kafka_utils/util/zookeeper.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/zookeeper.py#L529-L547
def get_cluster_plan(self): """Fetch cluster plan from zookeeper.""" _log.info('Fetching current cluster-topology from Zookeeper...') cluster_layout = self.get_topics(fetch_partition_state=False) # Re-format cluster-layout partitions = [ { 'topic': topic_id, 'partition': int(p_id), 'replicas': partitions_data['replicas'] } for topic_id, topic_info in six.iteritems(cluster_layout) for p_id, partitions_data in six.iteritems(topic_info['partitions']) ] return { 'version': 1, 'partitions': partitions }
[ "def", "get_cluster_plan", "(", "self", ")", ":", "_log", ".", "info", "(", "'Fetching current cluster-topology from Zookeeper...'", ")", "cluster_layout", "=", "self", ".", "get_topics", "(", "fetch_partition_state", "=", "False", ")", "# Re-format cluster-layout", "pa...
Fetch cluster plan from zookeeper.
[ "Fetch", "cluster", "plan", "from", "zookeeper", "." ]
python
train
naught101/sobol_seq
sobol_seq/sobol_seq.py
https://github.com/naught101/sobol_seq/blob/6ac1799818a1b9a359a5fc86517173584fe34613/sobol_seq/sobol_seq.py#L114-L130
def i4_sobol_generate(dim_num, n, skip=1): """ i4_sobol_generate generates a Sobol dataset. Parameters: Input, integer dim_num, the spatial dimension. Input, integer N, the number of points to generate. Input, integer SKIP, the number of initial points to skip. Output, real R(M,N), the points. """ r = np.full((n, dim_num), np.nan) for j in range(n): seed = j + skip r[j, 0:dim_num], next_seed = i4_sobol(dim_num, seed) return r
[ "def", "i4_sobol_generate", "(", "dim_num", ",", "n", ",", "skip", "=", "1", ")", ":", "r", "=", "np", ".", "full", "(", "(", "n", ",", "dim_num", ")", ",", "np", ".", "nan", ")", "for", "j", "in", "range", "(", "n", ")", ":", "seed", "=", ...
i4_sobol_generate generates a Sobol dataset. Parameters: Input, integer dim_num, the spatial dimension. Input, integer N, the number of points to generate. Input, integer SKIP, the number of initial points to skip. Output, real R(M,N), the points.
[ "i4_sobol_generate", "generates", "a", "Sobol", "dataset", "." ]
python
train
ntucllab/libact
libact/query_strategies/multiclass/mdsp.py
https://github.com/ntucllab/libact/blob/e37e9ed6c36febe701d84b2d495c958ab02f0bc8/libact/query_strategies/multiclass/mdsp.py#L24-L171
def _smacof_single_p(similarities, n_uq, metric=True, n_components=2, init=None, max_iter=300, verbose=0, eps=1e-3, random_state=None): """ Computes multidimensional scaling using SMACOF algorithm Parameters ---------- n_uq similarities: symmetric ndarray, shape [n * n] similarities between the points metric: boolean, optional, default: True compute metric or nonmetric SMACOF algorithm n_components: int, optional, default: 2 number of dimension in which to immerse the similarities overwritten if initial array is provided. init: {None or ndarray}, optional if None, randomly chooses the initial configuration if ndarray, initialize the SMACOF algorithm with this array max_iter: int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run verbose: int, optional, default: 0 level of verbosity eps: float, optional, default: 1e-6 relative tolerance w.r.t stress to declare converge random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Returns ------- X: ndarray (n_samples, n_components), float coordinates of the n_samples points in a n_components-space stress_: float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points) n_iter : int Number of iterations run. 
""" similarities = check_symmetric(similarities, raise_exception=True) n_samples = similarities.shape[0] random_state = check_random_state(random_state) W = np.ones((n_samples, n_samples)) W[:n_uq, :n_uq] = 0.0 W[n_uq:, n_uq:] = 0.0 # W[np.arange(len(W)), np.arange(len(W))] = 0.0 V = -W V[np.arange(len(V)), np.arange(len(V))] = W.sum(axis=1) e = np.ones((n_samples, 1)) Vp = np.linalg.inv(V + np.dot(e, e.T)/n_samples) - np.dot(e, e.T)/n_samples # Vp = np.linalg.pinv(V) # sim_flat = ((1 - np.tri(n_samples)) * similarities).ravel() sim_flat = similarities.ravel() sim_flat_w = sim_flat[sim_flat != 0] if init is None: # Randomly choose initial configuration X = random_state.rand(n_samples * n_components) X = X.reshape((n_samples, n_components)) else: # overrides the parameter p n_components = init.shape[1] if n_samples != init.shape[0]: raise ValueError("init matrix should be of shape (%d, %d)" % (n_samples, n_components)) X = init old_stress = None ir = IsotonicRegression() for it in range(max_iter): # Compute distance and monotonic regression dis = euclidean_distances(X) if metric: disparities = similarities else: # dis_flat = dis.ravel() # # similarities with 0 are considered as missing values # dis_flat_w = dis_flat[sim_flat != 0] # # Compute the disparities using a monotonic regression # disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) # disparities = dis_flat.copy() # disparities[sim_flat != 0] = disparities_flat # disparities = disparities.reshape((n_samples, n_samples)) # disparities *= np.sqrt((n_samples * (n_samples - 1) / 2) / # (disparities ** 2).sum()) dis_flat = dis.ravel() # similarities with 0 are considered as missing values dis_flat_w = dis_flat[sim_flat != 0] # Compute the disparities using a monotonic regression disparities_flat = ir.fit_transform(sim_flat_w, dis_flat_w) disparities = dis_flat.copy() disparities[sim_flat != 0] = disparities_flat disparities = disparities.reshape((n_samples, n_samples)) disparities *= np.sqrt((n_samples * 
(n_samples - 1) / 2) / (disparities ** 2).sum()) disparities[similarities==0] = 0 # Compute stress # stress = ((dis.ravel() - disparities.ravel()) ** 2).sum() / 2 _stress = (W.ravel()*((dis.ravel() - disparities.ravel()) ** 2)).sum() / 2 # Update X using the Guttman transform # dis[dis == 0] = 1e-5 # ratio = disparities / dis # B = - ratio # B[np.arange(len(B)), np.arange(len(B))] += ratio.sum(axis=1) # X = 1. / n_samples * np.dot(B, X) # print (1. / n_samples * np.dot(B, X))[:5].T dis[dis == 0] = 1e-5 ratio = disparities / dis _B = - W*ratio _B[np.arange(len(_B)), np.arange(len(_B))] += (W*ratio).sum(axis=1) X = np.dot(Vp, np.dot(_B, X)) # print X[:5].T dis = np.sqrt((X ** 2).sum(axis=1)).sum() if verbose >= 2: print('it: %d, stress %s' % (it, _stress)) if old_stress is not None: if(old_stress - _stress / dis) < eps: if verbose: print('breaking at iteration %d with stress %s' % (it, _stress)) break old_stress = _stress / dis return X, _stress, it + 1
[ "def", "_smacof_single_p", "(", "similarities", ",", "n_uq", ",", "metric", "=", "True", ",", "n_components", "=", "2", ",", "init", "=", "None", ",", "max_iter", "=", "300", ",", "verbose", "=", "0", ",", "eps", "=", "1e-3", ",", "random_state", "=", ...
Computes multidimensional scaling using SMACOF algorithm Parameters ---------- n_uq similarities: symmetric ndarray, shape [n * n] similarities between the points metric: boolean, optional, default: True compute metric or nonmetric SMACOF algorithm n_components: int, optional, default: 2 number of dimension in which to immerse the similarities overwritten if initial array is provided. init: {None or ndarray}, optional if None, randomly chooses the initial configuration if ndarray, initialize the SMACOF algorithm with this array max_iter: int, optional, default: 300 Maximum number of iterations of the SMACOF algorithm for a single run verbose: int, optional, default: 0 level of verbosity eps: float, optional, default: 1e-6 relative tolerance w.r.t stress to declare converge random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Returns ------- X: ndarray (n_samples, n_components), float coordinates of the n_samples points in a n_components-space stress_: float The final value of the stress (sum of squared distance of the disparities and the distances for all constrained points) n_iter : int Number of iterations run.
[ "Computes", "multidimensional", "scaling", "using", "SMACOF", "algorithm", "Parameters", "----------", "n_uq", "similarities", ":", "symmetric", "ndarray", "shape", "[", "n", "*", "n", "]", "similarities", "between", "the", "points", "metric", ":", "boolean", "opt...
python
train
RRZE-HPC/pycachesim
cachesim/cache.py
https://github.com/RRZE-HPC/pycachesim/blob/6dd084d29cf91ec19b016e0db9ccdfc8d1f63c5b/cachesim/cache.py#L168-L178
def print_stats(self, header=True, file=sys.stdout): """Pretty print stats table.""" if header: print("CACHE {:*^18} {:*^18} {:*^18} {:*^18} {:*^18}".format( "HIT", "MISS", "LOAD", "STORE", "EVICT"), file=file) for s in self.stats(): print("{name:>5} {HIT_count:>6} ({HIT_byte:>8}B) {MISS_count:>6} ({MISS_byte:>8}B) " "{LOAD_count:>6} ({LOAD_byte:>8}B) {STORE_count:>6} " "({STORE_byte:>8}B) {EVICT_count:>6} ({EVICT_byte:>8}B)".format( HIT_bytes=2342, **s), file=file)
[ "def", "print_stats", "(", "self", ",", "header", "=", "True", ",", "file", "=", "sys", ".", "stdout", ")", ":", "if", "header", ":", "print", "(", "\"CACHE {:*^18} {:*^18} {:*^18} {:*^18} {:*^18}\"", ".", "format", "(", "\"HIT\"", ",", "\"MISS\"", ",", "\"L...
Pretty print stats table.
[ "Pretty", "print", "stats", "table", "." ]
python
train
emre/storm
storm/__main__.py
https://github.com/emre/storm/blob/c752defc1b718cfffbf0e0e15532fa1d7840bf6d/storm/__main__.py#L146-L169
def update(name, connection_uri="", id_file="", o=[], config=None): """ Enhanced version of the edit command featuring multiple edits using regular expressions to match entries """ storm_ = get_storm_instance(config) settings = {} if id_file != "": settings['identityfile'] = id_file for option in o: k, v = option.split("=") settings[k] = v try: storm_.update_entry(name, **settings) print(get_formatted_message( '"{0}" updated successfully.'.format( name ), 'success')) except ValueError as error: print(get_formatted_message(error, 'error'), file=sys.stderr) sys.exit(1)
[ "def", "update", "(", "name", ",", "connection_uri", "=", "\"\"", ",", "id_file", "=", "\"\"", ",", "o", "=", "[", "]", ",", "config", "=", "None", ")", ":", "storm_", "=", "get_storm_instance", "(", "config", ")", "settings", "=", "{", "}", "if", ...
Enhanced version of the edit command featuring multiple edits using regular expressions to match entries
[ "Enhanced", "version", "of", "the", "edit", "command", "featuring", "multiple", "edits", "using", "regular", "expressions", "to", "match", "entries" ]
python
train
ladybug-tools/ladybug
ladybug/designday.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/designday.py#L323-L383
def from_ep_string(cls, ep_string, location): """Initalize from an EnergyPlus string of a SizingPeriod:DesignDay. args: ep_string: A full string representing a SizingPeriod:DesignDay. """ # format the object into a list of properties ep_string = ep_string.strip() if '\n' in ep_string: ep_lines = ep_string.split('\n') else: ep_lines = ep_string.split('\r') lines = [l.split('!')[0].strip().replace(',', '') for l in ep_lines] # check to be sure that we have a valid ddy object assert len(lines) == 27 or len(lines) == 26, "Number " \ "of lines of text [{}] does not correspond" \ " to an EP Design Day [26 or 27]".format( len(lines)) lines[-1] = lines[-1].split(';')[0] # extract primary properties name = lines[1] day_type = lines[4] # extract dry bulb temperatures dry_bulb_condition = DryBulbCondition( float(lines[5]), float(lines[6]), lines[7], lines[8]) # extract humidity conditions h_type = lines[9] h_val = 0 if lines[10] == '' else float(lines[10]) if h_type == 'HumidityRatio': h_val = float(lines[12]) elif h_type == 'Enthalpy': h_val = float(lines[13]) humidity_condition = HumidityCondition( h_type, h_val, float(lines[15]), lines[11]) # extract wind conditions wind_condition = WindCondition( float(lines[16]), float(lines[17]), lines[18], lines[19]) # extract the sky conditions sky_model = lines[21] if sky_model == 'ASHRAEClearSky': sky_condition = OriginalClearSkyCondition( int(lines[2]), int(lines[3]), float(lines[26]), lines[20]) elif sky_model == 'ASHRAETau': sky_condition = RevisedClearSkyCondition( int(lines[2]), int(lines[3]), float(lines[24]), float(lines[25]), lines[20]) else: sky_condition = SkyCondition( sky_model, int(lines[2]), int(lines[3]), lines[20]) if sky_model == 'Schedule': sky_condition.beam_shced = lines[22] sky_condition.diff_shced = lines[23] return cls(name, day_type, location, dry_bulb_condition, humidity_condition, wind_condition, sky_condition)
[ "def", "from_ep_string", "(", "cls", ",", "ep_string", ",", "location", ")", ":", "# format the object into a list of properties", "ep_string", "=", "ep_string", ".", "strip", "(", ")", "if", "'\\n'", "in", "ep_string", ":", "ep_lines", "=", "ep_string", ".", "s...
Initalize from an EnergyPlus string of a SizingPeriod:DesignDay. args: ep_string: A full string representing a SizingPeriod:DesignDay.
[ "Initalize", "from", "an", "EnergyPlus", "string", "of", "a", "SizingPeriod", ":", "DesignDay", "." ]
python
train
ewels/MultiQC
multiqc/modules/slamdunk/slamdunk.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/slamdunk/slamdunk.py#L435-L472
def slamdunkOverallRatesPlot (self): """ Generate the overall rates plot """ pconfig = { 'id': 'overallratesplot', 'title': 'Slamdunk: Overall conversion rates in reads', 'cpswitch': False, 'cpswitch_c_active': False, 'ylab': 'Number of reads', 'stacking': 'normal', 'tt_decimals': 2, 'tt_suffix': '%', 'tt_percentages': False, 'hide_zero_cats': False, 'data_labels': [ "Plus Strand +", "Minus Strand -", ] } cats = [OrderedDict(), OrderedDict()] keys = [ ['T>C', 'A>T', 'A>G', 'A>C', 'T>A', 'T>G', 'G>A', 'G>T', 'G>C', 'C>A', 'C>T', 'C>G'], ['A>G','A>T','A>C','T>A','T>G','T>C','G>A','G>T','G>C','C>A','C>T','C>G'] ] for i, k in enumerate(keys): for j, v in enumerate(k): cats[i][v] = { 'color': self.plot_cols[j] } self.add_section ( name = 'Conversion rates per read', anchor = 'slamdunk_overall_rates', description = """This plot shows the individual conversion rates over all reads. It shows these conversion rates strand-specific: This means for a properly labeled sample you would see a T&gt;C excess on the plus-strand and an A&gt;G excess on the minus strand (see the <a href="http://t-neumann.github.io/slamdunk/docs.html#rates" target="_blank">slamdunk docs</a>).""", plot = bargraph.plot([self.rates_data_plus,self.rates_data_minus], cats, pconfig) )
[ "def", "slamdunkOverallRatesPlot", "(", "self", ")", ":", "pconfig", "=", "{", "'id'", ":", "'overallratesplot'", ",", "'title'", ":", "'Slamdunk: Overall conversion rates in reads'", ",", "'cpswitch'", ":", "False", ",", "'cpswitch_c_active'", ":", "False", ",", "'...
Generate the overall rates plot
[ "Generate", "the", "overall", "rates", "plot" ]
python
train
Gandi/gandi.cli
gandi/cli/modules/status.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/modules/status.py#L39-L42
def status(cls): """Retrieve global status from status.gandi.net.""" return cls.json_get('%s/status' % cls.api_url, empty_key=True, send_key=False)
[ "def", "status", "(", "cls", ")", ":", "return", "cls", ".", "json_get", "(", "'%s/status'", "%", "cls", ".", "api_url", ",", "empty_key", "=", "True", ",", "send_key", "=", "False", ")" ]
Retrieve global status from status.gandi.net.
[ "Retrieve", "global", "status", "from", "status", ".", "gandi", ".", "net", "." ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/convert.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/convert.py#L113-L122
def convert_attrs_to_lowercase(obj: Any, attrs: Iterable[str]) -> None: """ Converts the specified attributes of an object to lower case, modifying the object in place. """ for a in attrs: value = getattr(obj, a) if value is None: continue setattr(obj, a, value.lower())
[ "def", "convert_attrs_to_lowercase", "(", "obj", ":", "Any", ",", "attrs", ":", "Iterable", "[", "str", "]", ")", "->", "None", ":", "for", "a", "in", "attrs", ":", "value", "=", "getattr", "(", "obj", ",", "a", ")", "if", "value", "is", "None", ":...
Converts the specified attributes of an object to lower case, modifying the object in place.
[ "Converts", "the", "specified", "attributes", "of", "an", "object", "to", "lower", "case", "modifying", "the", "object", "in", "place", "." ]
python
train
vtkiorg/vtki
vtki/common.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/common.py#L749-L762
def scalar_names(self): """A list of scalar names for the dataset. This makes sure to put the active scalar's name first in the list.""" names = [] for i in range(self.GetPointData().GetNumberOfArrays()): names.append(self.GetPointData().GetArrayName(i)) for i in range(self.GetCellData().GetNumberOfArrays()): names.append(self.GetCellData().GetArrayName(i)) try: names.remove(self.active_scalar_name) names.insert(0, self.active_scalar_name) except ValueError: pass return names
[ "def", "scalar_names", "(", "self", ")", ":", "names", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "GetPointData", "(", ")", ".", "GetNumberOfArrays", "(", ")", ")", ":", "names", ".", "append", "(", "self", ".", "GetPointData", "(", ...
A list of scalar names for the dataset. This makes sure to put the active scalar's name first in the list.
[ "A", "list", "of", "scalar", "names", "for", "the", "dataset", ".", "This", "makes", "sure", "to", "put", "the", "active", "scalar", "s", "name", "first", "in", "the", "list", "." ]
python
train
daviddrysdale/python-phonenumbers
python/phonenumbers/asyoutypeformatter.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/asyoutypeformatter.py#L400-L414
def get_remembered_position(self): """Returns the current position in the partially formatted phone number of the character which was previously passed in as the parameter of input_digit(remember_position=True).""" if not self._able_to_format: return self._original_position accrued_input_index = 0 current_output_index = 0 while (accrued_input_index < self._position_to_remember and current_output_index < len(self._current_output)): if (self._accrued_input_without_formatting[accrued_input_index] == self._current_output[current_output_index]): accrued_input_index += 1 current_output_index += 1 return current_output_index
[ "def", "get_remembered_position", "(", "self", ")", ":", "if", "not", "self", ".", "_able_to_format", ":", "return", "self", ".", "_original_position", "accrued_input_index", "=", "0", "current_output_index", "=", "0", "while", "(", "accrued_input_index", "<", "se...
Returns the current position in the partially formatted phone number of the character which was previously passed in as the parameter of input_digit(remember_position=True).
[ "Returns", "the", "current", "position", "in", "the", "partially", "formatted", "phone", "number", "of", "the", "character", "which", "was", "previously", "passed", "in", "as", "the", "parameter", "of", "input_digit", "(", "remember_position", "=", "True", ")", ...
python
train
materialsproject/pymatgen
pymatgen/analysis/nmr.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/nmr.py#L98-L108
def maryland_values(self): """ Returns: the Chemical shielding tensor in Maryland Notation """ pas=self.principal_axis_system sigma_iso=pas.trace() / 3 omega=np.diag(pas)[2] - np.diag(pas)[0] # There is a typo in equation 20 from Magn. Reson. Chem. 2008, 46, 582–598, the sign is wrong. # There correct order is presented in Solid State Nucl. Magn. Reson. 1993, 2, 285-288. kappa=3.0 * (np.diag(pas)[1] - sigma_iso) / omega return self.MarylandNotation(sigma_iso, omega, kappa)
[ "def", "maryland_values", "(", "self", ")", ":", "pas", "=", "self", ".", "principal_axis_system", "sigma_iso", "=", "pas", ".", "trace", "(", ")", "/", "3", "omega", "=", "np", ".", "diag", "(", "pas", ")", "[", "2", "]", "-", "np", ".", "diag", ...
Returns: the Chemical shielding tensor in Maryland Notation
[ "Returns", ":", "the", "Chemical", "shielding", "tensor", "in", "Maryland", "Notation" ]
python
train
Saledddar/pyunet
pyunet/util.py
https://github.com/Saledddar/pyunet/blob/ca5ccc32588fae8da43f968e7747d3f3da509507/pyunet/util.py#L33-L54
def find_file_regex(root_dir,re_expression,return_abs_path = True,search_sub_directories = True): ''' Finds all the files with the specified root directory with the name matching the regex expression. Args : root_dir : The root directory. re_expression : The regex expression. return_abs_path : If set to true, returns the absolute path of the files, else returns the name of the files. search_sub_directories : If set to true, searches sub directories recursivly. ''' compiled = re.compile(re_expression) result = [] for dirpath, dirnames, files in os.walk(root_dir) : #Select files matching the expression for file in files : if compiled.match(file): result.append(os.path.join(dirpath,file) if return_abs_path else file ) #Break if no sub-directories if not search_sub_directories : break return result
[ "def", "find_file_regex", "(", "root_dir", ",", "re_expression", ",", "return_abs_path", "=", "True", ",", "search_sub_directories", "=", "True", ")", ":", "compiled", "=", "re", ".", "compile", "(", "re_expression", ")", "result", "=", "[", "]", "for", "dir...
Finds all the files with the specified root directory with the name matching the regex expression. Args : root_dir : The root directory. re_expression : The regex expression. return_abs_path : If set to true, returns the absolute path of the files, else returns the name of the files. search_sub_directories : If set to true, searches sub directories recursivly.
[ "Finds", "all", "the", "files", "with", "the", "specified", "root", "directory", "with", "the", "name", "matching", "the", "regex", "expression", ".", "Args", ":", "root_dir", ":", "The", "root", "directory", ".", "re_expression", ":", "The", "regex", "expre...
python
train
utek/pyseaweed
pyseaweed/weed.py
https://github.com/utek/pyseaweed/blob/218049329885425a2b8370157fa44952e64516be/pyseaweed/weed.py#L50-L66
def get_file(self, fid): """Get file from WeedFS. Returns file content. May be problematic for large files as content is stored in memory. Args: **fid**: File identifier <volume_id>,<file_name_hash> Returns: Content of the file with provided fid or None if file doesn't exist on the server .. versionadded:: 0.3.1 """ url = self.get_file_url(fid) return self.conn.get_raw_data(url)
[ "def", "get_file", "(", "self", ",", "fid", ")", ":", "url", "=", "self", ".", "get_file_url", "(", "fid", ")", "return", "self", ".", "conn", ".", "get_raw_data", "(", "url", ")" ]
Get file from WeedFS. Returns file content. May be problematic for large files as content is stored in memory. Args: **fid**: File identifier <volume_id>,<file_name_hash> Returns: Content of the file with provided fid or None if file doesn't exist on the server .. versionadded:: 0.3.1
[ "Get", "file", "from", "WeedFS", "." ]
python
train
intelligenia/modeltranslation
modeltranslation/templatetags/modeltranslation_tags.py
https://github.com/intelligenia/modeltranslation/blob/64d6adeb537747321d5020efedf5d7e0d135862d/modeltranslation/templatetags/modeltranslation_tags.py#L15-L47
def get_translated_attribute(instance, attr): """ Wraps Django Model __getattribute__ method making translation in templates less painful """ # If its class has no translatable fields, returns attribute try: if not hasattr(instance._meta, "translatable_fields") or len(getattr(instance._meta,"translatable_fields"))==0: return getattr(instance, attr) except AttributeError: return instance # Translatable fields of this instance translatable_fields = instance._meta.translatable_fields # Current language cur_language = get_language() lang = cur_language.title().lower() # If current language is default language, returns attribute if lang == settings.LANGUAGE_CODE: return getattr(instance, attr) # Otherwise, if a translation is NOT needed for attr atribute, get attribute if not attr in translatable_fields: return getattr(instance, attr) # Gets field translations of this instance and return the translated attribute field_translation = _get_fieldtranslations(instance, field=attr, lang=lang) if field_translation: if not field_translation.is_fuzzy: return field_translation.translation return getattr(instance, attr)
[ "def", "get_translated_attribute", "(", "instance", ",", "attr", ")", ":", "# If its class has no translatable fields, returns attribute", "try", ":", "if", "not", "hasattr", "(", "instance", ".", "_meta", ",", "\"translatable_fields\"", ")", "or", "len", "(", "getatt...
Wraps Django Model __getattribute__ method making translation in templates less painful
[ "Wraps", "Django", "Model", "__getattribute__", "method", "making", "translation", "in", "templates", "less", "painful" ]
python
train
CiscoUcs/UcsPythonSDK
src/UcsSdk/UcsBase.py
https://github.com/CiscoUcs/UcsPythonSDK/blob/bf6b07d6abeacb922c92b198352eda4eb9e4629b/src/UcsSdk/UcsBase.py#L875-L902
def DecryptPassword(cipher, key): """ Decrypts the password using the given key with which the password was encrypted first. """ import base64 import hmac import sha from array import array H = UcsUtils.GetShaHash cipher = cipher + "\n" cipher = base64.decodestring(cipher) n = len(cipher) - 16 - 8 uhash = cipher[:16] passwordStream = cipher[16:-8] + "0000"[n & 3:] auth = cipher[-8:] k_enc, k_auth = H('enc' + key + uhash), H('auth' + key + uhash) vauth = hmac.new(cipher[-8:], k_auth, sha).digest()[:8] passwordStream = array('L', passwordStream) xkey = UcsUtils.Expandkey(k_enc, n + 4) for i in xrange(len(passwordStream)): passwordStream[i] = passwordStream[i] ^ xkey[i] decryptedPassword = passwordStream.tostring()[:n] return decryptedPassword
[ "def", "DecryptPassword", "(", "cipher", ",", "key", ")", ":", "import", "base64", "import", "hmac", "import", "sha", "from", "array", "import", "array", "H", "=", "UcsUtils", ".", "GetShaHash", "cipher", "=", "cipher", "+", "\"\\n\"", "cipher", "=", "base...
Decrypts the password using the given key with which the password was encrypted first.
[ "Decrypts", "the", "password", "using", "the", "given", "key", "with", "which", "the", "password", "was", "encrypted", "first", "." ]
python
train
googleapis/google-cloud-python
datastore/google/cloud/datastore/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/datastore/google/cloud/datastore/client.py#L296-L340
def get(self, key, missing=None, deferred=None, transaction=None, eventual=False): """Retrieve an entity from a single key (if it exists). .. note:: This is just a thin wrapper over :meth:`get_multi`. The backend API does not make a distinction between a single key or multiple keys in a lookup request. :type key: :class:`google.cloud.datastore.key.Key` :param key: The key to be retrieved from the datastore. :type missing: list :param missing: (Optional) If a list is passed, the key-only entities returned by the backend as "missing" will be copied into it. :type deferred: list :param deferred: (Optional) If a list is passed, the keys returned by the backend as "deferred" will be copied into it. :type transaction: :class:`~google.cloud.datastore.transaction.Transaction` :param transaction: (Optional) Transaction to use for read consistency. If not passed, uses current transaction, if set. :type eventual: bool :param eventual: (Optional) Defaults to strongly consistent (False). Setting True will use eventual consistency, but cannot be used inside a transaction or will raise ValueError. :rtype: :class:`google.cloud.datastore.entity.Entity` or ``NoneType`` :returns: The requested entity if it exists. :raises: :class:`ValueError` if eventual is True and in a transaction. """ entities = self.get_multi( keys=[key], missing=missing, deferred=deferred, transaction=transaction, eventual=eventual, ) if entities: return entities[0]
[ "def", "get", "(", "self", ",", "key", ",", "missing", "=", "None", ",", "deferred", "=", "None", ",", "transaction", "=", "None", ",", "eventual", "=", "False", ")", ":", "entities", "=", "self", ".", "get_multi", "(", "keys", "=", "[", "key", "]"...
Retrieve an entity from a single key (if it exists). .. note:: This is just a thin wrapper over :meth:`get_multi`. The backend API does not make a distinction between a single key or multiple keys in a lookup request. :type key: :class:`google.cloud.datastore.key.Key` :param key: The key to be retrieved from the datastore. :type missing: list :param missing: (Optional) If a list is passed, the key-only entities returned by the backend as "missing" will be copied into it. :type deferred: list :param deferred: (Optional) If a list is passed, the keys returned by the backend as "deferred" will be copied into it. :type transaction: :class:`~google.cloud.datastore.transaction.Transaction` :param transaction: (Optional) Transaction to use for read consistency. If not passed, uses current transaction, if set. :type eventual: bool :param eventual: (Optional) Defaults to strongly consistent (False). Setting True will use eventual consistency, but cannot be used inside a transaction or will raise ValueError. :rtype: :class:`google.cloud.datastore.entity.Entity` or ``NoneType`` :returns: The requested entity if it exists. :raises: :class:`ValueError` if eventual is True and in a transaction.
[ "Retrieve", "an", "entity", "from", "a", "single", "key", "(", "if", "it", "exists", ")", "." ]
python
train
apache/airflow
airflow/contrib/hooks/azure_fileshare_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_fileshare_hook.py#L64-L81
def check_for_file(self, share_name, directory_name, file_name, **kwargs): """ Check if a file exists on Azure File Share. :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.exists()` takes. :type kwargs: object :return: True if the file exists, False otherwise. :rtype: bool """ return self.connection.exists(share_name, directory_name, file_name, **kwargs)
[ "def", "check_for_file", "(", "self", ",", "share_name", ",", "directory_name", ",", "file_name", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "connection", ".", "exists", "(", "share_name", ",", "directory_name", ",", "file_name", ",", "*", "...
Check if a file exists on Azure File Share. :param share_name: Name of the share. :type share_name: str :param directory_name: Name of the directory. :type directory_name: str :param file_name: Name of the file. :type file_name: str :param kwargs: Optional keyword arguments that `FileService.exists()` takes. :type kwargs: object :return: True if the file exists, False otherwise. :rtype: bool
[ "Check", "if", "a", "file", "exists", "on", "Azure", "File", "Share", "." ]
python
test
RI-imaging/qpimage
qpimage/bg_estimate.py
https://github.com/RI-imaging/qpimage/blob/863c0fce5735b4c0ae369f75c0df9a33411b2bb2/qpimage/bg_estimate.py#L131-L139
def profile_tilt(data, mask): """Fit a 2D tilt to `data[mask]`""" params = lmfit.Parameters() params.add(name="mx", value=0) params.add(name="my", value=0) params.add(name="off", value=np.average(data[mask])) fr = lmfit.minimize(tilt_residual, params, args=(data, mask)) bg = tilt_model(fr.params, data.shape) return bg
[ "def", "profile_tilt", "(", "data", ",", "mask", ")", ":", "params", "=", "lmfit", ".", "Parameters", "(", ")", "params", ".", "add", "(", "name", "=", "\"mx\"", ",", "value", "=", "0", ")", "params", ".", "add", "(", "name", "=", "\"my\"", ",", ...
Fit a 2D tilt to `data[mask]`
[ "Fit", "a", "2D", "tilt", "to", "data", "[", "mask", "]" ]
python
train
toastdriven/alligator
alligator/__init__.py
https://github.com/toastdriven/alligator/blob/f18bcb35b350fc6b0886393f5246d69c892b36c7/alligator/__init__.py#L11-L22
def version(): """ Returns a human-readable version string. For official releases, it will follow a semver style (e.g. ``1.2.7``). For dev versions, it will have the semver style first, followed by hyphenated qualifiers (e.g. ``1.2.7-dev``). Returns a string. """ short = '.'.join([str(bit) for bit in __version__[:3]]) return '-'.join([short] + [str(bit) for bit in __version__[3:]])
[ "def", "version", "(", ")", ":", "short", "=", "'.'", ".", "join", "(", "[", "str", "(", "bit", ")", "for", "bit", "in", "__version__", "[", ":", "3", "]", "]", ")", "return", "'-'", ".", "join", "(", "[", "short", "]", "+", "[", "str", "(", ...
Returns a human-readable version string. For official releases, it will follow a semver style (e.g. ``1.2.7``). For dev versions, it will have the semver style first, followed by hyphenated qualifiers (e.g. ``1.2.7-dev``). Returns a string.
[ "Returns", "a", "human", "-", "readable", "version", "string", "." ]
python
train
Erotemic/utool
utool/util_dict.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dict.py#L1282-L1300
def range_hist(items, bins): """ Bins items into a discrete histogram by values and/or ranges. items = [1, 2, 3, 4, 5, 6, 7] bins = [0, 1, 2, (3, float('inf'))] ut.range_hist(items, bins) """ big_hist = ut.dict_hist(items) hist = ut.odict([(b, 0) for b in bins]) for k, v in big_hist.items(): for b in bins: if isinstance(b, (list, tuple)): if k >= b[0] and k < b[1]: hist[b] += v elif k == b: hist[b] += v return hist
[ "def", "range_hist", "(", "items", ",", "bins", ")", ":", "big_hist", "=", "ut", ".", "dict_hist", "(", "items", ")", "hist", "=", "ut", ".", "odict", "(", "[", "(", "b", ",", "0", ")", "for", "b", "in", "bins", "]", ")", "for", "k", ",", "v"...
Bins items into a discrete histogram by values and/or ranges. items = [1, 2, 3, 4, 5, 6, 7] bins = [0, 1, 2, (3, float('inf'))] ut.range_hist(items, bins)
[ "Bins", "items", "into", "a", "discrete", "histogram", "by", "values", "and", "/", "or", "ranges", "." ]
python
train
saltstack/salt
salt/modules/smartos_imgadm.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/smartos_imgadm.py#L104-L114
def _split_docker_uuid(uuid): ''' Split a smartos docker uuid into repo and tag ''' if uuid: uuid = uuid.split(':') if len(uuid) == 2: tag = uuid[1] repo = uuid[0] return repo, tag return None, None
[ "def", "_split_docker_uuid", "(", "uuid", ")", ":", "if", "uuid", ":", "uuid", "=", "uuid", ".", "split", "(", "':'", ")", "if", "len", "(", "uuid", ")", "==", "2", ":", "tag", "=", "uuid", "[", "1", "]", "repo", "=", "uuid", "[", "0", "]", "...
Split a smartos docker uuid into repo and tag
[ "Split", "a", "smartos", "docker", "uuid", "into", "repo", "and", "tag" ]
python
train
project-rig/rig
rig/place_and_route/place/sa/python_kernel.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/place_and_route/place/sa/python_kernel.py#L295-L349
def _swap(vas, vas_location, vbs, vbs_location, l2v, vertices_resources, placements, machine): """Swap the positions of two sets of vertices. Parameters ---------- vas : [vertex, ...] A set of vertices currently at vas_location. vas_location : (x, y) vbs : [vertex, ...] A set of vertices currently at vbs_location. vbs_location : (x, y) l2v : {(x, y): [vertex, ...], ...} vertices_resources : {vertex: {resource: value, ...}, ...} placements : {vertex: (x, y), ...} machine : :py:class:`rig.place_and_route.Machine` """ # Get the lists of vertices at either location vas_location2v = l2v[vas_location] vbs_location2v = l2v[vbs_location] # Get the resource availability at either location vas_resources = machine[vas_location] vbs_resources = machine[vbs_location] # Move all the vertices in vas into vbs. for va in vas: # Update the placements placements[va] = vbs_location # Update the location-to-vertex lookup vas_location2v.remove(va) vbs_location2v.append(va) # Update the resource consumption after the move resources = vertices_resources[va] vas_resources = add_resources(vas_resources, resources) vbs_resources = subtract_resources(vbs_resources, resources) for vb in vbs: # Update the placements placements[vb] = vas_location # Update the location-to-vertex lookup vbs_location2v.remove(vb) vas_location2v.append(vb) # Update the resource consumption after the move resources = vertices_resources[vb] vas_resources = subtract_resources(vas_resources, resources) vbs_resources = add_resources(vbs_resources, resources) # Update the resources in the machine machine[vas_location] = vas_resources machine[vbs_location] = vbs_resources
[ "def", "_swap", "(", "vas", ",", "vas_location", ",", "vbs", ",", "vbs_location", ",", "l2v", ",", "vertices_resources", ",", "placements", ",", "machine", ")", ":", "# Get the lists of vertices at either location", "vas_location2v", "=", "l2v", "[", "vas_location",...
Swap the positions of two sets of vertices. Parameters ---------- vas : [vertex, ...] A set of vertices currently at vas_location. vas_location : (x, y) vbs : [vertex, ...] A set of vertices currently at vbs_location. vbs_location : (x, y) l2v : {(x, y): [vertex, ...], ...} vertices_resources : {vertex: {resource: value, ...}, ...} placements : {vertex: (x, y), ...} machine : :py:class:`rig.place_and_route.Machine`
[ "Swap", "the", "positions", "of", "two", "sets", "of", "vertices", "." ]
python
train
inasafe/inasafe
safe/gui/tools/multi_buffer_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/multi_buffer_dialog.py#L104-L156
def accept(self): """Process the layer for multi buffering and generate a new layer. .. note:: This is called on OK click. """ # set parameter from dialog input_layer = self.layer.currentLayer() output_path = self.output_form.text() radius = self.get_classification() # monkey patch keywords so layer works on multi buffering function input_layer.keywords = {'inasafe_fields': {}} # run multi buffering self.output_layer = multi_buffering(input_layer, radius) # save output layer to data store and check whether user # provide the output path. if output_path: self.output_directory, self.output_filename = ( os.path.split(output_path)) self.output_filename, self.output_extension = ( os.path.splitext(self.output_filename)) # if user do not provide the output path, create a temporary file. else: self.output_directory = temp_dir(sub_dir='work') self.output_filename = ( unique_filename( prefix='hazard_layer', suffix='.geojson', dir=self.output_directory)) self.output_filename = os.path.split(self.output_filename)[1] self.output_filename, self.output_extension = ( os.path.splitext(self.output_filename)) self.data_store = Folder(self.output_directory) if self.output_extension == '.shp': self.data_store.default_vector_format = 'shp' elif self.output_extension == '.geojson': self.data_store.default_vector_format = 'geojson' self.data_store.add_layer(self.output_layer, self.output_filename) # add output layer to map canvas self.output_layer = self.data_store.layer(self.output_filename) QgsProject.instance().addMapLayers( [self.output_layer]) self.iface.setActiveLayer(self.output_layer) self.iface.zoomToActiveLayer() self.done(QtWidgets.QDialog.Accepted) if self.keyword_wizard_checkbox.isChecked(): self.launch_keyword_wizard()
[ "def", "accept", "(", "self", ")", ":", "# set parameter from dialog", "input_layer", "=", "self", ".", "layer", ".", "currentLayer", "(", ")", "output_path", "=", "self", ".", "output_form", ".", "text", "(", ")", "radius", "=", "self", ".", "get_classifica...
Process the layer for multi buffering and generate a new layer. .. note:: This is called on OK click.
[ "Process", "the", "layer", "for", "multi", "buffering", "and", "generate", "a", "new", "layer", "." ]
python
train
pylp/pylp
pylp/cli/run.py
https://github.com/pylp/pylp/blob/7ebaa55fbaf61cb8175f211dd41ef2928c22d4d4/pylp/cli/run.py#L18-L45
def run(path, tasks): """Run a pylpfile.""" # Test if the pylpfile exists readable_path = make_readable_path(path) if not os.path.isfile(path): logger.log(logger.red("Can't read pylpfile "), logger.magenta(readable_path)) sys.exit(-1) else: logger.log("Using pylpfile ", logger.magenta(readable_path)) # Run the pylpfile try: runpy.run_path(path, None, "pylpfile") except Exception as e: traceback.print_exc(file=sys.stdout) logger.log(logger.red("\nAn error has occurred during the execution of the pylpfile")) sys.exit(-1) # Start the tasks for name in tasks: pylp.start(name) # Wait until all task are executed loop = asyncio.get_event_loop() loop.run_until_complete(wait_and_quit(loop))
[ "def", "run", "(", "path", ",", "tasks", ")", ":", "# Test if the pylpfile exists", "readable_path", "=", "make_readable_path", "(", "path", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "path", ")", ":", "logger", ".", "log", "(", "logger", "...
Run a pylpfile.
[ "Run", "a", "pylpfile", "." ]
python
train
edx/edx-enterprise
enterprise/utils.py
https://github.com/edx/edx-enterprise/blob/aea91379ab0a87cd3bc798961fce28b60ee49a80/enterprise/utils.py#L543-L567
def traverse_pagination(response, endpoint): """ Traverse a paginated API response. Extracts and concatenates "results" (list of dict) returned by DRF-powered APIs. Arguments: response (Dict): Current response dict from service API endpoint (slumber Resource object): slumber Resource object from edx-rest-api-client Returns: list of dict. """ results = response.get('results', []) next_page = response.get('next') while next_page: querystring = parse_qs(urlparse(next_page).query, keep_blank_values=True) response = endpoint.get(**querystring) results += response.get('results', []) next_page = response.get('next') return results
[ "def", "traverse_pagination", "(", "response", ",", "endpoint", ")", ":", "results", "=", "response", ".", "get", "(", "'results'", ",", "[", "]", ")", "next_page", "=", "response", ".", "get", "(", "'next'", ")", "while", "next_page", ":", "querystring", ...
Traverse a paginated API response. Extracts and concatenates "results" (list of dict) returned by DRF-powered APIs. Arguments: response (Dict): Current response dict from service API endpoint (slumber Resource object): slumber Resource object from edx-rest-api-client Returns: list of dict.
[ "Traverse", "a", "paginated", "API", "response", "." ]
python
valid
mayfield/syndicate
syndicate/data.py
https://github.com/mayfield/syndicate/blob/917af976dacb7377bdf0cb616f47e0df5afaff1a/syndicate/data.py#L40-L46
def parse_object(self, data): """ Look for datetime looking strings. """ for key, value in data.items(): if isinstance(value, (str, type(u''))) and \ self.strict_iso_match.match(value): data[key] = dateutil.parser.parse(value) return data
[ "def", "parse_object", "(", "self", ",", "data", ")", ":", "for", "key", ",", "value", "in", "data", ".", "items", "(", ")", ":", "if", "isinstance", "(", "value", ",", "(", "str", ",", "type", "(", "u''", ")", ")", ")", "and", "self", ".", "st...
Look for datetime looking strings.
[ "Look", "for", "datetime", "looking", "strings", "." ]
python
train
twilio/twilio-python
twilio/rest/ip_messaging/v2/service/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v2/service/__init__.py#L262-L368
def update(self, friendly_name=values.unset, default_service_role_sid=values.unset, default_channel_role_sid=values.unset, default_channel_creator_role_sid=values.unset, read_status_enabled=values.unset, reachability_enabled=values.unset, typing_indicator_timeout=values.unset, consumption_report_interval=values.unset, notifications_new_message_enabled=values.unset, notifications_new_message_template=values.unset, notifications_new_message_sound=values.unset, notifications_new_message_badge_count_enabled=values.unset, notifications_added_to_channel_enabled=values.unset, notifications_added_to_channel_template=values.unset, notifications_added_to_channel_sound=values.unset, notifications_removed_from_channel_enabled=values.unset, notifications_removed_from_channel_template=values.unset, notifications_removed_from_channel_sound=values.unset, notifications_invited_to_channel_enabled=values.unset, notifications_invited_to_channel_template=values.unset, notifications_invited_to_channel_sound=values.unset, pre_webhook_url=values.unset, post_webhook_url=values.unset, webhook_method=values.unset, webhook_filters=values.unset, limits_channel_members=values.unset, limits_user_channels=values.unset, media_compatibility_message=values.unset, pre_webhook_retry_count=values.unset, post_webhook_retry_count=values.unset, notifications_log_enabled=values.unset): """ Update the ServiceInstance :param unicode friendly_name: A string to describe the resource :param unicode default_service_role_sid: The service role assigned to users when they are added to the service :param unicode default_channel_role_sid: The channel role assigned to users when they are added to a channel :param unicode default_channel_creator_role_sid: The channel role assigned to a channel creator when they join a new channel :param bool read_status_enabled: Whether to enable the Message Consumption Horizon feature :param bool reachability_enabled: Whether to enable the Reachability Indicator feature for this 
Service instance :param unicode typing_indicator_timeout: How long in seconds to wait before assuming the user is no longer typing :param unicode consumption_report_interval: DEPRECATED :param bool notifications_new_message_enabled: Whether to send a notification when a new message is added to a channel :param unicode notifications_new_message_template: The template to use to create the notification text displayed when a new message is added to a channel :param unicode notifications_new_message_sound: The name of the sound to play when a new message is added to a channel :param bool notifications_new_message_badge_count_enabled: Whether the new message badge is enabled :param bool notifications_added_to_channel_enabled: Whether to send a notification when a member is added to a channel :param unicode notifications_added_to_channel_template: The template to use to create the notification text displayed when a member is added to a channel :param unicode notifications_added_to_channel_sound: The name of the sound to play when a member is added to a channel :param bool notifications_removed_from_channel_enabled: Whether to send a notification to a user when they are removed from a channel :param unicode notifications_removed_from_channel_template: The template to use to create the notification text displayed to a user when they are removed :param unicode notifications_removed_from_channel_sound: The name of the sound to play to a user when they are removed from a channel :param bool notifications_invited_to_channel_enabled: Whether to send a notification when a user is invited to a channel :param unicode notifications_invited_to_channel_template: The template to use to create the notification text displayed when a user is invited to a channel :param unicode notifications_invited_to_channel_sound: The name of the sound to play when a user is invited to a channel :param unicode pre_webhook_url: The webhook URL for pre-event webhooks :param unicode post_webhook_url: The 
URL for post-event webhooks :param unicode webhook_method: The HTTP method to use for both PRE and POST webhooks :param unicode webhook_filters: The list of WebHook events that are enabled for this Service instance :param unicode limits_channel_members: The maximum number of Members that can be added to Channels within this Service :param unicode limits_user_channels: The maximum number of Channels Users can be a Member of within this Service :param unicode media_compatibility_message: The message to send when a media message has no text :param unicode pre_webhook_retry_count: Count of times webhook will be retried in case of timeout or 429/503/504 HTTP responses :param unicode post_webhook_retry_count: The number of times calls to the `post_webhook_url` will be retried :param bool notifications_log_enabled: Whether to log notifications :returns: Updated ServiceInstance :rtype: twilio.rest.chat.v2.service.ServiceInstance """ data = values.of({ 'FriendlyName': friendly_name, 'DefaultServiceRoleSid': default_service_role_sid, 'DefaultChannelRoleSid': default_channel_role_sid, 'DefaultChannelCreatorRoleSid': default_channel_creator_role_sid, 'ReadStatusEnabled': read_status_enabled, 'ReachabilityEnabled': reachability_enabled, 'TypingIndicatorTimeout': typing_indicator_timeout, 'ConsumptionReportInterval': consumption_report_interval, 'Notifications.NewMessage.Enabled': notifications_new_message_enabled, 'Notifications.NewMessage.Template': notifications_new_message_template, 'Notifications.NewMessage.Sound': notifications_new_message_sound, 'Notifications.NewMessage.BadgeCountEnabled': notifications_new_message_badge_count_enabled, 'Notifications.AddedToChannel.Enabled': notifications_added_to_channel_enabled, 'Notifications.AddedToChannel.Template': notifications_added_to_channel_template, 'Notifications.AddedToChannel.Sound': notifications_added_to_channel_sound, 'Notifications.RemovedFromChannel.Enabled': notifications_removed_from_channel_enabled, 
'Notifications.RemovedFromChannel.Template': notifications_removed_from_channel_template, 'Notifications.RemovedFromChannel.Sound': notifications_removed_from_channel_sound, 'Notifications.InvitedToChannel.Enabled': notifications_invited_to_channel_enabled, 'Notifications.InvitedToChannel.Template': notifications_invited_to_channel_template, 'Notifications.InvitedToChannel.Sound': notifications_invited_to_channel_sound, 'PreWebhookUrl': pre_webhook_url, 'PostWebhookUrl': post_webhook_url, 'WebhookMethod': webhook_method, 'WebhookFilters': serialize.map(webhook_filters, lambda e: e), 'Limits.ChannelMembers': limits_channel_members, 'Limits.UserChannels': limits_user_channels, 'Media.CompatibilityMessage': media_compatibility_message, 'PreWebhookRetryCount': pre_webhook_retry_count, 'PostWebhookRetryCount': post_webhook_retry_count, 'Notifications.LogEnabled': notifications_log_enabled, }) payload = self._version.update( 'POST', self._uri, data=data, ) return ServiceInstance(self._version, payload, sid=self._solution['sid'], )
[ "def", "update", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "default_service_role_sid", "=", "values", ".", "unset", ",", "default_channel_role_sid", "=", "values", ".", "unset", ",", "default_channel_creator_role_sid", "=", "values", "."...
Update the ServiceInstance :param unicode friendly_name: A string to describe the resource :param unicode default_service_role_sid: The service role assigned to users when they are added to the service :param unicode default_channel_role_sid: The channel role assigned to users when they are added to a channel :param unicode default_channel_creator_role_sid: The channel role assigned to a channel creator when they join a new channel :param bool read_status_enabled: Whether to enable the Message Consumption Horizon feature :param bool reachability_enabled: Whether to enable the Reachability Indicator feature for this Service instance :param unicode typing_indicator_timeout: How long in seconds to wait before assuming the user is no longer typing :param unicode consumption_report_interval: DEPRECATED :param bool notifications_new_message_enabled: Whether to send a notification when a new message is added to a channel :param unicode notifications_new_message_template: The template to use to create the notification text displayed when a new message is added to a channel :param unicode notifications_new_message_sound: The name of the sound to play when a new message is added to a channel :param bool notifications_new_message_badge_count_enabled: Whether the new message badge is enabled :param bool notifications_added_to_channel_enabled: Whether to send a notification when a member is added to a channel :param unicode notifications_added_to_channel_template: The template to use to create the notification text displayed when a member is added to a channel :param unicode notifications_added_to_channel_sound: The name of the sound to play when a member is added to a channel :param bool notifications_removed_from_channel_enabled: Whether to send a notification to a user when they are removed from a channel :param unicode notifications_removed_from_channel_template: The template to use to create the notification text displayed to a user when they are removed :param unicode 
notifications_removed_from_channel_sound: The name of the sound to play to a user when they are removed from a channel :param bool notifications_invited_to_channel_enabled: Whether to send a notification when a user is invited to a channel :param unicode notifications_invited_to_channel_template: The template to use to create the notification text displayed when a user is invited to a channel :param unicode notifications_invited_to_channel_sound: The name of the sound to play when a user is invited to a channel :param unicode pre_webhook_url: The webhook URL for pre-event webhooks :param unicode post_webhook_url: The URL for post-event webhooks :param unicode webhook_method: The HTTP method to use for both PRE and POST webhooks :param unicode webhook_filters: The list of WebHook events that are enabled for this Service instance :param unicode limits_channel_members: The maximum number of Members that can be added to Channels within this Service :param unicode limits_user_channels: The maximum number of Channels Users can be a Member of within this Service :param unicode media_compatibility_message: The message to send when a media message has no text :param unicode pre_webhook_retry_count: Count of times webhook will be retried in case of timeout or 429/503/504 HTTP responses :param unicode post_webhook_retry_count: The number of times calls to the `post_webhook_url` will be retried :param bool notifications_log_enabled: Whether to log notifications :returns: Updated ServiceInstance :rtype: twilio.rest.chat.v2.service.ServiceInstance
[ "Update", "the", "ServiceInstance" ]
python
train
drongo-framework/drongo
drongo/utils/dict2.py
https://github.com/drongo-framework/drongo/blob/487edb370ae329f370bcf3b433ed3f28ba4c1d8c/drongo/utils/dict2.py#L25-L41
def get_property(self, prop): """Access nested value using dot separated keys Args: prop (:obj:`str`): Property in the form of dot separated keys Returns: Property value if exists, else `None` """ prop = prop.split('.') root = self for p in prop: if p in root: root = root[p] else: return None return root
[ "def", "get_property", "(", "self", ",", "prop", ")", ":", "prop", "=", "prop", ".", "split", "(", "'.'", ")", "root", "=", "self", "for", "p", "in", "prop", ":", "if", "p", "in", "root", ":", "root", "=", "root", "[", "p", "]", "else", ":", ...
Access nested value using dot separated keys Args: prop (:obj:`str`): Property in the form of dot separated keys Returns: Property value if exists, else `None`
[ "Access", "nested", "value", "using", "dot", "separated", "keys" ]
python
train
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/imphook2.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/imphook2.py#L211-L237
def _ResolveRelativeImport(name, package): """Resolves a relative import into an absolute path. This is mostly an adapted version of the logic found in the backported version of import_module in Python 2.7. https://github.com/python/cpython/blob/2.7/Lib/importlib/__init__.py Args: name: relative name imported, such as '.a' or '..b.c' package: absolute package path, such as 'a.b.c.d.e' Returns: The absolute path of the name to be imported, or None if it is invalid. Examples: _ResolveRelativeImport('.c', 'a.b') -> 'a.b.c' _ResolveRelativeImport('..c', 'a.b') -> 'a.c' _ResolveRelativeImport('...c', 'a.c') -> None """ level = sum(1 for c in itertools.takewhile(lambda c: c == '.', name)) if level == 1: return package + name else: parts = package.split('.')[:-(level - 1)] if not parts: return None parts.append(name[level:]) return '.'.join(parts)
[ "def", "_ResolveRelativeImport", "(", "name", ",", "package", ")", ":", "level", "=", "sum", "(", "1", "for", "c", "in", "itertools", ".", "takewhile", "(", "lambda", "c", ":", "c", "==", "'.'", ",", "name", ")", ")", "if", "level", "==", "1", ":",...
Resolves a relative import into an absolute path. This is mostly an adapted version of the logic found in the backported version of import_module in Python 2.7. https://github.com/python/cpython/blob/2.7/Lib/importlib/__init__.py Args: name: relative name imported, such as '.a' or '..b.c' package: absolute package path, such as 'a.b.c.d.e' Returns: The absolute path of the name to be imported, or None if it is invalid. Examples: _ResolveRelativeImport('.c', 'a.b') -> 'a.b.c' _ResolveRelativeImport('..c', 'a.b') -> 'a.c' _ResolveRelativeImport('...c', 'a.c') -> None
[ "Resolves", "a", "relative", "import", "into", "an", "absolute", "path", "." ]
python
train
ceph/ceph-deploy
ceph_deploy/util/net.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/ceph_deploy/util/net.py#L61-L68
def in_subnet(cidr, addrs=None): """ Returns True if host is within specified subnet, otherwise False """ for address in addrs: if ip_in_subnet(address, cidr): return True return False
[ "def", "in_subnet", "(", "cidr", ",", "addrs", "=", "None", ")", ":", "for", "address", "in", "addrs", ":", "if", "ip_in_subnet", "(", "address", ",", "cidr", ")", ":", "return", "True", "return", "False" ]
Returns True if host is within specified subnet, otherwise False
[ "Returns", "True", "if", "host", "is", "within", "specified", "subnet", "otherwise", "False" ]
python
train
sethmlarson/virtualbox-python
virtualbox/library.py
https://github.com/sethmlarson/virtualbox-python/blob/706c8e3f6e3aee17eb06458e73cbb4bc2d37878b/virtualbox/library.py#L14931-L14941
def hot_plug_cpu(self, cpu): """Plugs a CPU into the machine. in cpu of type int The CPU id to insert. """ if not isinstance(cpu, baseinteger): raise TypeError("cpu can only be an instance of type baseinteger") self._call("hotPlugCPU", in_p=[cpu])
[ "def", "hot_plug_cpu", "(", "self", ",", "cpu", ")", ":", "if", "not", "isinstance", "(", "cpu", ",", "baseinteger", ")", ":", "raise", "TypeError", "(", "\"cpu can only be an instance of type baseinteger\"", ")", "self", ".", "_call", "(", "\"hotPlugCPU\"", ","...
Plugs a CPU into the machine. in cpu of type int The CPU id to insert.
[ "Plugs", "a", "CPU", "into", "the", "machine", "." ]
python
train
MediaFire/mediafire-python-open-sdk
mediafire/api.py
https://github.com/MediaFire/mediafire-python-open-sdk/blob/8f1f23db1b16f16e026f5c6777aec32d00baa05f/mediafire/api.py#L705-L720
def file_update_file(self, quick_key, file_extension=None, filename=None, description=None, mtime=None, privacy=None, timezone=None): """file/update_file http://www.mediafire.com/developers/core_api/1.3/file/#update_file """ return self.request('file/update', QueryParams({ 'quick_key': quick_key, 'file_extension': file_extension, 'filename': filename, 'description': description, 'mtime': mtime, 'privacy': privacy, 'timezone': timezone }))
[ "def", "file_update_file", "(", "self", ",", "quick_key", ",", "file_extension", "=", "None", ",", "filename", "=", "None", ",", "description", "=", "None", ",", "mtime", "=", "None", ",", "privacy", "=", "None", ",", "timezone", "=", "None", ")", ":", ...
file/update_file http://www.mediafire.com/developers/core_api/1.3/file/#update_file
[ "file", "/", "update_file" ]
python
train
bitesofcode/projexui
projexui/widgets/xviewwidget/xview.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xview.py#L504-L514
def setMinimumHeight(self, height): """ Sets the minimum height value to the inputed height and emits the \ sizeConstraintChanged signal. :param height | <int> """ super(XView, self).setMinimumHeight(height) if ( not self.signalsBlocked() ): self.sizeConstraintChanged.emit()
[ "def", "setMinimumHeight", "(", "self", ",", "height", ")", ":", "super", "(", "XView", ",", "self", ")", ".", "setMinimumHeight", "(", "height", ")", "if", "(", "not", "self", ".", "signalsBlocked", "(", ")", ")", ":", "self", ".", "sizeConstraintChange...
Sets the minimum height value to the inputed height and emits the \ sizeConstraintChanged signal. :param height | <int>
[ "Sets", "the", "minimum", "height", "value", "to", "the", "inputed", "height", "and", "emits", "the", "\\", "sizeConstraintChanged", "signal", ".", ":", "param", "height", "|", "<int", ">" ]
python
train
base4sistemas/satcomum
satcomum/br.py
https://github.com/base4sistemas/satcomum/blob/b42bec06cb0fb0ad2f6b1a2644a1e8fc8403f2c3/satcomum/br.py#L230-L237
def as_cpf(numero): """Formata um número de CPF. Se o número não for um CPF válido apenas retorna o argumento sem qualquer modificação. """ _num = digitos(numero) if is_cpf(_num): return '{}.{}.{}-{}'.format(_num[:3], _num[3:6], _num[6:9], _num[9:]) return numero
[ "def", "as_cpf", "(", "numero", ")", ":", "_num", "=", "digitos", "(", "numero", ")", "if", "is_cpf", "(", "_num", ")", ":", "return", "'{}.{}.{}-{}'", ".", "format", "(", "_num", "[", ":", "3", "]", ",", "_num", "[", "3", ":", "6", "]", ",", "...
Formata um número de CPF. Se o número não for um CPF válido apenas retorna o argumento sem qualquer modificação.
[ "Formata", "um", "número", "de", "CPF", ".", "Se", "o", "número", "não", "for", "um", "CPF", "válido", "apenas", "retorna", "o", "argumento", "sem", "qualquer", "modificação", "." ]
python
train
pytroll/satpy
satpy/dataset.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/dataset.py#L293-L301
def replace_anc(dataset, parent_dataset): """Replace *dataset* the *parent_dataset*'s `ancillary_variables` field.""" if parent_dataset is None: return current_dsid = DatasetID.from_dict(dataset.attrs) for idx, ds in enumerate(parent_dataset.attrs['ancillary_variables']): if current_dsid == DatasetID.from_dict(ds.attrs): parent_dataset.attrs['ancillary_variables'][idx] = dataset return
[ "def", "replace_anc", "(", "dataset", ",", "parent_dataset", ")", ":", "if", "parent_dataset", "is", "None", ":", "return", "current_dsid", "=", "DatasetID", ".", "from_dict", "(", "dataset", ".", "attrs", ")", "for", "idx", ",", "ds", "in", "enumerate", "...
Replace *dataset* the *parent_dataset*'s `ancillary_variables` field.
[ "Replace", "*", "dataset", "*", "the", "*", "parent_dataset", "*", "s", "ancillary_variables", "field", "." ]
python
train
michaelpb/omnic
omnic/cli/commandparser.py
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/cli/commandparser.py#L64-L70
def parse_args_to_action_args(self, argv=None): ''' Parses args and returns an action and the args that were parsed ''' args = self.parse_args(argv) action = self.subcommands[args.subcommand][1] return action, args
[ "def", "parse_args_to_action_args", "(", "self", ",", "argv", "=", "None", ")", ":", "args", "=", "self", ".", "parse_args", "(", "argv", ")", "action", "=", "self", ".", "subcommands", "[", "args", ".", "subcommand", "]", "[", "1", "]", "return", "act...
Parses args and returns an action and the args that were parsed
[ "Parses", "args", "and", "returns", "an", "action", "and", "the", "args", "that", "were", "parsed" ]
python
train
aio-libs/aiodocker
aiodocker/utils.py
https://github.com/aio-libs/aiodocker/blob/88d0285ddba8e606ff684278e0a831347209189c/aiodocker/utils.py#L214-L227
def clean_filters(filters: Mapping = None) -> str: """ Checks the values inside `filters` https://docs.docker.com/engine/api/v1.29/#operation/ServiceList Returns a new dictionary in the format `map[string][]string` jsonized """ if filters and isinstance(filters, dict): for k, v in filters.items(): if not isinstance(v, list): v = [v] filters[k] = v return json.dumps(filters)
[ "def", "clean_filters", "(", "filters", ":", "Mapping", "=", "None", ")", "->", "str", ":", "if", "filters", "and", "isinstance", "(", "filters", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "filters", ".", "items", "(", ")", ":", "if", "not"...
Checks the values inside `filters` https://docs.docker.com/engine/api/v1.29/#operation/ServiceList Returns a new dictionary in the format `map[string][]string` jsonized
[ "Checks", "the", "values", "inside", "filters", "https", ":", "//", "docs", ".", "docker", ".", "com", "/", "engine", "/", "api", "/", "v1", ".", "29", "/", "#operation", "/", "ServiceList", "Returns", "a", "new", "dictionary", "in", "the", "format", "...
python
train
maweigert/gputools
gputools/convolve/convolve.py
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/convolve.py#L116-L151
def _convolve3_old(data, h, dev=None): """convolves 3d data with kernel h on the GPU Device dev boundary conditions are clamping to edge. h is converted to float32 if dev == None the default one is used """ if dev is None: dev = get_device() if dev is None: raise ValueError("no OpenCLDevice found...") dtype = data.dtype.type dtypes_options = {np.float32: "", np.uint16: "-D SHORTTYPE"} if not dtype in dtypes_options: raise TypeError("data type %s not supported yet, please convert to:" % dtype, list(dtypes_options.keys())) prog = OCLProgram(abspath("kernels/convolve3.cl"), build_options=dtypes_options[dtype]) hbuf = OCLArray.from_array(h.astype(np.float32)) img = OCLImage.from_array(data) res = OCLArray.empty(data.shape, dtype=np.float32) Ns = [np.int32(n) for n in data.shape + h.shape] prog.run_kernel("convolve3d", img.shape, None, img, hbuf.data, res.data, *Ns) return res.get()
[ "def", "_convolve3_old", "(", "data", ",", "h", ",", "dev", "=", "None", ")", ":", "if", "dev", "is", "None", ":", "dev", "=", "get_device", "(", ")", "if", "dev", "is", "None", ":", "raise", "ValueError", "(", "\"no OpenCLDevice found...\"", ")", "dty...
convolves 3d data with kernel h on the GPU Device dev boundary conditions are clamping to edge. h is converted to float32 if dev == None the default one is used
[ "convolves", "3d", "data", "with", "kernel", "h", "on", "the", "GPU", "Device", "dev", "boundary", "conditions", "are", "clamping", "to", "edge", ".", "h", "is", "converted", "to", "float32" ]
python
train
marl/jams
jams/eval.py
https://github.com/marl/jams/blob/b16778399b9528efbd71434842a079f7691a7a66/jams/eval.py#L279-L319
def hierarchy(ref, est, **kwargs): r'''Multi-level segmentation evaluation Parameters ---------- ref : jams.Annotation Reference annotation object est : jams.Annotation Estimated annotation object kwargs Additional keyword arguments Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. See Also -------- mir_eval.hierarchy.evaluate Examples -------- >>> # Load in the JAMS objects >>> ref_jam = jams.load('reference.jams') >>> est_jam = jams.load('estimated.jams') >>> # Select the first relevant annotations >>> ref_ann = ref_jam.search(namespace='multi_segment')[0] >>> est_ann = est_jam.search(namespace='multi_segment')[0] >>> scores = jams.eval.hierarchy(ref_ann, est_ann) ''' namespace = 'multi_segment' ref = coerce_annotation(ref, namespace) est = coerce_annotation(est, namespace) ref_hier, ref_hier_lab = hierarchy_flatten(ref) est_hier, est_hier_lab = hierarchy_flatten(est) return mir_eval.hierarchy.evaluate(ref_hier, ref_hier_lab, est_hier, est_hier_lab, **kwargs)
[ "def", "hierarchy", "(", "ref", ",", "est", ",", "*", "*", "kwargs", ")", ":", "namespace", "=", "'multi_segment'", "ref", "=", "coerce_annotation", "(", "ref", ",", "namespace", ")", "est", "=", "coerce_annotation", "(", "est", ",", "namespace", ")", "r...
r'''Multi-level segmentation evaluation Parameters ---------- ref : jams.Annotation Reference annotation object est : jams.Annotation Estimated annotation object kwargs Additional keyword arguments Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. See Also -------- mir_eval.hierarchy.evaluate Examples -------- >>> # Load in the JAMS objects >>> ref_jam = jams.load('reference.jams') >>> est_jam = jams.load('estimated.jams') >>> # Select the first relevant annotations >>> ref_ann = ref_jam.search(namespace='multi_segment')[0] >>> est_ann = est_jam.search(namespace='multi_segment')[0] >>> scores = jams.eval.hierarchy(ref_ann, est_ann)
[ "r", "Multi", "-", "level", "segmentation", "evaluation" ]
python
valid
gmr/tinman
tinman/handlers/base.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/base.py#L204-L219
def start_session(self): """Start the session. Invoke in your @gen.coroutine wrapped prepare method like:: result = yield gen.Task(self.start_session) :rtype: bool """ self.session = self._session_start() result = yield gen.Task(self.session.fetch) self._set_session_cookie() if not self.session.get('ip_address'): self.session.ip_address = self.request.remote_ip self._last_values() raise gen.Return(result)
[ "def", "start_session", "(", "self", ")", ":", "self", ".", "session", "=", "self", ".", "_session_start", "(", ")", "result", "=", "yield", "gen", ".", "Task", "(", "self", ".", "session", ".", "fetch", ")", "self", ".", "_set_session_cookie", "(", ")...
Start the session. Invoke in your @gen.coroutine wrapped prepare method like:: result = yield gen.Task(self.start_session) :rtype: bool
[ "Start", "the", "session", ".", "Invoke", "in", "your", "@gen", ".", "coroutine", "wrapped", "prepare", "method", "like", "::" ]
python
train
horazont/aioxmpp
aioxmpp/forms/fields.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/forms/fields.py#L235-L288
def render(self, *, use_local_metadata=True): """ Return a :class:`~.Field` containing the values and metadata set in the field. :param use_local_metadata: if true, the description, label and required metadata can be sourced from the field descriptor associated with this bound field. :type use_local_metadata: :class:`bool` :return: A new :class:`~.Field` instance. The returned object uses the values accessible through this object; that means, any values set for e.g. :attr:`desc` take precedence over the values declared at the class level. If `use_local_metadata` is false, values declared at the class level are not used if no local values are declared. This is useful when generating a reply to a form received by a peer, as it avoids sending a modified form. This method is must be overriden and is thus marked abstract. However, when called from a subclass, it creates the :class:`~.Field` instance and initialises its :attr:`~.Field.var`, :attr:`~.Field.type_`, :attr:`~.Field.desc`, :attr:`~.Field.required` and :attr:`~.Field.label` attributes and returns the result. Subclasses are supposed to override this method, call the base implementation through :func:`super` to obtain the :class:`~.Field` instance and then fill in the values and/or options. """ result = forms_xso.Field( var=self.field.var, type_=self.field.FIELD_TYPE, ) if use_local_metadata: result.desc = self.desc result.label = self.label result.required = self.required else: try: result.desc = self._desc except AttributeError: pass try: result.label = self._label except AttributeError: pass try: result.required = self._required except AttributeError: pass return result
[ "def", "render", "(", "self", ",", "*", ",", "use_local_metadata", "=", "True", ")", ":", "result", "=", "forms_xso", ".", "Field", "(", "var", "=", "self", ".", "field", ".", "var", ",", "type_", "=", "self", ".", "field", ".", "FIELD_TYPE", ",", ...
Return a :class:`~.Field` containing the values and metadata set in the field. :param use_local_metadata: if true, the description, label and required metadata can be sourced from the field descriptor associated with this bound field. :type use_local_metadata: :class:`bool` :return: A new :class:`~.Field` instance. The returned object uses the values accessible through this object; that means, any values set for e.g. :attr:`desc` take precedence over the values declared at the class level. If `use_local_metadata` is false, values declared at the class level are not used if no local values are declared. This is useful when generating a reply to a form received by a peer, as it avoids sending a modified form. This method is must be overriden and is thus marked abstract. However, when called from a subclass, it creates the :class:`~.Field` instance and initialises its :attr:`~.Field.var`, :attr:`~.Field.type_`, :attr:`~.Field.desc`, :attr:`~.Field.required` and :attr:`~.Field.label` attributes and returns the result. Subclasses are supposed to override this method, call the base implementation through :func:`super` to obtain the :class:`~.Field` instance and then fill in the values and/or options.
[ "Return", "a", ":", "class", ":", "~", ".", "Field", "containing", "the", "values", "and", "metadata", "set", "in", "the", "field", "." ]
python
train
inveniosoftware/invenio-github
invenio_github/api.py
https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/api.py#L272-L284
def remove_hook(self, repo_id, name): """Remove repository hook.""" ghrepo = self.api.repository_with_id(repo_id) if ghrepo: hooks = (h for h in ghrepo.hooks() if h.config.get('url', '') == self.webhook_url) hook = next(hooks, None) if not hook or hook.delete(): Repository.disable(user_id=self.user_id, github_id=repo_id, name=name) return True return False
[ "def", "remove_hook", "(", "self", ",", "repo_id", ",", "name", ")", ":", "ghrepo", "=", "self", ".", "api", ".", "repository_with_id", "(", "repo_id", ")", "if", "ghrepo", ":", "hooks", "=", "(", "h", "for", "h", "in", "ghrepo", ".", "hooks", "(", ...
Remove repository hook.
[ "Remove", "repository", "hook", "." ]
python
train
opendatateam/udata
udata/harvest/actions.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L62-L87
def create_source(name, url, backend,
                  description=None,
                  frequency=DEFAULT_HARVEST_FREQUENCY,
                  owner=None,
                  organization=None,
                  config=None,
                  ):
    '''Create a new harvest source and emit the creation signal.'''
    # Accept either model instances or raw identifiers for both owner
    # and organization; identifiers are resolved to instances here.
    if owner and not isinstance(owner, User):
        owner = User.get(owner)
    if organization and not isinstance(organization, Organization):
        organization = Organization.get(organization)

    new_source = HarvestSource.objects.create(
        name=name,
        url=url,
        backend=backend,
        description=description,
        frequency=frequency or DEFAULT_HARVEST_FREQUENCY,
        owner=owner,
        organization=organization,
        config=config,
    )
    # Notify listeners that a harvest source was created.
    signals.harvest_source_created.send(new_source)
    return new_source
[ "def", "create_source", "(", "name", ",", "url", ",", "backend", ",", "description", "=", "None", ",", "frequency", "=", "DEFAULT_HARVEST_FREQUENCY", ",", "owner", "=", "None", ",", "organization", "=", "None", ",", "config", "=", "None", ",", ")", ":", ...
Create a new harvest source
[ "Create", "a", "new", "harvest", "source" ]
python
train
Genida/django-meerkat
src/meerkat/utils/url.py
https://github.com/Genida/django-meerkat/blob/486502a75bb0800266db785fd32717d8c0eb8deb/src/meerkat/utils/url.py#L43-L63
def url_is(white_list):
    """
    Function generator.

    Args:
        white_list (dict): dict with PREFIXES and CONSTANTS keys
            (list values).

    Returns:
        func: a function returning True when its URL argument starts with
        any listed prefix or exactly equals any listed constant.
    """
    def predicate(url):
        if any(url.startswith(prefix)
               for prefix in white_list.get('PREFIXES', ())):
            return True
        return any(url == exact_url
                   for exact_url in white_list.get('CONSTANTS', ()))
    return predicate
[ "def", "url_is", "(", "white_list", ")", ":", "def", "func", "(", "url", ")", ":", "prefixes", "=", "white_list", ".", "get", "(", "'PREFIXES'", ",", "(", ")", ")", "for", "prefix", "in", "prefixes", ":", "if", "url", ".", "startswith", "(", "prefix"...
Function generator. Args: white_list (dict): dict with PREFIXES and CONSTANTS keys (list values). Returns: func: a function to check if a URL is...
[ "Function", "generator", "." ]
python
train
QualiSystems/cloudshell-networking-devices
cloudshell/devices/autoload/autoload_builder.py
https://github.com/QualiSystems/cloudshell-networking-devices/blob/009aab33edb30035b52fe10dbb91db61c95ba4d9/cloudshell/devices/autoload/autoload_builder.py#L23-L49
def _validate_build_resource_structure(autoload_resource):
    """Validate resource structure

    Flattens ``{prefix: {index: [resources]}}`` into ``{prefix+index:
    resource}``.  Missing indexes (falsy or ``-1``) and the extra
    resources of multi-resource groups are assigned fresh indexes above
    the current maximum.

    NOTE: uses ``dict.iteritems`` — Python 2 only, kept from the original.

    :param dict autoload_resource:
    :return correct autoload resource structure
    :rtype: dict
    """
    normalized = {}
    for prefix, indexed_resources in autoload_resource.iteritems():
        # Next index guaranteed not to collide with any existing one.
        next_free = max(map(int, indexed_resources)) + 1 or 1
        for idx, grouped in indexed_resources.iteritems():
            if not idx or idx == -1:
                idx = next_free
                next_free += 1
            # First resource keeps the group's own index ...
            normalized["{0}{1}".format(prefix, idx)] = grouped[0]
            # ... any additional ones are appended at fresh indexes.
            for extra in grouped[1:]:
                normalized["{0}{1}".format(prefix, str(next_free))] = extra
                next_free += 1
    return normalized
[ "def", "_validate_build_resource_structure", "(", "autoload_resource", ")", ":", "result", "=", "{", "}", "for", "resource_prefix", ",", "resources", "in", "autoload_resource", ".", "iteritems", "(", ")", ":", "max_free_index", "=", "max", "(", "map", "(", "int"...
Validate resource structure :param dict autoload_resource: :return correct autoload resource structure :rtype: dict
[ "Validate", "resource", "structure" ]
python
train
MisterWil/skybellpy
skybellpy/device.py
https://github.com/MisterWil/skybellpy/blob/ac966d9f590cda7654f6de7eecc94e2103459eef/skybellpy/device.py#L270-L280
def motion_sensor(self, enabled):
    """Set the motion sensor state.

    ``enabled`` must be exactly ``True`` or ``False`` (identity-checked);
    anything else raises :class:`SkybellException`.
    """
    if enabled is True:
        policy = CONST.SETTINGS_MOTION_POLICY_ON
    elif enabled is False:
        policy = CONST.SETTINGS_MOTION_POLICY_OFF
    else:
        raise SkybellException(ERROR.INVALID_SETTING_VALUE,
                               (CONST.SETTINGS_MOTION_POLICY, enabled))

    self._set_setting({CONST.SETTINGS_MOTION_POLICY: policy})
[ "def", "motion_sensor", "(", "self", ",", "enabled", ")", ":", "if", "enabled", "is", "True", ":", "value", "=", "CONST", ".", "SETTINGS_MOTION_POLICY_ON", "elif", "enabled", "is", "False", ":", "value", "=", "CONST", ".", "SETTINGS_MOTION_POLICY_OFF", "else",...
Set the motion sensor state.
[ "Set", "the", "motion", "sensor", "state", "." ]
python
train
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L3867-L3874
def partition_name_has_valid_characters(self, part_vals, throw_exception):
    """Thrift client wrapper: send the request, then block on the reply.

    Parameters:
     - part_vals
     - throw_exception
    """
    self.send_partition_name_has_valid_characters(part_vals, throw_exception)
    result = self.recv_partition_name_has_valid_characters()
    return result
[ "def", "partition_name_has_valid_characters", "(", "self", ",", "part_vals", ",", "throw_exception", ")", ":", "self", ".", "send_partition_name_has_valid_characters", "(", "part_vals", ",", "throw_exception", ")", "return", "self", ".", "recv_partition_name_has_valid_chara...
Parameters: - part_vals - throw_exception
[ "Parameters", ":", "-", "part_vals", "-", "throw_exception" ]
python
train
saltstack/salt
salt/engines/libvirt_events.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/libvirt_events.py#L424-L431
def _domain_event_agent_lifecycle_cb(conn, domain, state, reason, opaque):
    '''
    Domain agent lifecycle events handler
    '''
    # Translate the numeric libvirt enum values into their symbolic names
    # before forwarding the event onto the salt event bus.
    event_data = {
        'state': _get_libvirt_enum_string(
            'VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_STATE_', state),
        'reason': _get_libvirt_enum_string(
            'VIR_CONNECT_DOMAIN_EVENT_AGENT_LIFECYCLE_REASON_', reason),
    }
    _salt_send_domain_event(opaque, conn, domain, opaque['event'], event_data)
[ "def", "_domain_event_agent_lifecycle_cb", "(", "conn", ",", "domain", ",", "state", ",", "reason", ",", "opaque", ")", ":", "_salt_send_domain_event", "(", "opaque", ",", "conn", ",", "domain", ",", "opaque", "[", "'event'", "]", ",", "{", "'state'", ":", ...
Domain agent lifecycle events handler
[ "Domain", "agent", "lifecycle", "events", "handler" ]
python
train
oasiswork/zimsoap
zimsoap/client.py
https://github.com/oasiswork/zimsoap/blob/d1ea2eb4d50f263c9a16e5549af03f1eff3e295e/zimsoap/client.py#L429-L442
def modify_signature(self, signature):
    """ Modify an existing signature

    Can modify the content, contenttype and name. An unset attribute will
    not delete the attribute but leave it untouched.

    :param: signature a zobject.Signature object, with modified
            content/contentype/name, the id should be present and
            valid, the name does not allows to identify the signature
            for that operation.
    """
    # The selector (id/name) plus any modified fields is built by the
    # signature object itself.
    payload = signature.to_creator(for_modify=True)
    self.request('ModifySignature', {'signature': payload})
[ "def", "modify_signature", "(", "self", ",", "signature", ")", ":", "# if no content is specified, just use a selector (id/name)", "dic", "=", "signature", ".", "to_creator", "(", "for_modify", "=", "True", ")", "self", ".", "request", "(", "'ModifySignature'", ",", ...
Modify an existing signature Can modify the content, contenttype and name. An unset attribute will not delete the attribute but leave it untouched. :param: signature a zobject.Signature object, with modified content/contentype/name, the id should be present and valid, the name does not allows to identify the signature for that operation.
[ "Modify", "an", "existing", "signature" ]
python
train
learningequality/ricecooker
ricecooker/utils/linecook.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/utils/linecook.py#L46-L59
def rel_path_from_chan_path(chan_path, channeldir, windows=False):
    """
    Convert `chan_path` as obtained from a metadata provider into a
    `rel_path` suitable for accessing the file from the current working
    directory, e.g.,
    >>> rel_path_from_chan_path('Open Stax/Math', 'content/open_stax_zip/Open Stax')
    'content/open_stax_zip/Open Stax/Math'
    """
    separator = '\\' if windows else '/'
    # Drop the channel root directory; keep everything below it.
    _root, *subfolders = chan_path.split(separator)
    return os.path.join(channeldir, *subfolders)
[ "def", "rel_path_from_chan_path", "(", "chan_path", ",", "channeldir", ",", "windows", "=", "False", ")", ":", "if", "windows", ":", "chan_path_list", "=", "chan_path", ".", "split", "(", "'\\\\'", ")", "else", ":", "chan_path_list", "=", "chan_path", ".", "...
Convert `chan_path` as obtained from a metadata provider into a `rel_path` suitable for accessing the file from the current working directory, e.g., >>> rel_path_from_chan_path('Open Stax/Math', 'content/open_stax_zip/Open Stax') 'content/open_stax_zip/Open Stax/Math'
[ "Convert", "chan_path", "as", "obtained", "from", "a", "metadata", "provider", "into", "a", "rel_path", "suitable", "for", "accessing", "the", "file", "from", "the", "current", "working", "directory", "e", ".", "g", ".", ">>>", "rel_path_from_chan_path", "(", ...
python
train
tchellomello/python-arlo
pyarlo/base_station.py
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/base_station.py#L438-L447
def get_speaker_muted(self):
    """Return whether or not the speaker is muted.

    Returns ``None`` when the extended properties or the speaker section
    are unavailable.
    """
    properties = self.camera_extended_properties
    if not properties:
        return None
    speaker = properties.get('speaker')
    return speaker.get('mute') if speaker else None
[ "def", "get_speaker_muted", "(", "self", ")", ":", "if", "not", "self", ".", "camera_extended_properties", ":", "return", "None", "speaker", "=", "self", ".", "camera_extended_properties", ".", "get", "(", "'speaker'", ")", "if", "not", "speaker", ":", "return...
Return whether or not the speaker is muted.
[ "Return", "whether", "or", "not", "the", "speaker", "is", "muted", "." ]
python
train
dslackw/slpkg
slpkg/main.py
https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/main.py#L119-L132
def command_update(self):
    """Update package lists repositories

    ``update`` with no option refreshes every repository; ``update
    --only=repo1,repo2`` refreshes only the listed repositories that are
    actually enabled.  Any other argument combination prints usage help.
    """
    if len(self.args) == 1 and self.args[0] == "update":
        Update().repository(only="")
    elif (len(self.args) == 2 and self.args[0] == "update" and
            self.args[1].startswith("--only=")):
        requested = self.args[1].split("=")[-1].split(",")
        # Build a new filtered list instead of calling repos.remove()
        # inside "for rp in repos": mutating the list being iterated
        # skips the element following each removal, so e.g. two
        # consecutive unknown repositories left the second one in place.
        repos = [rp for rp in requested if rp in self.meta.repositories]
        Update().repository(repos)
    else:
        usage("")
[ "def", "command_update", "(", "self", ")", ":", "if", "len", "(", "self", ".", "args", ")", "==", "1", "and", "self", ".", "args", "[", "0", "]", "==", "\"update\"", ":", "Update", "(", ")", ".", "repository", "(", "only", "=", "\"\"", ")", "elif...
Update package lists repositories
[ "Update", "package", "lists", "repositories" ]
python
train
bitesofcode/projexui
projexui/widgets/xcombobox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcombobox.py#L347-L371
def setCheckedIndexes(self, indexes):
    """
    Sets a list of checked indexes for this combobox.

    :param      indexes | [<int>, ..]
    """
    if not self.isCheckable():
        return

    model = self.model()
    # Suppress itemChanged notifications while toggling check states.
    model.blockSignals(True)

    for row in range(self.count()):
        # Blank separator rows carry no item text and are left alone.
        if not self.itemText(row):
            continue
        state = Qt.Checked if row in indexes else Qt.Unchecked
        model.item(row).setCheckState(state)

    model.blockSignals(False)
    self.updateCheckedText()
[ "def", "setCheckedIndexes", "(", "self", ",", "indexes", ")", ":", "if", "not", "self", ".", "isCheckable", "(", ")", ":", "return", "model", "=", "self", ".", "model", "(", ")", "model", ".", "blockSignals", "(", "True", ")", "for", "i", "in", "rang...
Sets a list of checked indexes for this combobox. :param indexes | [<int>, ..]
[ "Sets", "a", "list", "of", "checked", "indexes", "for", "this", "combobox", ".", ":", "param", "indexes", "|", "[", "<int", ">", "..", "]" ]
python
train
Azure/azure-multiapi-storage-python
azure/multiapi/storage/v2016_05_31/storageclient.py
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2016_05_31/storageclient.py#L189-L285
def _perform_request(self, request, parser=None, parser_args=None, operation_context=None): ''' Sends the request and return response. Catches HTTPError and hands it to error handler ''' operation_context = operation_context or _OperationContext() retry_context = RetryContext() # Apply the appropriate host based on the location mode self._apply_host(request, operation_context, retry_context) # Apply common settings to the request _update_request(request) while(True): try: try: # Execute the request callback if self.request_callback: self.request_callback(request) # Add date and auth after the callback so date doesn't get too old and # authentication is still correct if signed headers are added in the request # callback. This also ensures retry policies with long back offs # will work as it resets the time sensitive headers. _add_date_header(request) self.authentication.sign_request(request) # Set the request context retry_context.request = request # Perform the request response = self._httpclient.perform_request(request) # Execute the response callback if self.response_callback: self.response_callback(response) # Set the response context retry_context.response = response # Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException if response.status >= 300: # This exception will be caught by the general error handler # and raised as an azure http exception _http_error_handler(HTTPError(response.status, response.message, response.headers, response.body)) # Parse the response if parser: if parser_args: args = [response] args.extend(parser_args) return parser(*args) else: return parser(response) else: return except AzureException as ex: raise ex except Exception as ex: if sys.version_info >= (3,): # Automatic chaining in Python 3 means we keep the trace raise AzureException(ex.args[0]) else: # There isn't a good solution in 2 for keeping the stack trace # in general, or that will not result in an error in 3 # However, we can keep the previous 
error type and message # TODO: In the future we will log the trace msg = "" if len(ex.args) > 0: msg = ex.args[0] raise AzureException('{}: {}'.format(ex.__class__.__name__, msg)) except AzureException as ex: # Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc) # will not be resolved with retries. if str(ex) == _ERROR_DECRYPTION_FAILURE: raise ex # Determine whether a retry should be performed and if so, how # long to wait before performing retry. retry_interval = self.retry(retry_context) if retry_interval is not None: # Execute the callback if self.retry_callback: self.retry_callback(retry_context) # Sleep for the desired retry interval sleep(retry_interval) else: raise ex finally: # If this is a location locked operation and the location is not set, # this is the first request of that operation. Set the location to # be used for subsequent requests in the operation. if operation_context.location_lock and not operation_context.host_location: operation_context.host_location = {retry_context.location_mode: request.host}
[ "def", "_perform_request", "(", "self", ",", "request", ",", "parser", "=", "None", ",", "parser_args", "=", "None", ",", "operation_context", "=", "None", ")", ":", "operation_context", "=", "operation_context", "or", "_OperationContext", "(", ")", "retry_conte...
Sends the request and return response. Catches HTTPError and hands it to error handler
[ "Sends", "the", "request", "and", "return", "response", ".", "Catches", "HTTPError", "and", "hands", "it", "to", "error", "handler" ]
python
train
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/__init__.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/__init__.py#L336-L444
def _executor_internal_inner(self, host, module_name, module_args, inject, port, is_chained=False): ''' decides how to invoke a module ''' # allow module args to work as a dictionary # though it is usually a string new_args = "" if type(module_args) == dict: for (k,v) in module_args.iteritems(): new_args = new_args + "%s='%s' " % (k,v) module_args = new_args module_name = utils.template(self.basedir, module_name, inject) module_args = utils.template(self.basedir, module_args, inject, expand_lists=True) if module_name in utils.plugins.action_loader: if self.background != 0: raise errors.AnsibleError("async mode is not supported with the %s module" % module_name) handler = utils.plugins.action_loader.get(module_name, self) elif self.background == 0: handler = utils.plugins.action_loader.get('normal', self) else: handler = utils.plugins.action_loader.get('async', self) conditional = utils.template(self.basedir, self.conditional, inject) if not getattr(handler, 'BYPASS_HOST_LOOP', False) and not utils.check_conditional(conditional): result = utils.jsonify(dict(skipped=True)) self.callbacks.on_skipped(host, inject.get('item',None)) return ReturnData(host=host, result=result) conn = None actual_host = inject.get('ansible_ssh_host', host) actual_port = port if self.transport in [ 'paramiko', 'ssh' ]: actual_port = inject.get('ansible_ssh_port', port) # the delegated host may have different SSH port configured, etc # and we need to transfer those, and only those, variables delegate_to = inject.get('delegate_to', None) if delegate_to is not None: delegate_to = utils.template(self.basedir, delegate_to, inject) inject = inject.copy() interpreters = [] for i in inject: if i.startswith("ansible_") and i.endswith("_interpreter"): interpreters.append(i) for i in interpreters: del inject[i] port = C.DEFAULT_REMOTE_PORT try: delegate_info = inject['hostvars'][delegate_to] actual_host = delegate_info.get('ansible_ssh_host', delegate_to) actual_port = 
delegate_info.get('ansible_ssh_port', port) for i in delegate_info: if i.startswith("ansible_") and i.endswith("_interpreter"): inject[i] = delegate_info[i] except errors.AnsibleError: actual_host = delegate_to actual_port = port try: if actual_port is not None: actual_port = int(actual_port) except ValueError, e: result = dict(failed=True, msg="FAILED: Configured port \"%s\" is not a valid port, expected integer" % actual_port) return ReturnData(host=host, comm_ok=False, result=result) try: conn = self.connector.connect(actual_host, actual_port) if delegate_to or host != actual_host: conn.delegate = host except errors.AnsibleConnectionFailed, e: result = dict(failed=True, msg="FAILED: %s" % str(e)) return ReturnData(host=host, comm_ok=False, result=result) tmp = '' # all modules get a tempdir, action plugins get one unless they have NEEDS_TMPPATH set to False if getattr(handler, 'NEEDS_TMPPATH', True): tmp = self._make_tmp_path(conn) result = handler.run(conn, tmp, module_name, module_args, inject) conn.close() if not result.comm_ok: # connection or parsing errors... self.callbacks.on_unreachable(host, result.result) else: data = result.result if 'item' in inject: result.result['item'] = inject['item'] result.result['invocation'] = dict( module_args=module_args, module_name=module_name ) if is_chained: # no callbacks return result if 'skipped' in data: self.callbacks.on_skipped(host) elif not result.is_successful(): ignore_errors = self.module_vars.get('ignore_errors', False) self.callbacks.on_failed(host, data, ignore_errors) else: self.callbacks.on_ok(host, data) return result
[ "def", "_executor_internal_inner", "(", "self", ",", "host", ",", "module_name", ",", "module_args", ",", "inject", ",", "port", ",", "is_chained", "=", "False", ")", ":", "# allow module args to work as a dictionary", "# though it is usually a string", "new_args", "=",...
decides how to invoke a module
[ "decides", "how", "to", "invoke", "a", "module" ]
python
train
wavefrontHQ/python-client
wavefront_api_client/api/derived_metric_api.py
https://github.com/wavefrontHQ/python-client/blob/b0f1046a8f68c2c7d69e395f7167241f224c738a/wavefront_api_client/api/derived_metric_api.py#L333-L354
def get_all_derived_metrics(self, **kwargs):  # noqa: E501
    """Get all derived metric definitions for a customer  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_derived_metrics(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset:
    :param int limit:
    :return: ResponseContainerPagedDerivedMetricDefinition
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this convenience wrapper always want the deserialized
    # payload only, never the (data, status, headers) tuple.
    kwargs['_return_http_data_only'] = True
    delegate = self.get_all_derived_metrics_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous: the delegate returns a thread object.
        return delegate(**kwargs)  # noqa: E501
    return delegate(**kwargs)  # noqa: E501
[ "def", "get_all_derived_metrics", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "get_all_derived_met...
Get all derived metric definitions for a customer # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_all_derived_metrics(async_req=True) >>> result = thread.get() :param async_req bool :param int offset: :param int limit: :return: ResponseContainerPagedDerivedMetricDefinition If the method is called asynchronously, returns the request thread.
[ "Get", "all", "derived", "metric", "definitions", "for", "a", "customer", "#", "noqa", ":", "E501" ]
python
train
telefonicaid/fiware-sdc
python-sdcclient/utils/rest_client_utils.py
https://github.com/telefonicaid/fiware-sdc/blob/d2d5f87fc574caf6bcc49594bbcb31f620ba8c51/python-sdcclient/utils/rest_client_utils.py#L87-L114
def _call_api(self, uri_pattern, method, body=None, headers=None, parameters=None, **kwargs):
    """
    Launch HTTP request to the API with given arguments
    :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax)
    :param method: HTTP method to execute (string) [get | post | put | delete | update]
    :param body: Raw Body content (string) (Plain/XML/JSON to be sent)
    :param headers: HTTP header request (dict)
    :param parameters: Query parameters for the URL. i.e. {'key1': 'value1', 'key2': 'value2'}
    :param **kwargs: URL parameters (without API_ROOT_URL_ARG_NAME) to fill the patterns
    :returns: REST API response ('Requests' response)
    """
    kwargs[API_ROOT_URL_ARG_NAME] = self.api_root_url
    url = uri_pattern.format(**kwargs)

    logger.info("Executing API request [%s %s]", method, url)
    log_print_request(logger, method, url, parameters, headers, body)

    try:
        # NOTE(review): verify=False disables TLS certificate checking;
        # acceptable only against trusted test endpoints — confirm.
        response = requests.request(method=method, url=url, data=body,
                                    headers=headers, params=parameters,
                                    verify=False)
    # 'except Exception, e' is Python-2-only syntax (SyntaxError on
    # Python 3); 'as' is valid on both 2.6+ and 3.x.
    except Exception as e:
        logger.error("Request %s to %s crashed: %s", method, url, str(e))
        # Bare raise re-raises the active exception with its traceback.
        raise

    log_print_response(logger, response)

    return response
[ "def", "_call_api", "(", "self", ",", "uri_pattern", ",", "method", ",", "body", "=", "None", ",", "headers", "=", "None", ",", "parameters", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "API_ROOT_URL_ARG_NAME", "]", "=", "self", ".",...
Launch HTTP request to the API with given arguments :param uri_pattern: string pattern of the full API url with keyword arguments (format string syntax) :param method: HTTP method to execute (string) [get | post | put | delete | update] :param body: Raw Body content (string) (Plain/XML/JSON to be sent) :param headers: HTTP header request (dict) :param parameters: Query parameters for the URL. i.e. {'key1': 'value1', 'key2': 'value2'} :param **kwargs: URL parameters (without API_ROOT_URL_ARG_NAME) to fill the patters :returns: REST API response ('Requests' response)
[ "Launch", "HTTP", "request", "to", "the", "API", "with", "given", "arguments", ":", "param", "uri_pattern", ":", "string", "pattern", "of", "the", "full", "API", "url", "with", "keyword", "arguments", "(", "format", "string", "syntax", ")", ":", "param", "...
python
train
playpauseandstop/bootstrapper
bootstrapper.py
https://github.com/playpauseandstop/bootstrapper/blob/b216a05f2acb0b9f4919c4e010ff7b0f63fc1393/bootstrapper.py#L68-L84
def check_pre_requirements(pre_requirements):
    """Check all necessary system requirements to exist.

    :param pre_requirements:
        Sequence of pre-requirements to check by running
        ``where <pre_requirement>`` on Windows and ``which ...`` elsewhere.
    """
    # 'virtualenv' is always required, whatever the caller asked for.
    required = set(pre_requirements or [])
    required.add('virtualenv')

    for tool in required:
        if which(tool):
            continue
        print_error('Requirement {0!r} is not found in system'.format(tool))
        return False

    return True
[ "def", "check_pre_requirements", "(", "pre_requirements", ")", ":", "pre_requirements", "=", "set", "(", "pre_requirements", "or", "[", "]", ")", "pre_requirements", ".", "add", "(", "'virtualenv'", ")", "for", "requirement", "in", "pre_requirements", ":", "if", ...
Check all necessary system requirements to exist. :param pre_requirements: Sequence of pre-requirements to check by running ``where <pre_requirement>`` on Windows and ``which ...`` elsewhere.
[ "Check", "all", "necessary", "system", "requirements", "to", "exist", "." ]
python
valid
ucfopen/canvasapi
canvasapi/canvas.py
https://github.com/ucfopen/canvasapi/blob/319064b5fc97ba54250af683eb98723ef3f76cf8/canvasapi/canvas.py#L1058-L1078
def get_outcome(self, outcome):
    """
    Returns the details of the outcome with the given id.

    :calls: `GET /api/v1/outcomes/:id \
    <https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_

    :param outcome: The outcome object or ID to return.
    :type outcome: :class:`canvasapi.outcome.Outcome` or int

    :returns: An Outcome object.
    :rtype: :class:`canvasapi.outcome.Outcome`
    """
    # Imported locally to avoid a circular import at module load time.
    from canvasapi.outcome import Outcome

    resolved_id = obj_or_id(outcome, "outcome", (Outcome,))

    response = self.__requester.request(
        'GET',
        'outcomes/{}'.format(resolved_id)
    )
    return Outcome(self.__requester, response.json())
[ "def", "get_outcome", "(", "self", ",", "outcome", ")", ":", "from", "canvasapi", ".", "outcome", "import", "Outcome", "outcome_id", "=", "obj_or_id", "(", "outcome", ",", "\"outcome\"", ",", "(", "Outcome", ",", ")", ")", "response", "=", "self", ".", "...
Returns the details of the outcome with the given id. :calls: `GET /api/v1/outcomes/:id \ <https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_ :param outcome: The outcome object or ID to return. :type outcome: :class:`canvasapi.outcome.Outcome` or int :returns: An Outcome object. :rtype: :class:`canvasapi.outcome.Outcome`
[ "Returns", "the", "details", "of", "the", "outcome", "with", "the", "given", "id", "." ]
python
train
saltstack/salt
salt/fileclient.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/fileclient.py#L67-L84
def decode_dict_keys_to_str(src):
    '''
    Convert top level keys from bytes to strings if possible. This is
    necessary because Python 3 makes a distinction between these types.
    '''
    # Py2 needs no conversion; non-dicts pass through untouched.
    if not six.PY3 or not isinstance(src, dict):
        return src

    converted = {}
    for key, value in six.iteritems(src):
        if isinstance(key, bytes):
            try:
                key = key.decode()
            except UnicodeError:
                # Undecodable keys are kept as bytes.
                pass
        converted[key] = value
    return converted
[ "def", "decode_dict_keys_to_str", "(", "src", ")", ":", "if", "not", "six", ".", "PY3", "or", "not", "isinstance", "(", "src", ",", "dict", ")", ":", "return", "src", "output", "=", "{", "}", "for", "key", ",", "val", "in", "six", ".", "iteritems", ...
Convert top level keys from bytes to strings if possible. This is necessary because Python 3 makes a distinction between these types.
[ "Convert", "top", "level", "keys", "from", "bytes", "to", "strings", "if", "possible", ".", "This", "is", "necessary", "because", "Python", "3", "makes", "a", "distinction", "between", "these", "types", "." ]
python
train
explosion/spaCy
spacy/util.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/util.py#L149-L152
def load_model_from_package(name, **overrides):
    """Load a model from an installed package.

    The package is imported by name and its ``load()`` entry point is
    invoked with the given overrides.
    """
    package = importlib.import_module(name)
    return package.load(**overrides)
[ "def", "load_model_from_package", "(", "name", ",", "*", "*", "overrides", ")", ":", "cls", "=", "importlib", ".", "import_module", "(", "name", ")", "return", "cls", ".", "load", "(", "*", "*", "overrides", ")" ]
Load a model from an installed package.
[ "Load", "a", "model", "from", "an", "installed", "package", "." ]
python
train
spyder-ide/spyder-notebook
spyder_notebook/notebookplugin.py
https://github.com/spyder-ide/spyder-notebook/blob/54e626b9d2a3fccd3e4625b0f97fe06e5bb1a6db/spyder_notebook/notebookplugin.py#L470-L473
def move_tab(self, index_from, index_to):
    """Move tab.

    Removes the client at ``index_from`` and re-inserts it at
    ``index_to``, keeping ``self.clients`` in sync with the tab bar.
    """
    moved_client = self.clients.pop(index_from)
    self.clients.insert(index_to, moved_client)
[ "def", "move_tab", "(", "self", ",", "index_from", ",", "index_to", ")", ":", "client", "=", "self", ".", "clients", ".", "pop", "(", "index_from", ")", "self", ".", "clients", ".", "insert", "(", "index_to", ",", "client", ")" ]
Move tab.
[ "Move", "tab", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L2101-L2129
def states(self, states):
    """ Setter for _states field

    See property

    :param states: Dictionary of States
    :raises exceptions.TypeError: if the states parameter is of wrong type
    :raises exceptions.AttributeError: if the keys of the dictionary and the state_ids in the dictionary do not match
    """
    if not isinstance(states, dict):
        raise TypeError("states must be of type dict")
    if any(not isinstance(state, State) for state in states.values()):
        raise TypeError("element of container_state.states must be of type State")
    if any(state_id != state.state_id for state_id, state in states.items()):
        raise AttributeError("The key of the state dictionary and the id of the state do not match")

    previous_states = self._states
    self._states = states
    for state in states.values():
        try:
            state.parent = self
        except ValueError:
            # Re-parenting failed: restore the previous mapping and
            # propagate the error to the caller.
            self._states = previous_states
            raise
    # Detach any former child that is no longer referenced and still
    # points at this container as its parent.
    for old_state in previous_states.values():
        if old_state not in self._states.values() and old_state.parent is self:
            old_state.parent = None
[ "def", "states", "(", "self", ",", "states", ")", ":", "if", "not", "isinstance", "(", "states", ",", "dict", ")", ":", "raise", "TypeError", "(", "\"states must be of type dict\"", ")", "if", "[", "state_id", "for", "state_id", ",", "state", "in", "states...
Setter for _states field See property :param states: Dictionary of States :raises exceptions.TypeError: if the states parameter is of wrong type :raises exceptions.AttributeError: if the keys of the dictionary and the state_ids in the dictionary do not match
[ "Setter", "for", "_states", "field" ]
python
train
ladybug-tools/ladybug
ladybug/datacollection.py
https://github.com/ladybug-tools/ladybug/blob/c08b7308077a48d5612f644943f92d5b5dade583/ladybug/datacollection.py#L1322-L1340
def filter_by_months_per_hour(self, months_per_hour): """Filter the Data Collection based on a list of months per hour (as strings). Args: months_per_hour: A list of tuples representing months per hour. Each tuple should possess two values: the first is the month and the second is the hour. (eg. (12, 23) = December at 11 PM) Return: A new Data Collection with filtered data """ _filt_values = [] _filt_datetimes = [] for i, d in enumerate(self.datetimes): if d in months_per_hour: _filt_datetimes.append(d) _filt_values.append(self._values[i]) return MonthlyPerHourCollection( self.header.duplicate(), _filt_values, _filt_datetimes)
[ "def", "filter_by_months_per_hour", "(", "self", ",", "months_per_hour", ")", ":", "_filt_values", "=", "[", "]", "_filt_datetimes", "=", "[", "]", "for", "i", ",", "d", "in", "enumerate", "(", "self", ".", "datetimes", ")", ":", "if", "d", "in", "months...
Filter the Data Collection based on a list of months per hour (as strings). Args: months_per_hour: A list of tuples representing months per hour. Each tuple should possess two values: the first is the month and the second is the hour. (eg. (12, 23) = December at 11 PM) Return: A new Data Collection with filtered data
[ "Filter", "the", "Data", "Collection", "based", "on", "a", "list", "of", "months", "per", "hour", "(", "as", "strings", ")", "." ]
python
train
epfl-lts2/pygsp
pygsp/graphs/graph.py
https://github.com/epfl-lts2/pygsp/blob/8ce5bde39206129287375af24fdbcd7edddca8c5/pygsp/graphs/graph.py#L219-L256
def subgraph(self, vertices): r"""Create a subgraph from a list of vertices. Parameters ---------- vertices : list Vertices to keep. Either a list of indices or an indicator function. Returns ------- subgraph : :class:`Graph` Subgraph. Examples -------- >>> graph = graphs.Graph([ ... [0., 3., 0., 0.], ... [3., 0., 4., 0.], ... [0., 4., 0., 2.], ... [0., 0., 2., 0.], ... ]) >>> graph = graph.subgraph([0, 2, 1]) >>> graph.W.toarray() array([[0., 0., 3.], [0., 0., 4.], [3., 4., 0.]]) """ adjacency = self.W[vertices, :][:, vertices] try: coords = self.coords[vertices] except AttributeError: coords = None graph = Graph(adjacency, self.lap_type, coords, self.plotting) for name, signal in self.signals.items(): graph.set_signal(signal[vertices], name) return graph
[ "def", "subgraph", "(", "self", ",", "vertices", ")", ":", "adjacency", "=", "self", ".", "W", "[", "vertices", ",", ":", "]", "[", ":", ",", "vertices", "]", "try", ":", "coords", "=", "self", ".", "coords", "[", "vertices", "]", "except", "Attrib...
r"""Create a subgraph from a list of vertices. Parameters ---------- vertices : list Vertices to keep. Either a list of indices or an indicator function. Returns ------- subgraph : :class:`Graph` Subgraph. Examples -------- >>> graph = graphs.Graph([ ... [0., 3., 0., 0.], ... [3., 0., 4., 0.], ... [0., 4., 0., 2.], ... [0., 0., 2., 0.], ... ]) >>> graph = graph.subgraph([0, 2, 1]) >>> graph.W.toarray() array([[0., 0., 3.], [0., 0., 4.], [3., 4., 0.]])
[ "r", "Create", "a", "subgraph", "from", "a", "list", "of", "vertices", "." ]
python
train
Esri/ArcREST
src/arcrest/ags/_gpobjects.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/ags/_gpobjects.py#L184-L190
def loadFeatures(self, path_to_fc): """ loads a feature class features to the object """ from ..common.spatial import featureclass_to_json v = json.loads(featureclass_to_json(path_to_fc)) self.value = v
[ "def", "loadFeatures", "(", "self", ",", "path_to_fc", ")", ":", "from", ".", ".", "common", ".", "spatial", "import", "featureclass_to_json", "v", "=", "json", ".", "loads", "(", "featureclass_to_json", "(", "path_to_fc", ")", ")", "self", ".", "value", "...
loads a feature class features to the object
[ "loads", "a", "feature", "class", "features", "to", "the", "object" ]
python
train
Hackerfleet/hfos
modules/enrol/hfos/enrol/enrolmanager.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/modules/enrol/hfos/enrol/enrolmanager.py#L651-L675
def _invite(self, name, method, email, uuid, event, password=""): """Actually invite a given user""" props = { 'uuid': std_uuid(), 'status': 'Open', 'name': name, 'method': method, 'email': email, 'password': password, 'timestamp': std_now() } enrollment = objectmodels['enrollment'](props) enrollment.save() self.log('Enrollment stored', lvl=debug) self._send_invitation(enrollment, event) packet = { 'component': 'hfos.enrol.enrolmanager', 'action': 'invite', 'data': [True, email] } self.fireEvent(send(uuid, packet))
[ "def", "_invite", "(", "self", ",", "name", ",", "method", ",", "email", ",", "uuid", ",", "event", ",", "password", "=", "\"\"", ")", ":", "props", "=", "{", "'uuid'", ":", "std_uuid", "(", ")", ",", "'status'", ":", "'Open'", ",", "'name'", ":", ...
Actually invite a given user
[ "Actually", "invite", "a", "given", "user" ]
python
train