repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
ayust/kitnirc
kitnirc/client.py
https://github.com/ayust/kitnirc/blob/cf19fe39219da75f053e1a3976bf21331b6fefea/kitnirc/client.py#L995-L1002
def _parse_nicknameinuse(client, command, actor, args): """Parse a NICKNAMEINUSE message and dispatch an event. The parameter passed along with the event is the nickname which is already in use. """ nick, _, _ = args.rpartition(" ") client.dispatch_event("NICKNAMEINUSE", nick)
[ "def", "_parse_nicknameinuse", "(", "client", ",", "command", ",", "actor", ",", "args", ")", ":", "nick", ",", "_", ",", "_", "=", "args", ".", "rpartition", "(", "\" \"", ")", "client", ".", "dispatch_event", "(", "\"NICKNAMEINUSE\"", ",", "nick", ")" ...
Parse a NICKNAMEINUSE message and dispatch an event. The parameter passed along with the event is the nickname which is already in use.
[ "Parse", "a", "NICKNAMEINUSE", "message", "and", "dispatch", "an", "event", "." ]
python
train
b3j0f/utils
b3j0f/utils/property.py
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/property.py#L638-L657
def firsts(properties): """ Transform a dictionary of {name: [(elt, value)+]} (resulting from get_properties) to a dictionary of {name, value} where names are first encountered in input properties. :param dict properties: properties to firsts. :return: dictionary of parameter values by names. :rtype: dict """ result = {} # parse elts for name in properties: elt_properties = properties[name] # add property values in result[name] result[name] = elt_properties[0][1] return result
[ "def", "firsts", "(", "properties", ")", ":", "result", "=", "{", "}", "# parse elts", "for", "name", "in", "properties", ":", "elt_properties", "=", "properties", "[", "name", "]", "# add property values in result[name]", "result", "[", "name", "]", "=", "elt...
Transform a dictionary of {name: [(elt, value)+]} (resulting from get_properties) to a dictionary of {name, value} where names are first encountered in input properties. :param dict properties: properties to firsts. :return: dictionary of parameter values by names. :rtype: dict
[ "Transform", "a", "dictionary", "of", "{", "name", ":", "[", "(", "elt", "value", ")", "+", "]", "}", "(", "resulting", "from", "get_properties", ")", "to", "a", "dictionary", "of", "{", "name", "value", "}", "where", "names", "are", "first", "encounte...
python
train
xiaocong/uiautomator
uiautomator/__init__.py
https://github.com/xiaocong/uiautomator/blob/9a0c892ffd056713f91aa2153d1533c5b0553a1c/uiautomator/__init__.py#L1090-L1095
def child(self, **kwargs): '''set childSelector.''' return AutomatorDeviceObject( self.device, self.selector.clone().child(**kwargs) )
[ "def", "child", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "AutomatorDeviceObject", "(", "self", ".", "device", ",", "self", ".", "selector", ".", "clone", "(", ")", ".", "child", "(", "*", "*", "kwargs", ")", ")" ]
set childSelector.
[ "set", "childSelector", "." ]
python
train
foremast/foremast
src/foremast/awslambda/awslambda.py
https://github.com/foremast/foremast/blob/fb70f29b8ce532f061685a17d120486e47b215ba/src/foremast/awslambda/awslambda.py#L118-L134
def _get_sg_ids(self): """Get IDs for all defined security groups. Returns: list: security group IDs for all lambda_extras """ try: lambda_extras = self.settings['security_groups']['lambda_extras'] except KeyError: lambda_extras = [] security_groups = [self.app_name] + lambda_extras sg_ids = [] for security_group in security_groups: sg_id = get_security_group_id(name=security_group, env=self.env, region=self.region) sg_ids.append(sg_id) return sg_ids
[ "def", "_get_sg_ids", "(", "self", ")", ":", "try", ":", "lambda_extras", "=", "self", ".", "settings", "[", "'security_groups'", "]", "[", "'lambda_extras'", "]", "except", "KeyError", ":", "lambda_extras", "=", "[", "]", "security_groups", "=", "[", "self"...
Get IDs for all defined security groups. Returns: list: security group IDs for all lambda_extras
[ "Get", "IDs", "for", "all", "defined", "security", "groups", "." ]
python
train
manns/pyspread
pyspread/src/lib/vlc.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/vlc.py#L1820-L1830
def vlm_add_vod(self, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux): '''Add a vod, with one input. @param psz_name: the name of the new vod media. @param psz_input: the input MRL. @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new vod. @param psz_mux: the muxer of the vod media. @return: 0 on success, -1 on error. ''' return libvlc_vlm_add_vod(self, str_to_bytes(psz_name), str_to_bytes(psz_input), i_options, ppsz_options, b_enabled, str_to_bytes(psz_mux))
[ "def", "vlm_add_vod", "(", "self", ",", "psz_name", ",", "psz_input", ",", "i_options", ",", "ppsz_options", ",", "b_enabled", ",", "psz_mux", ")", ":", "return", "libvlc_vlm_add_vod", "(", "self", ",", "str_to_bytes", "(", "psz_name", ")", ",", "str_to_bytes"...
Add a vod, with one input. @param psz_name: the name of the new vod media. @param psz_input: the input MRL. @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new vod. @param psz_mux: the muxer of the vod media. @return: 0 on success, -1 on error.
[ "Add", "a", "vod", "with", "one", "input", "." ]
python
train
jorgenschaefer/elpy
elpy/server.py
https://github.com/jorgenschaefer/elpy/blob/ffd982f829b11e53f2be187c7b770423341f29bc/elpy/server.py#L232-L255
def get_source(fileobj): """Translate fileobj into file contents. fileobj is either a string or a dict. If it's a string, that's the file contents. If it's a string, then the filename key contains the name of the file whose contents we are to use. If the dict contains a true value for the key delete_after_use, the file should be deleted once read. """ if not isinstance(fileobj, dict): return fileobj else: try: with io.open(fileobj["filename"], encoding="utf-8", errors="ignore") as f: return f.read() finally: if fileobj.get('delete_after_use'): try: os.remove(fileobj["filename"]) except: # pragma: no cover pass
[ "def", "get_source", "(", "fileobj", ")", ":", "if", "not", "isinstance", "(", "fileobj", ",", "dict", ")", ":", "return", "fileobj", "else", ":", "try", ":", "with", "io", ".", "open", "(", "fileobj", "[", "\"filename\"", "]", ",", "encoding", "=", ...
Translate fileobj into file contents. fileobj is either a string or a dict. If it's a string, that's the file contents. If it's a string, then the filename key contains the name of the file whose contents we are to use. If the dict contains a true value for the key delete_after_use, the file should be deleted once read.
[ "Translate", "fileobj", "into", "file", "contents", "." ]
python
train
cjdrake/pyeda
pyeda/logic/aes.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/logic/aes.py#L234-L244
def inv_sub_bytes(state): """ Transformation in the Inverse Cipher that is the inverse of SubBytes(). """ state = state.reshape(4, 32) return fcat( invsubword(state[0]), invsubword(state[1]), invsubword(state[2]), invsubword(state[3]), )
[ "def", "inv_sub_bytes", "(", "state", ")", ":", "state", "=", "state", ".", "reshape", "(", "4", ",", "32", ")", "return", "fcat", "(", "invsubword", "(", "state", "[", "0", "]", ")", ",", "invsubword", "(", "state", "[", "1", "]", ")", ",", "inv...
Transformation in the Inverse Cipher that is the inverse of SubBytes().
[ "Transformation", "in", "the", "Inverse", "Cipher", "that", "is", "the", "inverse", "of", "SubBytes", "()", "." ]
python
train
galaxyproject/pulsar
pulsar/client/manager.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/client/manager.py#L83-L90
def get_client(self, destination_params, job_id, **kwargs): """Build a client given specific destination parameters and job_id.""" destination_params = _parse_destination_params(destination_params) destination_params.update(**kwargs) job_manager_interface_class = self.job_manager_interface_class job_manager_interface_args = dict(destination_params=destination_params, **self.job_manager_interface_args) job_manager_interface = job_manager_interface_class(**job_manager_interface_args) return self.client_class(destination_params, job_id, job_manager_interface, **self.extra_client_kwds)
[ "def", "get_client", "(", "self", ",", "destination_params", ",", "job_id", ",", "*", "*", "kwargs", ")", ":", "destination_params", "=", "_parse_destination_params", "(", "destination_params", ")", "destination_params", ".", "update", "(", "*", "*", "kwargs", "...
Build a client given specific destination parameters and job_id.
[ "Build", "a", "client", "given", "specific", "destination", "parameters", "and", "job_id", "." ]
python
train
frictionlessdata/tableschema-bigquery-py
tableschema_bigquery/mapper.py
https://github.com/frictionlessdata/tableschema-bigquery-py/blob/aec6f0530ba5a0a08499f5e7a10f2c179c500285/tableschema_bigquery/mapper.py#L30-L57
def convert_descriptor(self, descriptor): """Convert descriptor to BigQuery """ # Fields fields = [] fallbacks = [] schema = tableschema.Schema(descriptor) for index, field in enumerate(schema.fields): converted_type = self.convert_type(field.type) if not converted_type: converted_type = 'STRING' fallbacks.append(index) mode = 'NULLABLE' if field.required: mode = 'REQUIRED' fields.append({ 'name': _slugify_field_name(field.name), 'type': converted_type, 'mode': mode, }) # Descriptor converted_descriptor = { 'fields': fields, } return (converted_descriptor, fallbacks)
[ "def", "convert_descriptor", "(", "self", ",", "descriptor", ")", ":", "# Fields", "fields", "=", "[", "]", "fallbacks", "=", "[", "]", "schema", "=", "tableschema", ".", "Schema", "(", "descriptor", ")", "for", "index", ",", "field", "in", "enumerate", ...
Convert descriptor to BigQuery
[ "Convert", "descriptor", "to", "BigQuery" ]
python
train
google/grr
grr/core/grr_response_core/lib/parser.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parser.py#L130-L136
def GetClassesByArtifact(cls, artifact_name): """Get the classes that support parsing a given artifact.""" return [ cls.classes[c] for c in cls.classes if artifact_name in cls.classes[c].supported_artifacts ]
[ "def", "GetClassesByArtifact", "(", "cls", ",", "artifact_name", ")", ":", "return", "[", "cls", ".", "classes", "[", "c", "]", "for", "c", "in", "cls", ".", "classes", "if", "artifact_name", "in", "cls", ".", "classes", "[", "c", "]", ".", "supported_...
Get the classes that support parsing a given artifact.
[ "Get", "the", "classes", "that", "support", "parsing", "a", "given", "artifact", "." ]
python
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/account_model/future_account.py
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/account_model/future_account.py#L213-L217
def realized_pnl(self): """ [float] 平仓盈亏 """ return sum(position.realized_pnl for position in six.itervalues(self._positions))
[ "def", "realized_pnl", "(", "self", ")", ":", "return", "sum", "(", "position", ".", "realized_pnl", "for", "position", "in", "six", ".", "itervalues", "(", "self", ".", "_positions", ")", ")" ]
[float] 平仓盈亏
[ "[", "float", "]", "平仓盈亏" ]
python
train
rraadd88/rohan
rohan/dandage/stat/norm.py
https://github.com/rraadd88/rohan/blob/b0643a3582a2fffc0165ace69fb80880d92bfb10/rohan/dandage/stat/norm.py#L4-L39
def quantile_norm(X): """Normalize the columns of X to each have the same distribution. Given an expression matrix (microarray data, read counts, etc) of M genes by N samples, quantile normalization ensures all samples have the same spread of data (by construction). The data across each row are averaged to obtain an average column. Each column quantile is replaced with the corresponding quantile of the average column. Parameters ---------- X : 2D array of float, shape (M, N) The input data, with M rows (genes/features) and N columns (samples). Returns ------- Xn : 2D array of float, shape (M, N) The normalized data. """ # compute the quantiles quantiles = np.mean(np.sort(X, axis=0), axis=1) # compute the column-wise ranks. Each observation is replaced with its # rank in that column: the smallest observation is replaced by 1, the # second-smallest by 2, ..., and the largest by M, the number of rows. ranks = np.apply_along_axis(stats.rankdata, 0, X) # convert ranks to integer indices from 0 to M-1 rank_indices = ranks.astype(int) - 1 # index the quantiles for each rank with the ranks matrix Xn = quantiles[rank_indices] return(Xn)
[ "def", "quantile_norm", "(", "X", ")", ":", "# compute the quantiles", "quantiles", "=", "np", ".", "mean", "(", "np", ".", "sort", "(", "X", ",", "axis", "=", "0", ")", ",", "axis", "=", "1", ")", "# compute the column-wise ranks. Each observation is replaced...
Normalize the columns of X to each have the same distribution. Given an expression matrix (microarray data, read counts, etc) of M genes by N samples, quantile normalization ensures all samples have the same spread of data (by construction). The data across each row are averaged to obtain an average column. Each column quantile is replaced with the corresponding quantile of the average column. Parameters ---------- X : 2D array of float, shape (M, N) The input data, with M rows (genes/features) and N columns (samples). Returns ------- Xn : 2D array of float, shape (M, N) The normalized data.
[ "Normalize", "the", "columns", "of", "X", "to", "each", "have", "the", "same", "distribution", "." ]
python
train
rocky/python-uncompyle6
uncompyle6/show.py
https://github.com/rocky/python-uncompyle6/blob/c5d7944e657f0ad05a0e2edd34e1acb27001abc0/uncompyle6/show.py#L18-L32
def maybe_show_asm(showasm, tokens): """ Show the asm based on the showasm flag (or file object), writing to the appropriate stream depending on the type of the flag. :param showasm: Flag which determines whether the ingested code is written to sys.stdout or not. (It is also to pass a file like object, into which the asm will be written). :param tokens: The asm tokens to show. """ if showasm: stream = showasm if hasattr(showasm, 'write') else sys.stdout for t in tokens: stream.write(str(t)) stream.write('\n')
[ "def", "maybe_show_asm", "(", "showasm", ",", "tokens", ")", ":", "if", "showasm", ":", "stream", "=", "showasm", "if", "hasattr", "(", "showasm", ",", "'write'", ")", "else", "sys", ".", "stdout", "for", "t", "in", "tokens", ":", "stream", ".", "write...
Show the asm based on the showasm flag (or file object), writing to the appropriate stream depending on the type of the flag. :param showasm: Flag which determines whether the ingested code is written to sys.stdout or not. (It is also to pass a file like object, into which the asm will be written). :param tokens: The asm tokens to show.
[ "Show", "the", "asm", "based", "on", "the", "showasm", "flag", "(", "or", "file", "object", ")", "writing", "to", "the", "appropriate", "stream", "depending", "on", "the", "type", "of", "the", "flag", "." ]
python
train
angr/angr
angr/analyses/vfg.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/vfg.py#L513-L542
def _job_sorting_key(self, job): """ Get the sorting key of a VFGJob instance. :param VFGJob job: the VFGJob object. :return: An integer that determines the order of this job in the queue. :rtype: int """ MAX_BLOCKS_PER_FUNCTION = 1000000 task_functions = list(reversed( list(task.function_address for task in self._task_stack if isinstance(task, FunctionAnalysis)) )) try: function_pos = task_functions.index(job.func_addr) except ValueError: # not in the list # it might be because we followed the wrong path, or there is a bug in the traversal algorithm # anyways, do it first l.warning('Function address %#x is not found in task stack.', job.func_addr) return 0 try: block_in_function_pos = self._ordered_node_addrs(job.func_addr).index(job.addr) except ValueError: # block not found. what? block_in_function_pos = min(job.addr - job.func_addr, MAX_BLOCKS_PER_FUNCTION - 1) return block_in_function_pos + MAX_BLOCKS_PER_FUNCTION * function_pos
[ "def", "_job_sorting_key", "(", "self", ",", "job", ")", ":", "MAX_BLOCKS_PER_FUNCTION", "=", "1000000", "task_functions", "=", "list", "(", "reversed", "(", "list", "(", "task", ".", "function_address", "for", "task", "in", "self", ".", "_task_stack", "if", ...
Get the sorting key of a VFGJob instance. :param VFGJob job: the VFGJob object. :return: An integer that determines the order of this job in the queue. :rtype: int
[ "Get", "the", "sorting", "key", "of", "a", "VFGJob", "instance", "." ]
python
train
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/mapping.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/mapping.py#L587-L649
def save(self): """ save or update this cluster in Ariane Server :return: """ LOGGER.debug("Cluster.save") post_payload = {} consolidated_containers_id = [] if self.id is not None: post_payload['clusterID'] = self.id if self.name is not None: post_payload['clusterName'] = self.name if self.containers_id is not None: consolidated_containers_id = copy.deepcopy(self.containers_id) if self.containers_2_rm is not None: for container_2_rm in self.containers_2_rm: if container_2_rm.id is None: container_2_rm.sync() consolidated_containers_id.remove(container_2_rm.id) if self.containers_2_add is not None: for container_2_add in self.containers_2_add: if container_2_add.id is None: container_2_add.save() consolidated_containers_id.append(container_2_add.id) post_payload['clusterContainersID'] = consolidated_containers_id params = SessionService.complete_transactional_req({'payload': json.dumps(post_payload)}) if MappingService.driver_type != DriverFactory.DRIVER_REST: params['OPERATION'] = 'createCluster' args = {'properties': params} else: args = { 'http_operation': 'POST', 'operation_path': '', 'parameters': params } response = ClusterService.requester.call(args) if MappingService.driver_type != DriverFactory.DRIVER_REST: response = response.get() if response.rc != 0: LOGGER.warning('Cluster.save - Problem while saving cluster' + self.name + '. 
Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + " (" + str(response.rc) + ")") if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message: raise ArianeMappingOverloadError("Cluster.save", ArianeMappingOverloadError.ERROR_MSG) # traceback.print_stack() else: self.id = response.response_content['clusterID'] if self.containers_2_add is not None: for container_2_add in self.containers_2_add: container_2_add.sync() if self.containers_2_rm is not None: for container_2_rm in self.containers_2_rm: container_2_rm.sync() self.sync(json_obj=response.response_content) self.containers_2_add.clear() self.containers_2_rm.clear()
[ "def", "save", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "\"Cluster.save\"", ")", "post_payload", "=", "{", "}", "consolidated_containers_id", "=", "[", "]", "if", "self", ".", "id", "is", "not", "None", ":", "post_payload", "[", "'clusterID'", ...
save or update this cluster in Ariane Server :return:
[ "save", "or", "update", "this", "cluster", "in", "Ariane", "Server", ":", "return", ":" ]
python
train
softlayer/softlayer-python
SoftLayer/managers/firewall.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/firewall.py#L168-L212
def _get_fwl_port_speed(self, server_id, is_virt=True): """Determines the appropriate speed for a firewall. :param int server_id: The ID of server the firewall is for :param bool is_virt: True if the server_id is for a virtual server :returns: a integer representing the Mbps speed of a firewall """ fwl_port_speed = 0 if is_virt: mask = ('primaryNetworkComponent[maxSpeed]') svc = self.client['Virtual_Guest'] primary = svc.getObject(mask=mask, id=server_id) fwl_port_speed = primary['primaryNetworkComponent']['maxSpeed'] else: mask = ('id,maxSpeed,networkComponentGroup.networkComponents') svc = self.client['Hardware_Server'] network_components = svc.getFrontendNetworkComponents( mask=mask, id=server_id) grouped = [interface['networkComponentGroup']['networkComponents'] for interface in network_components if 'networkComponentGroup' in interface] ungrouped = [interface for interface in network_components if 'networkComponentGroup' not in interface] # For each group, sum the maxSpeeds of each compoment in the # group. Put the sum for each in a new list group_speeds = [] for group in grouped: group_speed = 0 for interface in group: group_speed += interface['maxSpeed'] group_speeds.append(group_speed) # The max speed of all groups is the max of the list max_grouped_speed = max(group_speeds) max_ungrouped = 0 for interface in ungrouped: max_ungrouped = max(max_ungrouped, interface['maxSpeed']) fwl_port_speed = max(max_grouped_speed, max_ungrouped) return fwl_port_speed
[ "def", "_get_fwl_port_speed", "(", "self", ",", "server_id", ",", "is_virt", "=", "True", ")", ":", "fwl_port_speed", "=", "0", "if", "is_virt", ":", "mask", "=", "(", "'primaryNetworkComponent[maxSpeed]'", ")", "svc", "=", "self", ".", "client", "[", "'Virt...
Determines the appropriate speed for a firewall. :param int server_id: The ID of server the firewall is for :param bool is_virt: True if the server_id is for a virtual server :returns: a integer representing the Mbps speed of a firewall
[ "Determines", "the", "appropriate", "speed", "for", "a", "firewall", "." ]
python
train
nephics/mat4py
mat4py/loadmat.py
https://github.com/nephics/mat4py/blob/6c1a2ad903937437cc5f24f3c3f5aa2c5a77a1c1/mat4py/loadmat.py#L107-L122
def unpack(endian, fmt, data): """Unpack a byte string to the given format. If the byte string contains more bytes than required for the given format, the function returns a tuple of values. """ if fmt == 's': # read data as an array of chars val = struct.unpack(''.join([endian, str(len(data)), 's']), data)[0] else: # read a number of values num = len(data) // struct.calcsize(fmt) val = struct.unpack(''.join([endian, str(num), fmt]), data) if len(val) == 1: val = val[0] return val
[ "def", "unpack", "(", "endian", ",", "fmt", ",", "data", ")", ":", "if", "fmt", "==", "'s'", ":", "# read data as an array of chars", "val", "=", "struct", ".", "unpack", "(", "''", ".", "join", "(", "[", "endian", ",", "str", "(", "len", "(", "data"...
Unpack a byte string to the given format. If the byte string contains more bytes than required for the given format, the function returns a tuple of values.
[ "Unpack", "a", "byte", "string", "to", "the", "given", "format", ".", "If", "the", "byte", "string", "contains", "more", "bytes", "than", "required", "for", "the", "given", "format", "the", "function", "returns", "a", "tuple", "of", "values", "." ]
python
valid
saltstack/salt
salt/modules/debuild_pkgbuild.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/debuild_pkgbuild.py#L305-L315
def _get_src(tree_base, source, saltenv='base'): ''' Get the named sources and place them into the tree_base ''' parsed = _urlparse(source) sbase = os.path.basename(source) dest = os.path.join(tree_base, sbase) if parsed.scheme: __salt__['cp.get_url'](source, dest, saltenv=saltenv) else: shutil.copy(source, dest)
[ "def", "_get_src", "(", "tree_base", ",", "source", ",", "saltenv", "=", "'base'", ")", ":", "parsed", "=", "_urlparse", "(", "source", ")", "sbase", "=", "os", ".", "path", ".", "basename", "(", "source", ")", "dest", "=", "os", ".", "path", ".", ...
Get the named sources and place them into the tree_base
[ "Get", "the", "named", "sources", "and", "place", "them", "into", "the", "tree_base" ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/compare_comply_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L3922-L3939
def _from_dict(cls, _dict): """Initialize a Parties object from a json dictionary.""" args = {} if 'party' in _dict: args['party'] = _dict.get('party') if 'importance' in _dict: args['importance'] = _dict.get('importance') if 'role' in _dict: args['role'] = _dict.get('role') if 'addresses' in _dict: args['addresses'] = [ Address._from_dict(x) for x in (_dict.get('addresses')) ] if 'contacts' in _dict: args['contacts'] = [ Contact._from_dict(x) for x in (_dict.get('contacts')) ] return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'party'", "in", "_dict", ":", "args", "[", "'party'", "]", "=", "_dict", ".", "get", "(", "'party'", ")", "if", "'importance'", "in", "_dict", ":", "args", "[", ...
Initialize a Parties object from a json dictionary.
[ "Initialize", "a", "Parties", "object", "from", "a", "json", "dictionary", "." ]
python
train
opencobra/memote
memote/support/validation.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/support/validation.py#L52-L61
def format_failure(failure): """Format how an error or warning should be displayed.""" return "Line {}, Column {} - #{}: {} - Category: {}, Severity: {}".format( failure.getLine(), failure.getColumn(), failure.getErrorId(), failure.getMessage(), failure.getCategoryAsString(), failure.getSeverity() )
[ "def", "format_failure", "(", "failure", ")", ":", "return", "\"Line {}, Column {} - #{}: {} - Category: {}, Severity: {}\"", ".", "format", "(", "failure", ".", "getLine", "(", ")", ",", "failure", ".", "getColumn", "(", ")", ",", "failure", ".", "getErrorId", "(...
Format how an error or warning should be displayed.
[ "Format", "how", "an", "error", "or", "warning", "should", "be", "displayed", "." ]
python
train
dtmilano/AndroidViewClient
src/com/dtmilano/android/viewclient.py
https://github.com/dtmilano/AndroidViewClient/blob/7e6e83fde63af99e5e4ab959712ecf94f9881aa2/src/com/dtmilano/android/viewclient.py#L2942-L2953
def serviceResponse(self, response): ''' Checks the response received from the I{ViewServer}. @return: C{True} if the response received matches L{PARCEL_TRUE}, C{False} otherwise ''' PARCEL_TRUE = "Result: Parcel(00000000 00000001 '........')\r\n" ''' The TRUE response parcel ''' if DEBUG: print >>sys.stderr, "serviceResponse: comparing '%s' vs Parcel(%s)" % (response, PARCEL_TRUE) return response == PARCEL_TRUE
[ "def", "serviceResponse", "(", "self", ",", "response", ")", ":", "PARCEL_TRUE", "=", "\"Result: Parcel(00000000 00000001 '........')\\r\\n\"", "''' The TRUE response parcel '''", "if", "DEBUG", ":", "print", ">>", "sys", ".", "stderr", ",", "\"serviceResponse: comparing ...
Checks the response received from the I{ViewServer}. @return: C{True} if the response received matches L{PARCEL_TRUE}, C{False} otherwise
[ "Checks", "the", "response", "received", "from", "the", "I", "{", "ViewServer", "}", "." ]
python
train
silenc3r/dikicli
dikicli/core.py
https://github.com/silenc3r/dikicli/blob/53721cdf75db04e2edca5ed3f99beae7c079d980/dikicli/core.py#L133-L202
def _parse_html(html_dump, native=False): """Parse html string. Parameters ---------- html_dump : str HTML content. native : bool, optional Whether to translate from native to foreign language. Returns ------- translations : list Translations list. Raises ------ WordNotFound If word can't be found. """ # pylint: disable=too-many-locals soup = BeautifulSoup(html_dump, "html.parser") translations = [] for entity in soup.select( "div.diki-results-left-column > div > div.dictionaryEntity" ): if not native: meanings = entity.select("ol.foreignToNativeMeanings") else: meanings = entity.select("ol.nativeToForeignEntrySlices") if not meanings: # this can happen when word exists in both polish and english, e.g. 'pet' continue word = tuple(e.get_text().strip() for e in entity.select("div.hws h1 span.hw")) parts = [p.get_text().strip() for p in entity.select("span.partOfSpeech")] parts_list = [] for part, m in zip_longest(parts, meanings): meanings = [] for elem in m.find_all("li", recursive=False): examples = [] if not native: meaning = [m.get_text().strip() for m in elem.select("span.hw")] pattern = re.compile(r"\s{3,}") for e in elem.find_all("div", class_="exampleSentence"): example = re.split(pattern, e.get_text().strip()) examples.append(example) else: meaning = [elem.find("span", recursive=False).get_text().strip()] # When translating to polish 'examples' are just synonyms of translation synonyms = ", ".join( sorted( set( x.get_text().strip() for x in elem.select("ul > li > span.hw") ) ) ) if synonyms: examples.append([synonyms, None]) meanings.append(Meaning(meaning, examples)) parts_list.append(PartOfSpeech(part, meanings)) translations.append(Translation(word, parts_list)) if translations: return translations # if translation wasn't found check if there are any suggestions suggestions = soup.find("div", class_="dictionarySuggestions") if suggestions: raise WordNotFound(suggestions.get_text().strip()) raise WordNotFound("Nie znaleziono tłumaczenia wpisanej frazy")
[ "def", "_parse_html", "(", "html_dump", ",", "native", "=", "False", ")", ":", "# pylint: disable=too-many-locals", "soup", "=", "BeautifulSoup", "(", "html_dump", ",", "\"html.parser\"", ")", "translations", "=", "[", "]", "for", "entity", "in", "soup", ".", ...
Parse html string. Parameters ---------- html_dump : str HTML content. native : bool, optional Whether to translate from native to foreign language. Returns ------- translations : list Translations list. Raises ------ WordNotFound If word can't be found.
[ "Parse", "html", "string", "." ]
python
train
jay-johnson/celery-loaders
celery_loaders/work_tasks/custom_task.py
https://github.com/jay-johnson/celery-loaders/blob/aca8169c774582af42a377c27cb3980020080814/celery_loaders/work_tasks/custom_task.py#L35-L54
def on_failure(self, exc, task_id, args, kwargs, einfo): """on_failure http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-inheritance :param exc: exception :param task_id: task id :param args: arguments passed into task :param kwargs: keyword arguments passed into task :param einfo: exception info """ use_exc = str(exc) log.error(("{} FAIL - exc={} " "args={} kwargs={}") .format( self.log_label, use_exc, args, kwargs))
[ "def", "on_failure", "(", "self", ",", "exc", ",", "task_id", ",", "args", ",", "kwargs", ",", "einfo", ")", ":", "use_exc", "=", "str", "(", "exc", ")", "log", ".", "error", "(", "(", "\"{} FAIL - exc={} \"", "\"args={} kwargs={}\"", ")", ".", "format",...
on_failure http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-inheritance :param exc: exception :param task_id: task id :param args: arguments passed into task :param kwargs: keyword arguments passed into task :param einfo: exception info
[ "on_failure" ]
python
train
PGower/PyCanvas
pycanvas/apis/groups.py
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/groups.py#L433-L455
def list_group_memberships(self, group_id, filter_states=None): """ List group memberships. List the members of a group. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # OPTIONAL - filter_states """Only list memberships with the given workflow_states. By default it will return all memberships.""" if filter_states is not None: self._validate_enum(filter_states, ["accepted", "invited", "requested"]) params["filter_states"] = filter_states self.logger.debug("GET /api/v1/groups/{group_id}/memberships with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/memberships".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_group_memberships", "(", "self", ",", "group_id", ",", "filter_states", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\"", ...
List group memberships. List the members of a group.
[ "List", "group", "memberships", ".", "List", "the", "members", "of", "a", "group", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/routing_system/interface/ve/ip/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/routing_system/interface/ve/ip/__init__.py#L145-L166
def _set_ip_anycast_address(self, v, load=False): """ Setter method for ip_anycast_address, mapped from YANG variable /routing_system/interface/ve/ip/ip_anycast_address (list) If this variable is read-only (config: false) in the source YANG file, then _set_ip_anycast_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ip_anycast_address() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("ip_address",ip_anycast_address.ip_anycast_address, yang_name="ip-anycast-address", rest_name="anycast-address", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip-address', extensions={u'tailf-common': {u'callpoint': u'IntfVeAnycastIpAddrCallpoint', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'anycast-address', u'info': u'Set the IP address of an interface'}}), is_container='list', yang_name="ip-anycast-address", rest_name="anycast-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'IntfVeAnycastIpAddrCallpoint', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'anycast-address', u'info': u'Set the IP address of an interface'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """ip_anycast_address must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("ip_address",ip_anycast_address.ip_anycast_address, yang_name="ip-anycast-address", rest_name="anycast-address", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip-address', extensions={u'tailf-common': {u'callpoint': u'IntfVeAnycastIpAddrCallpoint', u'cli-suppress-mode': None, 
u'cli-compact-syntax': None, u'alt-name': u'anycast-address', u'info': u'Set the IP address of an interface'}}), is_container='list', yang_name="ip-anycast-address", rest_name="anycast-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'IntfVeAnycastIpAddrCallpoint', u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'alt-name': u'anycast-address', u'info': u'Set the IP address of an interface'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='list', is_config=True)""", }) self.__ip_anycast_address = t if hasattr(self, '_set'): self._set()
[ "def", "_set_ip_anycast_address", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ","...
Setter method for ip_anycast_address, mapped from YANG variable /routing_system/interface/ve/ip/ip_anycast_address (list) If this variable is read-only (config: false) in the source YANG file, then _set_ip_anycast_address is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_ip_anycast_address() directly.
[ "Setter", "method", "for", "ip_anycast_address", "mapped", "from", "YANG", "variable", "/", "routing_system", "/", "interface", "/", "ve", "/", "ip", "/", "ip_anycast_address", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "conf...
python
train
opentracing-contrib/python-tornado
tornado_opentracing/application.py
https://github.com/opentracing-contrib/python-tornado/blob/2c87f423c316805c6140d7f0613c800dd05b47dc/tornado_opentracing/application.py#L31-L64
def tracer_config(__init__, app, args, kwargs): """ Wraps the Tornado web application initialization so that the TornadoTracing instance is created around an OpenTracing-compatible tracer. """ __init__(*args, **kwargs) tracing = app.settings.get('opentracing_tracing') tracer_callable = app.settings.get('opentracing_tracer_callable') tracer_parameters = app.settings.get('opentracing_tracer_parameters', {}) if tracer_callable is not None: if not callable(tracer_callable): tracer_callable = _get_callable_from_name(tracer_callable) tracer = tracer_callable(**tracer_parameters) tracing = TornadoTracing(tracer) if tracing is None: tracing = TornadoTracing() # fallback to the global tracer app.settings['opentracing_tracing'] = tracing tracing._trace_all = app.settings.get('opentracing_trace_all', DEFAULT_TRACE_ALL) tracing._trace_client = app.settings.get('opentracing_trace_client', DEFAULT_TRACE_CLIENT) tracing._start_span_cb = app.settings.get('opentracing_start_span_cb', None) httpclient._set_tracing_enabled(tracing._trace_client) if tracing._trace_client: httpclient._set_tracing_info(tracing._tracer_obj, tracing._start_span_cb)
[ "def", "tracer_config", "(", "__init__", ",", "app", ",", "args", ",", "kwargs", ")", ":", "__init__", "(", "*", "args", ",", "*", "*", "kwargs", ")", "tracing", "=", "app", ".", "settings", ".", "get", "(", "'opentracing_tracing'", ")", "tracer_callable...
Wraps the Tornado web application initialization so that the TornadoTracing instance is created around an OpenTracing-compatible tracer.
[ "Wraps", "the", "Tornado", "web", "application", "initialization", "so", "that", "the", "TornadoTracing", "instance", "is", "created", "around", "an", "OpenTracing", "-", "compatible", "tracer", "." ]
python
train
andreasjansson/head-in-the-clouds
headintheclouds/dependencies/PyDbLite/PyDbLite_conversions.py
https://github.com/andreasjansson/head-in-the-clouds/blob/32c1d00d01036834dc94368e7f38b0afd3f7a82f/headintheclouds/dependencies/PyDbLite/PyDbLite_conversions.py#L8-L26
def toCSV(pdl,out=None,write_field_names=True): """Conversion from the PyDbLite Base instance pdl to the file object out open for writing in binary mode If out is not specified, the field name is the same as the PyDbLite file with extension .csv If write_field_names is True, field names are written at the top of the CSV file""" import csv if out is None: file_name = os.path.splitext(pdl.name)[0]+".csv" out = open(file_name,"wb") fields = ["__id__","__version__"]+pdl.fields writer = csv.DictWriter(out,fields) # write field names if write_field_names: writer.writerow(dict([(k,k) for k in fields])) # write values writer.writerows(pdl()) return file_name
[ "def", "toCSV", "(", "pdl", ",", "out", "=", "None", ",", "write_field_names", "=", "True", ")", ":", "import", "csv", "if", "out", "is", "None", ":", "file_name", "=", "os", ".", "path", ".", "splitext", "(", "pdl", ".", "name", ")", "[", "0", "...
Conversion from the PyDbLite Base instance pdl to the file object out open for writing in binary mode If out is not specified, the field name is the same as the PyDbLite file with extension .csv If write_field_names is True, field names are written at the top of the CSV file
[ "Conversion", "from", "the", "PyDbLite", "Base", "instance", "pdl", "to", "the", "file", "object", "out", "open", "for", "writing", "in", "binary", "mode", "If", "out", "is", "not", "specified", "the", "field", "name", "is", "the", "same", "as", "the", "...
python
train
maljovec/topopy
topopy/ContourTree.py
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/ContourTree.py#L91-L142
def build(self, X, Y, w=None, edges=None): """ Assigns data to this object and builds the Morse-Smale Complex @ In, X, an m-by-n array of values specifying m n-dimensional samples @ In, Y, a m vector of values specifying the output responses corresponding to the m samples specified by X @ In, w, an optional m vector of values specifying the weights associated to each of the m samples used. Default of None means all points will be equally weighted @ In, edges, an optional list of custom edges to use as a starting point for pruning, or in place of a computed graph. """ super(ContourTree, self).build(X, Y, w, edges) # Build the join and split trees that we will merge into the # contour tree joinTree = MergeTree(debug=self.debug) splitTree = MergeTree(debug=self.debug) joinTree.build_for_contour_tree(self, True) splitTree.build_for_contour_tree(self, False) self.augmentedEdges = dict(joinTree.augmentedEdges) self.augmentedEdges.update(dict(splitTree.augmentedEdges)) if self.short_circuit: jt = self._construct_nx_tree(joinTree, splitTree) st = self._construct_nx_tree(splitTree, joinTree) else: jt = self._construct_nx_tree(joinTree) st = self._construct_nx_tree(splitTree) self._process_tree(jt, st) self._process_tree(st, jt) # Now we have a fully augmented contour tree stored in nodes and # edges The rest is some convenience stuff for querying later self._identifyBranches() self._identifySuperGraph() if self.debug: sys.stdout.write("Sorting Nodes: ") start = time.clock() self.sortedNodes = sorted(enumerate(self.Y), key=operator.itemgetter(1)) if self.debug: end = time.clock() sys.stdout.write("%f s\n" % (end - start))
[ "def", "build", "(", "self", ",", "X", ",", "Y", ",", "w", "=", "None", ",", "edges", "=", "None", ")", ":", "super", "(", "ContourTree", ",", "self", ")", ".", "build", "(", "X", ",", "Y", ",", "w", ",", "edges", ")", "# Build the join and split...
Assigns data to this object and builds the Morse-Smale Complex @ In, X, an m-by-n array of values specifying m n-dimensional samples @ In, Y, a m vector of values specifying the output responses corresponding to the m samples specified by X @ In, w, an optional m vector of values specifying the weights associated to each of the m samples used. Default of None means all points will be equally weighted @ In, edges, an optional list of custom edges to use as a starting point for pruning, or in place of a computed graph.
[ "Assigns", "data", "to", "this", "object", "and", "builds", "the", "Morse", "-", "Smale", "Complex" ]
python
train
kensho-technologies/graphql-compiler
graphql_compiler/compiler/ir_sanity_checks.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/ir_sanity_checks.py#L131-L141
def _sanity_check_output_source_follower_blocks(ir_blocks): """Ensure there are no Traverse / Backtrack / Recurse blocks after an OutputSource block.""" seen_output_source = False for block in ir_blocks: if isinstance(block, OutputSource): seen_output_source = True elif seen_output_source: if isinstance(block, (Backtrack, Traverse, Recurse)): raise AssertionError(u'Found Backtrack / Traverse / Recurse ' u'after OutputSource block: ' u'{}'.format(ir_blocks))
[ "def", "_sanity_check_output_source_follower_blocks", "(", "ir_blocks", ")", ":", "seen_output_source", "=", "False", "for", "block", "in", "ir_blocks", ":", "if", "isinstance", "(", "block", ",", "OutputSource", ")", ":", "seen_output_source", "=", "True", "elif", ...
Ensure there are no Traverse / Backtrack / Recurse blocks after an OutputSource block.
[ "Ensure", "there", "are", "no", "Traverse", "/", "Backtrack", "/", "Recurse", "blocks", "after", "an", "OutputSource", "block", "." ]
python
train
numenta/htmresearch
htmresearch/frameworks/sp_paper/sp_metrics.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/sp_paper/sp_metrics.py#L515-L528
def classifySPoutput(targetOutputColumns, outputColumns): """ Classify the SP output @param targetOutputColumns (list) The target outputs, corresponding to different classes @param outputColumns (array) The current output @return classLabel (int) classification outcome """ numTargets, numDims = targetOutputColumns.shape overlap = np.zeros((numTargets,)) for i in range(numTargets): overlap[i] = percentOverlap(outputColumns, targetOutputColumns[i, :]) classLabel = np.argmax(overlap) return classLabel
[ "def", "classifySPoutput", "(", "targetOutputColumns", ",", "outputColumns", ")", ":", "numTargets", ",", "numDims", "=", "targetOutputColumns", ".", "shape", "overlap", "=", "np", ".", "zeros", "(", "(", "numTargets", ",", ")", ")", "for", "i", "in", "range...
Classify the SP output @param targetOutputColumns (list) The target outputs, corresponding to different classes @param outputColumns (array) The current output @return classLabel (int) classification outcome
[ "Classify", "the", "SP", "output" ]
python
train
orbingol/NURBS-Python
geomdl/voxelize.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/voxelize.py#L16-L56
def voxelize(obj, **kwargs): """ Generates binary voxel representation of the surfaces and volumes. Keyword Arguments: * ``grid_size``: size of the voxel grid. *Default: (8, 8, 8)* * ``padding``: voxel padding for in-outs finding. *Default: 10e-8* * ``use_cubes``: use cube voxels instead of cuboid ones. *Default: False* * ``num_procs``: number of concurrent processes for voxelization. *Default: 1* :param obj: input surface(s) or volume(s) :type obj: abstract.Surface or abstract.Volume :return: voxel grid and filled information :rtype: tuple """ # Get keyword arguments grid_size = kwargs.pop('grid_size', (8, 8, 8)) use_cubes = kwargs.pop('use_cubes', False) num_procs = kwargs.get('num_procs', 1) if not isinstance(grid_size, (list, tuple)): raise TypeError("Grid size must be a list or a tuple of integers") # Initialize result arrays grid = [] filled = [] # Should also work with multi surfaces and volumes for o in obj: # Generate voxel grid grid_temp = vxl.generate_voxel_grid(o.bbox, grid_size, use_cubes=use_cubes) args = [grid_temp, o.evalpts] # Find in-outs filled_temp = vxl.find_inouts_mp(*args, **kwargs) if num_procs > 1 else vxl.find_inouts_st(*args, **kwargs) # Add to result arrays grid += grid_temp filled += filled_temp # Return result arrays return grid, filled
[ "def", "voxelize", "(", "obj", ",", "*", "*", "kwargs", ")", ":", "# Get keyword arguments", "grid_size", "=", "kwargs", ".", "pop", "(", "'grid_size'", ",", "(", "8", ",", "8", ",", "8", ")", ")", "use_cubes", "=", "kwargs", ".", "pop", "(", "'use_c...
Generates binary voxel representation of the surfaces and volumes. Keyword Arguments: * ``grid_size``: size of the voxel grid. *Default: (8, 8, 8)* * ``padding``: voxel padding for in-outs finding. *Default: 10e-8* * ``use_cubes``: use cube voxels instead of cuboid ones. *Default: False* * ``num_procs``: number of concurrent processes for voxelization. *Default: 1* :param obj: input surface(s) or volume(s) :type obj: abstract.Surface or abstract.Volume :return: voxel grid and filled information :rtype: tuple
[ "Generates", "binary", "voxel", "representation", "of", "the", "surfaces", "and", "volumes", "." ]
python
train
datacamp/antlr-ast
antlr_ast/ast.py
https://github.com/datacamp/antlr-ast/blob/d08d5eb2e663bd40501d0eeddc8a731ac7e96b11/antlr_ast/ast.py#L207-L216
def isinstance(self, instance, class_name): """Check if a BaseNode is an instance of a registered dynamic class""" if isinstance(instance, BaseNode): klass = self.dynamic_node_classes.get(class_name, None) if klass: return isinstance(instance, klass) # Not an instance of a class in the registry return False else: raise TypeError("This function can only be used for BaseNode objects")
[ "def", "isinstance", "(", "self", ",", "instance", ",", "class_name", ")", ":", "if", "isinstance", "(", "instance", ",", "BaseNode", ")", ":", "klass", "=", "self", ".", "dynamic_node_classes", ".", "get", "(", "class_name", ",", "None", ")", "if", "kla...
Check if a BaseNode is an instance of a registered dynamic class
[ "Check", "if", "a", "BaseNode", "is", "an", "instance", "of", "a", "registered", "dynamic", "class" ]
python
train
pytroll/satpy
satpy/readers/ahi_hsd.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/readers/ahi_hsd.py#L310-L313
def scheduled_time(self): """Time this band was scheduled to be recorded.""" timeline = "{:04d}".format(self.basic_info['observation_timeline'][0]) return self.start_time.replace(hour=int(timeline[:2]), minute=int(timeline[2:4]), second=0, microsecond=0)
[ "def", "scheduled_time", "(", "self", ")", ":", "timeline", "=", "\"{:04d}\"", ".", "format", "(", "self", ".", "basic_info", "[", "'observation_timeline'", "]", "[", "0", "]", ")", "return", "self", ".", "start_time", ".", "replace", "(", "hour", "=", "...
Time this band was scheduled to be recorded.
[ "Time", "this", "band", "was", "scheduled", "to", "be", "recorded", "." ]
python
train
ewels/MultiQC
multiqc/modules/featureCounts/feature_counts.py
https://github.com/ewels/MultiQC/blob/2037d6322b2554146a74efbf869156ad20d4c4ec/multiqc/modules/featureCounts/feature_counts.py#L52-L103
def parse_featurecounts_report (self, f): """ Parse the featureCounts log file. """ file_names = list() parsed_data = dict() for l in f['f'].splitlines(): thisrow = list() s = l.split("\t") if len(s) < 2: continue if s[0] == 'Status': for f_name in s[1:]: file_names.append(f_name) else: k = s[0] if k not in self.featurecounts_keys: self.featurecounts_keys.append(k) for val in s[1:]: try: thisrow.append(int(val)) except ValueError: pass if len(thisrow) > 0: parsed_data[k] = thisrow # Check that this actually is a featureCounts file, as format and parsing is quite general if 'Assigned' not in parsed_data.keys(): return None for idx, f_name in enumerate(file_names): # Clean up sample name s_name = self.clean_s_name(f_name, f['root']) # Reorganised parsed data for this sample # Collect total count number data = dict() data['Total'] = 0 for k in parsed_data: data[k] = parsed_data[k][idx] data['Total'] += parsed_data[k][idx] # Calculate the percent aligned if we can try: data['percent_assigned'] = (float(data['Assigned'])/float(data['Total'])) * 100.0 except (KeyError, ZeroDivisionError): pass # Add to the main dictionary if len(data) > 1: if s_name in self.featurecounts_data: log.debug("Duplicate sample name found! Overwriting: {}".format(s_name)) self.add_data_source(f, s_name) self.featurecounts_data[s_name] = data
[ "def", "parse_featurecounts_report", "(", "self", ",", "f", ")", ":", "file_names", "=", "list", "(", ")", "parsed_data", "=", "dict", "(", ")", "for", "l", "in", "f", "[", "'f'", "]", ".", "splitlines", "(", ")", ":", "thisrow", "=", "list", "(", ...
Parse the featureCounts log file.
[ "Parse", "the", "featureCounts", "log", "file", "." ]
python
train
deschler/django-modeltranslation
modeltranslation/fields.py
https://github.com/deschler/django-modeltranslation/blob/18fec04a5105cbd83fc3759f4fda20135b3a848c/modeltranslation/fields.py#L277-L291
def south_field_triple(self): """ Returns a suitable description of this field for South. """ # We'll just introspect the _actual_ field. from south.modelsinspector import introspector try: # Check if the field provides its own 'field_class': field_class = self.translated_field.south_field_triple()[0] except AttributeError: field_class = '%s.%s' % (self.translated_field.__class__.__module__, self.translated_field.__class__.__name__) args, kwargs = introspector(self) # That's our definition! return (field_class, args, kwargs)
[ "def", "south_field_triple", "(", "self", ")", ":", "# We'll just introspect the _actual_ field.", "from", "south", ".", "modelsinspector", "import", "introspector", "try", ":", "# Check if the field provides its own 'field_class':", "field_class", "=", "self", ".", "translat...
Returns a suitable description of this field for South.
[ "Returns", "a", "suitable", "description", "of", "this", "field", "for", "South", "." ]
python
train
Yelp/py_zipkin
py_zipkin/encoding/_decoders.py
https://github.com/Yelp/py_zipkin/blob/0944d9a3fb1f1798dbb276694aeed99f2b4283ba/py_zipkin/encoding/_decoders.py#L146-L178
def _convert_from_thrift_binary_annotations(self, thrift_binary_annotations): """Accepts a thrift decoded binary annotation and converts it to a v1 binary annotation. """ tags = {} local_endpoint = None remote_endpoint = None for binary_annotation in thrift_binary_annotations: if binary_annotation.key == 'sa': remote_endpoint = self._convert_from_thrift_endpoint( thrift_endpoint=binary_annotation.host, ) else: key = binary_annotation.key annotation_type = binary_annotation.annotation_type value = binary_annotation.value if annotation_type == zipkin_core.AnnotationType.BOOL: tags[key] = "true" if value == 1 else "false" elif annotation_type == zipkin_core.AnnotationType.STRING: tags[key] = str(value) else: log.warning('Only STRING and BOOL binary annotations are ' 'supported right now and can be properly decoded.') if binary_annotation.host: local_endpoint = self._convert_from_thrift_endpoint( thrift_endpoint=binary_annotation.host, ) return tags, local_endpoint, remote_endpoint
[ "def", "_convert_from_thrift_binary_annotations", "(", "self", ",", "thrift_binary_annotations", ")", ":", "tags", "=", "{", "}", "local_endpoint", "=", "None", "remote_endpoint", "=", "None", "for", "binary_annotation", "in", "thrift_binary_annotations", ":", "if", "...
Accepts a thrift decoded binary annotation and converts it to a v1 binary annotation.
[ "Accepts", "a", "thrift", "decoded", "binary", "annotation", "and", "converts", "it", "to", "a", "v1", "binary", "annotation", "." ]
python
test
biolink/ontobio
ontobio/assoc_factory.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/ontobio/assoc_factory.py#L36-L86
def create(self, ontology=None,subject_category=None,object_category=None,evidence=None,taxon=None,relation=None, file=None, fmt=None, skim=True): """ creates an AssociationSet Currently, this uses an eager binding to a `ontobio.golr` instance. All compact associations for the particular combination of parameters are fetched. Arguments --------- ontology: an `Ontology` object subject_category: string representing category of subjects (e.g. gene, disease, variant) object_category: string representing category of objects (e.g. function, phenotype, disease) taxon: string holding NCBITaxon:nnnn ID """ meta = AssociationSetMetadata(subject_category=subject_category, object_category=object_category, taxon=taxon) if file is not None: return self.create_from_file(file=file, fmt=fmt, ontology=ontology, meta=meta, skim=skim) logging.info("Fetching assocs from store") assocs = bulk_fetch_cached(subject_category=subject_category, object_category=object_category, evidence=evidence, taxon=taxon) logging.info("Creating map for {} subjects".format(len(assocs))) amap = {} subject_label_map = {} for a in assocs: rel = a['relation'] subj = a['subject'] subject_label_map[subj] = a['subject_label'] amap[subj] = a['objects'] aset = AssociationSet(ontology=ontology, meta=meta, subject_label_map=subject_label_map, association_map=amap) return aset
[ "def", "create", "(", "self", ",", "ontology", "=", "None", ",", "subject_category", "=", "None", ",", "object_category", "=", "None", ",", "evidence", "=", "None", ",", "taxon", "=", "None", ",", "relation", "=", "None", ",", "file", "=", "None", ",",...
creates an AssociationSet Currently, this uses an eager binding to a `ontobio.golr` instance. All compact associations for the particular combination of parameters are fetched. Arguments --------- ontology: an `Ontology` object subject_category: string representing category of subjects (e.g. gene, disease, variant) object_category: string representing category of objects (e.g. function, phenotype, disease) taxon: string holding NCBITaxon:nnnn ID
[ "creates", "an", "AssociationSet" ]
python
train
minio/minio-py
minio/parsers.py
https://github.com/minio/minio-py/blob/7107c84183cf5fb4deff68c0a16ab9f1c0b4c37e/minio/parsers.py#L155-L169
def parse_multipart_upload_result(data): """ Parser for complete multipart upload response. :param data: Response data for complete multipart upload. :return: :class:`MultipartUploadResult <MultipartUploadResult>`. """ root = S3Element.fromstring('CompleteMultipartUploadResult', data) return MultipartUploadResult( root.get_child_text('Bucket'), root.get_child_text('Key'), root.get_child_text('Location'), root.get_etag_elem() )
[ "def", "parse_multipart_upload_result", "(", "data", ")", ":", "root", "=", "S3Element", ".", "fromstring", "(", "'CompleteMultipartUploadResult'", ",", "data", ")", "return", "MultipartUploadResult", "(", "root", ".", "get_child_text", "(", "'Bucket'", ")", ",", ...
Parser for complete multipart upload response. :param data: Response data for complete multipart upload. :return: :class:`MultipartUploadResult <MultipartUploadResult>`.
[ "Parser", "for", "complete", "multipart", "upload", "response", "." ]
python
train
Ezhil-Language-Foundation/open-tamil
ngram/LetterModels.py
https://github.com/Ezhil-Language-Foundation/open-tamil/blob/b7556e88878d29bbc6c944ee17cdd3f75b8ea9f0/ngram/LetterModels.py#L65-L78
def language_model(self,verbose=True): """ builds a Tamil bigram letter model """ # use a generator in corpus prev = None for next_letter in self.corpus.next_tamil_letter(): # update frequency from corpus if prev: self.letter2[prev][next_letter] += 1 if ( verbose ) : print(prev) print(next_letter) print( self.letter2[prev][next_letter] ) prev = next_letter #update always return
[ "def", "language_model", "(", "self", ",", "verbose", "=", "True", ")", ":", "# use a generator in corpus", "prev", "=", "None", "for", "next_letter", "in", "self", ".", "corpus", ".", "next_tamil_letter", "(", ")", ":", "# update frequency from corpus", "if", "...
builds a Tamil bigram letter model
[ "builds", "a", "Tamil", "bigram", "letter", "model" ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/container_state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/container_state.py#L1978-L2000
def _check_transition_target(self, transition): """Checks the validity of a transition target Checks whether the transition target is valid. :param rafcon.core.transition.Transition transition: The transition to be checked :return bool validity, str message: validity is True, when the transition is valid, False else. message gives more information especially if the transition is not valid """ to_state_id = transition.to_state to_outcome_id = transition.to_outcome if to_state_id == self.state_id: if to_outcome_id not in self.outcomes: return False, "to_outcome is not existing" else: if to_state_id not in self.states: return False, "to_state is not existing" if to_outcome_id is not None: return False, "to_outcome must be None as transition goes to child state" return True, "valid"
[ "def", "_check_transition_target", "(", "self", ",", "transition", ")", ":", "to_state_id", "=", "transition", ".", "to_state", "to_outcome_id", "=", "transition", ".", "to_outcome", "if", "to_state_id", "==", "self", ".", "state_id", ":", "if", "to_outcome_id", ...
Checks the validity of a transition target Checks whether the transition target is valid. :param rafcon.core.transition.Transition transition: The transition to be checked :return bool validity, str message: validity is True, when the transition is valid, False else. message gives more information especially if the transition is not valid
[ "Checks", "the", "validity", "of", "a", "transition", "target" ]
python
train
wavycloud/pyboto3
pyboto3/iam.py
https://github.com/wavycloud/pyboto3/blob/924957ccf994303713a4eed90b775ff2ab95b2e5/pyboto3/iam.py#L4415-L4592
def simulate_principal_policy(PolicySourceArn=None, PolicyInputList=None, ActionNames=None, ResourceArns=None, ResourcePolicy=None, ResourceOwner=None, CallerArn=None, ContextEntries=None, ResourceHandlingOption=None, MaxItems=None, Marker=None): """ Simulate how a set of IAM policies attached to an IAM entity works with a list of API actions and AWS resources to determine the policies' effective permissions. The entity can be an IAM user, group, or role. If you specify a user, then the simulation also includes all of the policies that are attached to groups that the user belongs to . You can optionally include a list of one or more additional policies specified as strings to include in the simulation. If you want to simulate only policies specified as strings, use SimulateCustomPolicy instead. You can also optionally include one resource-based policy to be evaluated with each of the resources included in the simulation. The simulation does not perform the API actions, it only checks the authorization to determine if the simulated policies allow or deny the actions. Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy . If the output is long, you can use the MaxItems and Marker parameters to paginate the results. 
See also: AWS API Documentation :example: response = client.simulate_principal_policy( PolicySourceArn='string', PolicyInputList=[ 'string', ], ActionNames=[ 'string', ], ResourceArns=[ 'string', ], ResourcePolicy='string', ResourceOwner='string', CallerArn='string', ContextEntries=[ { 'ContextKeyName': 'string', 'ContextKeyValues': [ 'string', ], 'ContextKeyType': 'string'|'stringList'|'numeric'|'numericList'|'boolean'|'booleanList'|'ip'|'ipList'|'binary'|'binaryList'|'date'|'dateList' }, ], ResourceHandlingOption='string', MaxItems=123, Marker='string' ) :type PolicySourceArn: string :param PolicySourceArn: [REQUIRED] The Amazon Resource Name (ARN) of a user, group, or role whose policies you want to include in the simulation. If you specify a user, group, or role, the simulation includes all policies that are associated with that entity. If you specify a user, the simulation also includes all policies that are attached to any groups the user belongs to. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference . :type PolicyInputList: list :param PolicyInputList: An optional list of additional policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D). (string) -- :type ActionNames: list :param ActionNames: [REQUIRED] A list of names of API actions to evaluate in the simulation. Each action is evaluated for each resource. Each action must include the service identifier, such as iam:CreateUser . 
(string) -- :type ResourceArns: list :param ResourceArns: A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response. The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference . (string) -- :type ResourcePolicy: string :param ResourcePolicy: A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation. The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D). :type ResourceOwner: string :param ResourceOwner: An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn . 
This parameter is required only if you specify a resource-based policy and account that owns the resource is different from the account that owns the simulated calling user CallerArn . :type CallerArn: string :param CallerArn: The ARN of the IAM user that you want to specify as the simulated caller of the APIs. If you do not specify a CallerArn , it defaults to the ARN of the user that you specify in PolicySourceArn , if you specified a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David ) and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob ), the result is that you simulate calling the APIs as Bob, as if Bob had David's policies. You can specify only the ARN of an IAM user. You cannot specify the ARN of an assumed role, federated user, or a service principal. CallerArn is required if you include a ResourcePolicy and the PolicySourceArn is not the ARN for an IAM user. This is required so that the resource-based policy's Principal element has a value to use in evaluating the policy. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference . :type ContextEntries: list :param ContextEntries: A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied. (dict) --Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies. This data type is used as an input parameter to `` SimulateCustomPolicy `` and `` SimulateCustomPolicy `` . ContextKeyName (string) --The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId . 
ContextKeyValues (list) --The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy. (string) -- ContextKeyType (string) --The data type of the value (or values) specified in the ContextKeyValues parameter. :type ResourceHandlingOption: string :param ResourceHandlingOption: Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation. Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide . EC2-Classic-InstanceStore instance, image, security-group EC2-Classic-EBS instance, image, security-group, volume EC2-VPC-InstanceStore instance, image, security-group, network-interface EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, subnet EC2-VPC-EBS instance, image, security-group, network-interface, volume EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, subnet, volume :type MaxItems: integer :param MaxItems: (Optional) Use this only when paginating results to indicate the maximum number of items you want in the response. 
If additional items exist beyond the maximum you specify, the IsTruncated response element is true . If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from. :type Marker: string :param Marker: Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start. :rtype: dict :return: { 'EvaluationResults': [ { 'EvalActionName': 'string', 'EvalResourceName': 'string', 'EvalDecision': 'allowed'|'explicitDeny'|'implicitDeny', 'MatchedStatements': [ { 'SourcePolicyId': 'string', 'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none', 'StartPosition': { 'Line': 123, 'Column': 123 }, 'EndPosition': { 'Line': 123, 'Column': 123 } }, ], 'MissingContextValues': [ 'string', ], 'OrganizationsDecisionDetail': { 'AllowedByOrganizations': True|False }, 'EvalDecisionDetails': { 'string': 'allowed'|'explicitDeny'|'implicitDeny' }, 'ResourceSpecificResults': [ { 'EvalResourceName': 'string', 'EvalResourceDecision': 'allowed'|'explicitDeny'|'implicitDeny', 'MatchedStatements': [ { 'SourcePolicyId': 'string', 'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none', 'StartPosition': { 'Line': 123, 'Column': 123 }, 'EndPosition': { 'Line': 123, 'Column': 123 } }, ], 'MissingContextValues': [ 'string', ], 'EvalDecisionDetails': { 'string': 'allowed'|'explicitDeny'|'implicitDeny' } }, ] }, ], 'IsTruncated': True|False, 'Marker': 'string' } :returns: (string) -- """ pass
[ "def", "simulate_principal_policy", "(", "PolicySourceArn", "=", "None", ",", "PolicyInputList", "=", "None", ",", "ActionNames", "=", "None", ",", "ResourceArns", "=", "None", ",", "ResourcePolicy", "=", "None", ",", "ResourceOwner", "=", "None", ",", "CallerAr...
Simulate how a set of IAM policies attached to an IAM entity works with a list of API actions and AWS resources to determine the policies' effective permissions. The entity can be an IAM user, group, or role. If you specify a user, then the simulation also includes all of the policies that are attached to groups that the user belongs to . You can optionally include a list of one or more additional policies specified as strings to include in the simulation. If you want to simulate only policies specified as strings, use SimulateCustomPolicy instead. You can also optionally include one resource-based policy to be evaluated with each of the resources included in the simulation. The simulation does not perform the API actions, it only checks the authorization to determine if the simulated policies allow or deny the actions. Context keys are variables maintained by AWS and its services that provide details about the context of an API query request. You can use the Condition element of an IAM policy to evaluate context keys. To get the list of context keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy . If the output is long, you can use the MaxItems and Marker parameters to paginate the results. See also: AWS API Documentation :example: response = client.simulate_principal_policy( PolicySourceArn='string', PolicyInputList=[ 'string', ], ActionNames=[ 'string', ], ResourceArns=[ 'string', ], ResourcePolicy='string', ResourceOwner='string', CallerArn='string', ContextEntries=[ { 'ContextKeyName': 'string', 'ContextKeyValues': [ 'string', ], 'ContextKeyType': 'string'|'stringList'|'numeric'|'numericList'|'boolean'|'booleanList'|'ip'|'ipList'|'binary'|'binaryList'|'date'|'dateList' }, ], ResourceHandlingOption='string', MaxItems=123, Marker='string' ) :type PolicySourceArn: string :param PolicySourceArn: [REQUIRED] The Amazon Resource Name (ARN) of a user, group, or role whose policies you want to include in the simulation. 
If you specify a user, group, or role, the simulation includes all policies that are associated with that entity. If you specify a user, the simulation also includes all policies that are attached to any groups the user belongs to. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference . :type PolicyInputList: list :param PolicyInputList: An optional list of additional policy documents to include in the simulation. Each document is specified as a string containing the complete, valid JSON text of an IAM policy. The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D). (string) -- :type ActionNames: list :param ActionNames: [REQUIRED] A list of names of API actions to evaluate in the simulation. Each action is evaluated for each resource. Each action must include the service identifier, such as iam:CreateUser . (string) -- :type ResourceArns: list :param ResourceArns: A list of ARNs of AWS resources to include in the simulation. If this parameter is not provided then the value defaults to * (all resources). Each API in the ActionNames parameter is evaluated for each resource in this list. The simulation determines the access result (allowed or denied) of each combination and reports it in the response. The simulation does not automatically retrieve policies for the specified resources. If you want to include a resource policy in the simulation, then you must include the policy as a string in the ResourcePolicy parameter. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference . 
(string) -- :type ResourcePolicy: string :param ResourcePolicy: A resource-based policy to include in the simulation provided as a string. Each resource in the simulation is treated as if it had this policy attached. You can include only one resource-based policy in a simulation. The regex pattern used to validate this parameter is a string of characters consisting of any printable ASCII character ranging from the space character (u0020) through end of the ASCII character range as well as the printable characters in the Basic Latin and Latin-1 Supplement character set (through u00FF). It also includes the special characters tab (u0009), line feed (u000A), and carriage return (u000D). :type ResourceOwner: string :param ResourceOwner: An AWS account ID that specifies the owner of any simulated resource that does not identify its owner in the resource ARN, such as an S3 bucket or object. If ResourceOwner is specified, it is also used as the account owner of any ResourcePolicy included in the simulation. If the ResourceOwner parameter is not specified, then the owner of the resources and the resource policy defaults to the account of the identity provided in CallerArn . This parameter is required only if you specify a resource-based policy and account that owns the resource is different from the account that owns the simulated calling user CallerArn . :type CallerArn: string :param CallerArn: The ARN of the IAM user that you want to specify as the simulated caller of the APIs. If you do not specify a CallerArn , it defaults to the ARN of the user that you specify in PolicySourceArn , if you specified a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David ) and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob ), the result is that you simulate calling the APIs as Bob, as if Bob had David's policies. You can specify only the ARN of an IAM user. 
You cannot specify the ARN of an assumed role, federated user, or a service principal. CallerArn is required if you include a ResourcePolicy and the PolicySourceArn is not the ARN for an IAM user. This is required so that the resource-based policy's Principal element has a value to use in evaluating the policy. For more information about ARNs, see Amazon Resource Names (ARNs) and AWS Service Namespaces in the AWS General Reference . :type ContextEntries: list :param ContextEntries: A list of context keys and corresponding values for the simulation to use. Whenever a context key is evaluated in one of the simulated IAM permission policies, the corresponding value is supplied. (dict) --Contains information about a condition context key. It includes the name of the key and specifies the value (or values, if the context key supports multiple values) to use in the simulation. This information is used when evaluating the Condition elements of the input policies. This data type is used as an input parameter to `` SimulateCustomPolicy `` and `` SimulateCustomPolicy `` . ContextKeyName (string) --The full name of a condition context key, including the service prefix. For example, aws:SourceIp or s3:VersionId . ContextKeyValues (list) --The value (or values, if the condition context key supports multiple values) to provide to the simulation for use when the key is referenced by a Condition element in an input policy. (string) -- ContextKeyType (string) --The data type of the value (or values) specified in the ContextKeyValues parameter. :type ResourceHandlingOption: string :param ResourceHandlingOption: Specifies the type of simulation to run. Different APIs that support resource-based policies require different combinations of resources. By specifying the type of simulation to run, you enable the policy simulator to enforce the presence of the required resources to ensure reliable simulation results. 
If your simulation does not match one of the following scenarios, then you can omit this parameter. The following list shows each of the supported scenario values and the resources that you must define to run the simulation. Each of the EC2 scenarios requires that you specify instance, image, and security-group resources. If your scenario includes an EBS volume, then you must specify that volume as a resource. If the EC2 scenario includes VPC, then you must supply the network-interface resource. If it includes an IP subnet, then you must specify the subnet resource. For more information on the EC2 scenario options, see Supported Platforms in the AWS EC2 User Guide . EC2-Classic-InstanceStore instance, image, security-group EC2-Classic-EBS instance, image, security-group, volume EC2-VPC-InstanceStore instance, image, security-group, network-interface EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface, subnet EC2-VPC-EBS instance, image, security-group, network-interface, volume EC2-VPC-EBS-Subnet instance, image, security-group, network-interface, subnet, volume :type MaxItems: integer :param MaxItems: (Optional) Use this only when paginating results to indicate the maximum number of items you want in the response. If additional items exist beyond the maximum you specify, the IsTruncated response element is true . If you do not include this parameter, it defaults to 100. Note that IAM might return fewer results, even when there are more results available. In that case, the IsTruncated response element returns true and Marker contains a value to include in the subsequent call that tells the service where to continue from. :type Marker: string :param Marker: Use this parameter only when paginating results and only after you receive a response indicating that the results are truncated. Set it to the value of the Marker element in the response that you received to indicate where the next call should start. 
:rtype: dict :return: { 'EvaluationResults': [ { 'EvalActionName': 'string', 'EvalResourceName': 'string', 'EvalDecision': 'allowed'|'explicitDeny'|'implicitDeny', 'MatchedStatements': [ { 'SourcePolicyId': 'string', 'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none', 'StartPosition': { 'Line': 123, 'Column': 123 }, 'EndPosition': { 'Line': 123, 'Column': 123 } }, ], 'MissingContextValues': [ 'string', ], 'OrganizationsDecisionDetail': { 'AllowedByOrganizations': True|False }, 'EvalDecisionDetails': { 'string': 'allowed'|'explicitDeny'|'implicitDeny' }, 'ResourceSpecificResults': [ { 'EvalResourceName': 'string', 'EvalResourceDecision': 'allowed'|'explicitDeny'|'implicitDeny', 'MatchedStatements': [ { 'SourcePolicyId': 'string', 'SourcePolicyType': 'user'|'group'|'role'|'aws-managed'|'user-managed'|'resource'|'none', 'StartPosition': { 'Line': 123, 'Column': 123 }, 'EndPosition': { 'Line': 123, 'Column': 123 } }, ], 'MissingContextValues': [ 'string', ], 'EvalDecisionDetails': { 'string': 'allowed'|'explicitDeny'|'implicitDeny' } }, ] }, ], 'IsTruncated': True|False, 'Marker': 'string' } :returns: (string) --
[ "Simulate", "how", "a", "set", "of", "IAM", "policies", "attached", "to", "an", "IAM", "entity", "works", "with", "a", "list", "of", "API", "actions", "and", "AWS", "resources", "to", "determine", "the", "policies", "effective", "permissions", ".", "The", ...
python
train
ramrod-project/database-brain
schema/brain/decorators.py
https://github.com/ramrod-project/database-brain/blob/b024cb44f34cabb9d80af38271ddb65c25767083/schema/brain/decorators.py#L45-L61
def deprecated_function(func_, replacement="(see docs)", *args, **kwargs): """ decorator to annotate deprecated functions usage @decorator(replacement="brain.whatever.new_function") :param func_: <callable> :param replacement: <str> :param args: positional arguments :param kwargs: :return: <func_'s return value> """ msg = "{} is deprecated, use {}\n".format(func_.__name__, replacement) stderr.write(msg) return func_(*args, **kwargs)
[ "def", "deprecated_function", "(", "func_", ",", "replacement", "=", "\"(see docs)\"", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "msg", "=", "\"{} is deprecated, use {}\\n\"", ".", "format", "(", "func_", ".", "__name__", ",", "replacement", ")", ...
decorator to annotate deprecated functions usage @decorator(replacement="brain.whatever.new_function") :param func_: <callable> :param replacement: <str> :param args: positional arguments :param kwargs: :return: <func_'s return value>
[ "decorator", "to", "annotate", "deprecated", "functions" ]
python
train
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_binaries.py
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_binaries.py#L190-L200
def GaP_Eg_Gamma(self, **kwargs): ''' Returns the Gamma-valley bandgap, Eg_Gamma, in electron Volts at a given temperature, T, in Kelvin (default: 300 K). GaP has a unique Gamma-gap temperature dependence. ''' T = kwargs.get('T', 300.) if T < 1e-4: return self.Eg_Gamma_0() return self.Eg_Gamma_0() + 0.1081 * (1 - 1. / tanh(164. / T)) # eV
[ "def", "GaP_Eg_Gamma", "(", "self", ",", "*", "*", "kwargs", ")", ":", "T", "=", "kwargs", ".", "get", "(", "'T'", ",", "300.", ")", "if", "T", "<", "1e-4", ":", "return", "self", ".", "Eg_Gamma_0", "(", ")", "return", "self", ".", "Eg_Gamma_0", ...
Returns the Gamma-valley bandgap, Eg_Gamma, in electron Volts at a given temperature, T, in Kelvin (default: 300 K). GaP has a unique Gamma-gap temperature dependence.
[ "Returns", "the", "Gamma", "-", "valley", "bandgap", "Eg_Gamma", "in", "electron", "Volts", "at", "a", "given", "temperature", "T", "in", "Kelvin", "(", "default", ":", "300", "K", ")", "." ]
python
train
hvac/hvac
hvac/api/system_backend/leader.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/system_backend/leader.py#L6-L19
def read_leader_status(self): """Read the high availability status and current leader instance of Vault. Supported methods: GET: /sys/leader. Produces: 200 application/json :return: The JSON response of the request. :rtype: dict """ api_path = '/v1/sys/leader' response = self._adapter.get( url=api_path, ) return response.json()
[ "def", "read_leader_status", "(", "self", ")", ":", "api_path", "=", "'/v1/sys/leader'", "response", "=", "self", ".", "_adapter", ".", "get", "(", "url", "=", "api_path", ",", ")", "return", "response", ".", "json", "(", ")" ]
Read the high availability status and current leader instance of Vault. Supported methods: GET: /sys/leader. Produces: 200 application/json :return: The JSON response of the request. :rtype: dict
[ "Read", "the", "high", "availability", "status", "and", "current", "leader", "instance", "of", "Vault", "." ]
python
train
openstack/monasca-common
monasca_common/kafka_lib/protocol.py
https://github.com/openstack/monasca-common/blob/61e2e00454734e2881611abec8df0d85bf7655ac/monasca_common/kafka_lib/protocol.py#L126-L158
def _decode_message_set_iter(cls, data): """ Iteratively decode a MessageSet Reads repeated elements of (offset, message), calling decode_message to decode a single message. Since compressed messages contain futher MessageSets, these two methods have been decoupled so that they may recurse easily. """ cur = 0 read_message = False while cur < len(data): try: ((offset, ), cur) = relative_unpack('>q', data, cur) (msg, cur) = read_int_string(data, cur) for (offset, message) in KafkaProtocol._decode_message(msg, offset): read_message = True yield OffsetAndMessage(offset, message) except BufferUnderflowError: # NOTE: Not sure this is correct error handling: # Is it possible to get a BUE if the message set is somewhere # in the middle of the fetch response? If so, we probably have # an issue that's not fetch size too small. # Aren't we ignoring errors if we fail to unpack data by # raising StopIteration()? # If _decode_message() raises a ChecksumError, couldn't that # also be due to the fetch size being too small? if read_message is False: # If we get a partial read of a message, but haven't # yielded anything there's a problem raise ConsumerFetchSizeTooSmall() else: raise StopIteration()
[ "def", "_decode_message_set_iter", "(", "cls", ",", "data", ")", ":", "cur", "=", "0", "read_message", "=", "False", "while", "cur", "<", "len", "(", "data", ")", ":", "try", ":", "(", "(", "offset", ",", ")", ",", "cur", ")", "=", "relative_unpack",...
Iteratively decode a MessageSet Reads repeated elements of (offset, message), calling decode_message to decode a single message. Since compressed messages contain futher MessageSets, these two methods have been decoupled so that they may recurse easily.
[ "Iteratively", "decode", "a", "MessageSet" ]
python
train
bububa/pyTOP
pyTOP/taobaoke.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/taobaoke.py#L30-L42
def get(self, date, page_no=1, page_size=40, fields=[]): '''taobao.taobaoke.report.get 淘宝客报表查询 淘宝客报表查询''' request = TOPRequest('taobao.taobaoke.items.get') request['date'] = date request['page_no'] = page_no request['page_size'] = page_size if not fields: fields = self.fields request['fields'] = fields self.create(self.execute(request)['taobaoke_report']) return self
[ "def", "get", "(", "self", ",", "date", ",", "page_no", "=", "1", ",", "page_size", "=", "40", ",", "fields", "=", "[", "]", ")", ":", "request", "=", "TOPRequest", "(", "'taobao.taobaoke.items.get'", ")", "request", "[", "'date'", "]", "=", "date", ...
taobao.taobaoke.report.get 淘宝客报表查询 淘宝客报表查询
[ "taobao", ".", "taobaoke", ".", "report", ".", "get", "淘宝客报表查询", "淘宝客报表查询" ]
python
train
chrislit/abydos
abydos/stats/_mean.py
https://github.com/chrislit/abydos/blob/165466b3ff6afd8024a4c8660421b0c4e7773db9/abydos/stats/_mean.py#L289-L333
def imean(nums): r"""Return identric (exponential) mean. The identric mean of two numbers x and y is: x if x = y otherwise :math:`\frac{1}{e} \sqrt[x-y]{\frac{x^x}{y^y}}` Cf. https://en.wikipedia.org/wiki/Identric_mean Parameters ---------- nums : list A series of numbers Returns ------- float The identric mean of nums Raises ------ AttributeError imean supports no more than two values Examples -------- >>> imean([1, 2]) 1.4715177646857693 >>> imean([1, 0]) nan >>> imean([2, 4]) 2.9430355293715387 """ if len(nums) == 1: return nums[0] if len(nums) > 2: raise AttributeError('imean supports no more than two values') if nums[0] <= 0 or nums[1] <= 0: return float('NaN') elif nums[0] == nums[1]: return nums[0] return (1 / math.e) * (nums[0] ** nums[0] / nums[1] ** nums[1]) ** ( 1 / (nums[0] - nums[1]) )
[ "def", "imean", "(", "nums", ")", ":", "if", "len", "(", "nums", ")", "==", "1", ":", "return", "nums", "[", "0", "]", "if", "len", "(", "nums", ")", ">", "2", ":", "raise", "AttributeError", "(", "'imean supports no more than two values'", ")", "if", ...
r"""Return identric (exponential) mean. The identric mean of two numbers x and y is: x if x = y otherwise :math:`\frac{1}{e} \sqrt[x-y]{\frac{x^x}{y^y}}` Cf. https://en.wikipedia.org/wiki/Identric_mean Parameters ---------- nums : list A series of numbers Returns ------- float The identric mean of nums Raises ------ AttributeError imean supports no more than two values Examples -------- >>> imean([1, 2]) 1.4715177646857693 >>> imean([1, 0]) nan >>> imean([2, 4]) 2.9430355293715387
[ "r", "Return", "identric", "(", "exponential", ")", "mean", "." ]
python
valid
spacetelescope/drizzlepac
drizzlepac/tweakutils.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/tweakutils.py#L721-L724
def gauss(x, sigma): """ Compute 1-D value of gaussian at position x relative to center.""" return (np.exp(-np.power(x, 2) / (2 * np.power(sigma, 2))) / (sigma * np.sqrt(2 * np.pi)))
[ "def", "gauss", "(", "x", ",", "sigma", ")", ":", "return", "(", "np", ".", "exp", "(", "-", "np", ".", "power", "(", "x", ",", "2", ")", "/", "(", "2", "*", "np", ".", "power", "(", "sigma", ",", "2", ")", ")", ")", "/", "(", "sigma", ...
Compute 1-D value of gaussian at position x relative to center.
[ "Compute", "1", "-", "D", "value", "of", "gaussian", "at", "position", "x", "relative", "to", "center", "." ]
python
train
msmbuilder/msmbuilder
msmbuilder/tpt/hub.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/tpt/hub.py#L86-L136
def hub_scores(msm, waypoints=None): """ Calculate the hub score for one or more waypoints The "hub score" is a measure of how well traveled a certain state or set of states is in a network. Specifically, it is the fraction of times that a walker visits a state en route from some state A to another state B, averaged over all combinations of A and B. Parameters ---------- msm : msmbuilder.MarkovStateModel MSM to analyze waypoints : array_like, int, optional The index of the intermediate state (or more than one). If None, then all waypoints will be used Returns ------- hub_score : float The hub score for the waypoint References ---------- .. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052. """ n_states = msm.n_states_ if isinstance(waypoints, int): waypoints = [waypoints] elif waypoints is None: waypoints = xrange(n_states) elif not (isinstance(waypoints, list) or isinstance(waypoints, np.ndarray)): raise ValueError("waypoints (%s) must be an int, a list, or None" % str(waypoints)) hub_scores = [] for waypoint in waypoints: other_states = (i for i in xrange(n_states) if i != waypoint) # calculate the hub score for this waypoint hub_score = 0.0 for (source, sink) in itertools.permutations(other_states, 2): hub_score += fraction_visited(source, sink, waypoint, msm) hub_score /= float((n_states - 1) * (n_states - 2)) hub_scores.append(hub_score) return np.array(hub_scores)
[ "def", "hub_scores", "(", "msm", ",", "waypoints", "=", "None", ")", ":", "n_states", "=", "msm", ".", "n_states_", "if", "isinstance", "(", "waypoints", ",", "int", ")", ":", "waypoints", "=", "[", "waypoints", "]", "elif", "waypoints", "is", "None", ...
Calculate the hub score for one or more waypoints The "hub score" is a measure of how well traveled a certain state or set of states is in a network. Specifically, it is the fraction of times that a walker visits a state en route from some state A to another state B, averaged over all combinations of A and B. Parameters ---------- msm : msmbuilder.MarkovStateModel MSM to analyze waypoints : array_like, int, optional The index of the intermediate state (or more than one). If None, then all waypoints will be used Returns ------- hub_score : float The hub score for the waypoint References ---------- .. [1] Dickson & Brooks (2012), J. Chem. Theory Comput., 8, 3044-3052.
[ "Calculate", "the", "hub", "score", "for", "one", "or", "more", "waypoints" ]
python
train
greyli/flask-avatars
flask_avatars/identicon.py
https://github.com/greyli/flask-avatars/blob/13eca90342349c58962fef0ec541edcb1b009c70/flask_avatars/identicon.py#L87-L93
def _get_pastel_colour(self, lighten=127): """ Create a pastel colour hex colour string """ def r(): return random.randint(0, 128) + lighten return r(), r(), r()
[ "def", "_get_pastel_colour", "(", "self", ",", "lighten", "=", "127", ")", ":", "def", "r", "(", ")", ":", "return", "random", ".", "randint", "(", "0", ",", "128", ")", "+", "lighten", "return", "r", "(", ")", ",", "r", "(", ")", ",", "r", "("...
Create a pastel colour hex colour string
[ "Create", "a", "pastel", "colour", "hex", "colour", "string" ]
python
train
liip/taxi
taxi/timesheet/entry.py
https://github.com/liip/taxi/blob/269423c1f1ab571bd01a522819afe3e325bfbff6/taxi/timesheet/entry.py#L365-L375
def delete_date(self, date): """ Remove the date line from the textual representation. This doesn't remove any entry line. """ self.lines = [ line for line in self.lines if not isinstance(line, DateLine) or line.date != date ] self.lines = trim(self.lines)
[ "def", "delete_date", "(", "self", ",", "date", ")", ":", "self", ".", "lines", "=", "[", "line", "for", "line", "in", "self", ".", "lines", "if", "not", "isinstance", "(", "line", ",", "DateLine", ")", "or", "line", ".", "date", "!=", "date", "]",...
Remove the date line from the textual representation. This doesn't remove any entry line.
[ "Remove", "the", "date", "line", "from", "the", "textual", "representation", ".", "This", "doesn", "t", "remove", "any", "entry", "line", "." ]
python
train
honeynet/beeswarm
beeswarm/shared/asciify.py
https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/shared/asciify.py#L30-L46
def _asciify_dict(data): """ Ascii-fies dict keys and values """ ret = {} for key, value in data.iteritems(): if isinstance(key, unicode): key = _remove_accents(key) key = key.encode('utf-8') # # note new if if isinstance(value, unicode): value = _remove_accents(value) value = value.encode('utf-8') elif isinstance(value, list): value = _asciify_list(value) elif isinstance(value, dict): value = _asciify_dict(value) ret[key] = value return ret
[ "def", "_asciify_dict", "(", "data", ")", ":", "ret", "=", "{", "}", "for", "key", ",", "value", "in", "data", ".", "iteritems", "(", ")", ":", "if", "isinstance", "(", "key", ",", "unicode", ")", ":", "key", "=", "_remove_accents", "(", "key", ")"...
Ascii-fies dict keys and values
[ "Ascii", "-", "fies", "dict", "keys", "and", "values" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_port_profile.py#L36-L48
def port_profile_vlan_profile_switchport_basic_basic(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile") name_key = ET.SubElement(port_profile, "name") name_key.text = kwargs.pop('name') vlan_profile = ET.SubElement(port_profile, "vlan-profile") switchport_basic = ET.SubElement(vlan_profile, "switchport-basic") basic = ET.SubElement(switchport_basic, "basic") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "port_profile_vlan_profile_switchport_basic_basic", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "port_profile", "=", "ET", ".", "SubElement", "(", "config", ",", "\"port-profile\"", ",", "...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
boriel/zxbasic
asmlex.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmlex.py#L377-L383
def t_INITIAL_SHARP(self, t): r'\#' if self.find_column(t) == 1: t.lexer.begin('preproc') else: self.t_INITIAL_preproc_error(t)
[ "def", "t_INITIAL_SHARP", "(", "self", ",", "t", ")", ":", "if", "self", ".", "find_column", "(", "t", ")", "==", "1", ":", "t", ".", "lexer", ".", "begin", "(", "'preproc'", ")", "else", ":", "self", ".", "t_INITIAL_preproc_error", "(", "t", ")" ]
r'\#
[ "r", "\\", "#" ]
python
train
mongodb/mongo-python-driver
pymongo/collection.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/pymongo/collection.py#L648-L696
def insert_one(self, document, bypass_document_validation=False, session=None): """Insert a single document. >>> db.test.count_documents({'x': 1}) 0 >>> result = db.test.insert_one({'x': 1}) >>> result.inserted_id ObjectId('54f112defba522406c9cc208') >>> db.test.find_one({'x': 1}) {u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')} :Parameters: - `document`: The document to insert. Must be a mutable mapping type. If the document does not have an _id field one will be added automatically. - `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. :Returns: - An instance of :class:`~pymongo.results.InsertOneResult`. .. seealso:: :ref:`writes-and-ids` .. note:: `bypass_document_validation` requires server version **>= 3.2** .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.2 Added bypass_document_validation support .. versionadded:: 3.0 """ common.validate_is_document_type("document", document) if not (isinstance(document, RawBSONDocument) or "_id" in document): document["_id"] = ObjectId() write_concern = self._write_concern_for(session) return InsertOneResult( self._insert(document, write_concern=write_concern, bypass_doc_val=bypass_document_validation, session=session), write_concern.acknowledged)
[ "def", "insert_one", "(", "self", ",", "document", ",", "bypass_document_validation", "=", "False", ",", "session", "=", "None", ")", ":", "common", ".", "validate_is_document_type", "(", "\"document\"", ",", "document", ")", "if", "not", "(", "isinstance", "(...
Insert a single document. >>> db.test.count_documents({'x': 1}) 0 >>> result = db.test.insert_one({'x': 1}) >>> result.inserted_id ObjectId('54f112defba522406c9cc208') >>> db.test.find_one({'x': 1}) {u'x': 1, u'_id': ObjectId('54f112defba522406c9cc208')} :Parameters: - `document`: The document to insert. Must be a mutable mapping type. If the document does not have an _id field one will be added automatically. - `bypass_document_validation`: (optional) If ``True``, allows the write to opt-out of document level validation. Default is ``False``. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. :Returns: - An instance of :class:`~pymongo.results.InsertOneResult`. .. seealso:: :ref:`writes-and-ids` .. note:: `bypass_document_validation` requires server version **>= 3.2** .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.2 Added bypass_document_validation support .. versionadded:: 3.0
[ "Insert", "a", "single", "document", "." ]
python
train
pyamg/pyamg
pyamg/util/utils.py
https://github.com/pyamg/pyamg/blob/89dc54aa27e278f65d2f54bdaf16ab97d7768fa6/pyamg/util/utils.py#L590-L679
def get_block_diag(A, blocksize, inv_flag=True): """Return the block diagonal of A, in array form. Parameters ---------- A : csr_matrix assumed to be square blocksize : int square block size for the diagonal inv_flag : bool if True, return the inverse of the block diagonal Returns ------- block_diag : array block diagonal of A in array form, array size is (A.shape[0]/blocksize, blocksize, blocksize) Examples -------- >>> from scipy import arange >>> from scipy.sparse import csr_matrix >>> from pyamg.util import get_block_diag >>> A = csr_matrix(arange(36).reshape(6,6)) >>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=False) >>> print block_diag_inv [[[ 0. 1.] [ 6. 7.]] <BLANKLINE> [[ 14. 15.] [ 20. 21.]] <BLANKLINE> [[ 28. 29.] [ 34. 35.]]] >>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True) """ if not isspmatrix(A): raise TypeError('Expected sparse matrix') if A.shape[0] != A.shape[1]: raise ValueError("Expected square matrix") if sp.mod(A.shape[0], blocksize) != 0: raise ValueError("blocksize and A.shape must be compatible") # If the block diagonal of A already exists, return that if hasattr(A, 'block_D_inv') and inv_flag: if (A.block_D_inv.shape[1] == blocksize) and\ (A.block_D_inv.shape[2] == blocksize) and \ (A.block_D_inv.shape[0] == int(A.shape[0]/blocksize)): return A.block_D_inv elif hasattr(A, 'block_D') and (not inv_flag): if (A.block_D.shape[1] == blocksize) and\ (A.block_D.shape[2] == blocksize) and \ (A.block_D.shape[0] == int(A.shape[0]/blocksize)): return A.block_D # Convert to BSR if not isspmatrix_bsr(A): A = bsr_matrix(A, blocksize=(blocksize, blocksize)) if A.blocksize != (blocksize, blocksize): A = A.tobsr(blocksize=(blocksize, blocksize)) # Peel off block diagonal by extracting block entries from the now BSR # matrix A A = A.asfptype() block_diag = sp.zeros((int(A.shape[0]/blocksize), blocksize, blocksize), dtype=A.dtype) AAIJ = (sp.arange(1, A.indices.shape[0]+1), A.indices, A.indptr) shape = 
(int(A.shape[0]/blocksize), int(A.shape[0]/blocksize)) diag_entries = csr_matrix(AAIJ, shape=shape).diagonal() diag_entries -= 1 nonzero_mask = (diag_entries != -1) diag_entries = diag_entries[nonzero_mask] if diag_entries.shape != (0,): block_diag[nonzero_mask, :, :] = A.data[diag_entries, :, :] if inv_flag: # Invert each block if block_diag.shape[1] < 7: # This specialized routine lacks robustness for large matrices pyamg.amg_core.pinv_array(block_diag.ravel(), block_diag.shape[0], block_diag.shape[1], 'T') else: pinv_array(block_diag) A.block_D_inv = block_diag else: A.block_D = block_diag return block_diag
[ "def", "get_block_diag", "(", "A", ",", "blocksize", ",", "inv_flag", "=", "True", ")", ":", "if", "not", "isspmatrix", "(", "A", ")", ":", "raise", "TypeError", "(", "'Expected sparse matrix'", ")", "if", "A", ".", "shape", "[", "0", "]", "!=", "A", ...
Return the block diagonal of A, in array form. Parameters ---------- A : csr_matrix assumed to be square blocksize : int square block size for the diagonal inv_flag : bool if True, return the inverse of the block diagonal Returns ------- block_diag : array block diagonal of A in array form, array size is (A.shape[0]/blocksize, blocksize, blocksize) Examples -------- >>> from scipy import arange >>> from scipy.sparse import csr_matrix >>> from pyamg.util import get_block_diag >>> A = csr_matrix(arange(36).reshape(6,6)) >>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=False) >>> print block_diag_inv [[[ 0. 1.] [ 6. 7.]] <BLANKLINE> [[ 14. 15.] [ 20. 21.]] <BLANKLINE> [[ 28. 29.] [ 34. 35.]]] >>> block_diag_inv = get_block_diag(A, blocksize=2, inv_flag=True)
[ "Return", "the", "block", "diagonal", "of", "A", "in", "array", "form", "." ]
python
train
googleapis/google-cloud-python
dns/google/cloud/dns/changes.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dns/google/cloud/dns/changes.py#L193-L215
def _build_resource(self): """Generate a resource for ``create``.""" additions = [ { "name": added.name, "type": added.record_type, "ttl": str(added.ttl), "rrdatas": added.rrdatas, } for added in self.additions ] deletions = [ { "name": deleted.name, "type": deleted.record_type, "ttl": str(deleted.ttl), "rrdatas": deleted.rrdatas, } for deleted in self.deletions ] return {"additions": additions, "deletions": deletions}
[ "def", "_build_resource", "(", "self", ")", ":", "additions", "=", "[", "{", "\"name\"", ":", "added", ".", "name", ",", "\"type\"", ":", "added", ".", "record_type", ",", "\"ttl\"", ":", "str", "(", "added", ".", "ttl", ")", ",", "\"rrdatas\"", ":", ...
Generate a resource for ``create``.
[ "Generate", "a", "resource", "for", "create", "." ]
python
train
istresearch/scrapy-cluster
utils/scutils/settings_wrapper.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/settings_wrapper.py#L111-L125
def _convert_to_dict(self, setting): ''' Converts a settings file into a dictionary, ignoring python defaults @param setting: A loaded setting module ''' the_dict = {} set = dir(setting) for key in set: if key in self.ignore: continue value = getattr(setting, key) the_dict[key] = value return the_dict
[ "def", "_convert_to_dict", "(", "self", ",", "setting", ")", ":", "the_dict", "=", "{", "}", "set", "=", "dir", "(", "setting", ")", "for", "key", "in", "set", ":", "if", "key", "in", "self", ".", "ignore", ":", "continue", "value", "=", "getattr", ...
Converts a settings file into a dictionary, ignoring python defaults @param setting: A loaded setting module
[ "Converts", "a", "settings", "file", "into", "a", "dictionary", "ignoring", "python", "defaults" ]
python
train
sprockets/sprockets.mixins.http
sprockets/mixins/http/__init__.py
https://github.com/sprockets/sprockets.mixins.http/blob/982219a10be979668726f573f324415fcf2020c8/sprockets/mixins/http/__init__.py#L212-L236
def _deserialize(self): """Try and deserialize a response body based upon the specified content type. :rtype: mixed """ if not self._responses or not self._responses[-1].body: return None if 'Content-Type' not in self._responses[-1].headers: return self._responses[-1].body try: content_type = algorithms.select_content_type( [headers.parse_content_type( self._responses[-1].headers['Content-Type'])], AVAILABLE_CONTENT_TYPES) except errors.NoMatch: return self._responses[-1].body if content_type[0] == CONTENT_TYPE_JSON: return self._decode( self._json.loads(self._decode(self._responses[-1].body))) elif content_type[0] == CONTENT_TYPE_MSGPACK: # pragma: nocover return self._decode( self._msgpack.unpackb(self._responses[-1].body))
[ "def", "_deserialize", "(", "self", ")", ":", "if", "not", "self", ".", "_responses", "or", "not", "self", ".", "_responses", "[", "-", "1", "]", ".", "body", ":", "return", "None", "if", "'Content-Type'", "not", "in", "self", ".", "_responses", "[", ...
Try and deserialize a response body based upon the specified content type. :rtype: mixed
[ "Try", "and", "deserialize", "a", "response", "body", "based", "upon", "the", "specified", "content", "type", "." ]
python
train
pyca/pyopenssl
src/OpenSSL/crypto.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/crypto.py#L821-L834
def get_short_name(self): """ Returns the short type name of this X.509 extension. The result is a byte string such as :py:const:`b"basicConstraints"`. :return: The short type name. :rtype: :py:data:`bytes` .. versionadded:: 0.12 """ obj = _lib.X509_EXTENSION_get_object(self._extension) nid = _lib.OBJ_obj2nid(obj) return _ffi.string(_lib.OBJ_nid2sn(nid))
[ "def", "get_short_name", "(", "self", ")", ":", "obj", "=", "_lib", ".", "X509_EXTENSION_get_object", "(", "self", ".", "_extension", ")", "nid", "=", "_lib", ".", "OBJ_obj2nid", "(", "obj", ")", "return", "_ffi", ".", "string", "(", "_lib", ".", "OBJ_ni...
Returns the short type name of this X.509 extension. The result is a byte string such as :py:const:`b"basicConstraints"`. :return: The short type name. :rtype: :py:data:`bytes` .. versionadded:: 0.12
[ "Returns", "the", "short", "type", "name", "of", "this", "X", ".", "509", "extension", "." ]
python
test
hannorein/rebound
rebound/simulation.py
https://github.com/hannorein/rebound/blob/bb0f814c98e629401acaab657cae2304b0e003f7/rebound/simulation.py#L935-L978
def units(self): """ Tuple of the units for length, time and mass. Can be set in any order, and strings are not case-sensitive. See ipython_examples/Units.ipynb for more information. You can check the units' exact values and add Additional units in rebound/rebound/units.py. Units should be set before adding particles to the simulation (will give error otherwise). Currently supported Units ------------------------- Times: Hr : Hours Yr : Julian years Jyr : Julian years Sidereal_yr : Sidereal year Yr2pi : Year divided by 2pi, with year defined as orbital period of planet at 1AU around 1Msun star Kyr : Kiloyears (Julian) Myr : Megayears (Julian) Gyr : Gigayears (Julian) Lengths: M : Meters Cm : Centimeters Km : Kilometers AU : Astronomical Units Masses: Kg : Kilograms Msun : Solar masses Mmercury : Mercury masses Mvenus : Venus masses Mearth : Earth masses Mmars : Mars masses Mjupiter : Jupiter masses Msaturn : Saturn masses Muranus : Neptune masses Mpluto : Pluto masses Examples -------- >>> sim = rebound.Simulation() >>> sim.units = ('yr', 'AU', 'Msun') """ return {'length':hash_to_unit(self.python_unit_l), 'mass':hash_to_unit(self.python_unit_m), 'time':hash_to_unit(self.python_unit_t)}
[ "def", "units", "(", "self", ")", ":", "return", "{", "'length'", ":", "hash_to_unit", "(", "self", ".", "python_unit_l", ")", ",", "'mass'", ":", "hash_to_unit", "(", "self", ".", "python_unit_m", ")", ",", "'time'", ":", "hash_to_unit", "(", "self", "....
Tuple of the units for length, time and mass. Can be set in any order, and strings are not case-sensitive. See ipython_examples/Units.ipynb for more information. You can check the units' exact values and add Additional units in rebound/rebound/units.py. Units should be set before adding particles to the simulation (will give error otherwise). Currently supported Units ------------------------- Times: Hr : Hours Yr : Julian years Jyr : Julian years Sidereal_yr : Sidereal year Yr2pi : Year divided by 2pi, with year defined as orbital period of planet at 1AU around 1Msun star Kyr : Kiloyears (Julian) Myr : Megayears (Julian) Gyr : Gigayears (Julian) Lengths: M : Meters Cm : Centimeters Km : Kilometers AU : Astronomical Units Masses: Kg : Kilograms Msun : Solar masses Mmercury : Mercury masses Mvenus : Venus masses Mearth : Earth masses Mmars : Mars masses Mjupiter : Jupiter masses Msaturn : Saturn masses Muranus : Neptune masses Mpluto : Pluto masses Examples -------- >>> sim = rebound.Simulation() >>> sim.units = ('yr', 'AU', 'Msun')
[ "Tuple", "of", "the", "units", "for", "length", "time", "and", "mass", ".", "Can", "be", "set", "in", "any", "order", "and", "strings", "are", "not", "case", "-", "sensitive", ".", "See", "ipython_examples", "/", "Units", ".", "ipynb", "for", "more", "...
python
train
apache/spark
python/pyspark/streaming/dstream.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/streaming/dstream.py#L343-L352
def cogroup(self, other, numPartitions=None): """ Return a new DStream by applying 'cogroup' between RDDs of this DStream and `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions` partitions. """ if numPartitions is None: numPartitions = self._sc.defaultParallelism return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other)
[ "def", "cogroup", "(", "self", ",", "other", ",", "numPartitions", "=", "None", ")", ":", "if", "numPartitions", "is", "None", ":", "numPartitions", "=", "self", ".", "_sc", ".", "defaultParallelism", "return", "self", ".", "transformWith", "(", "lambda", ...
Return a new DStream by applying 'cogroup' between RDDs of this DStream and `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
[ "Return", "a", "new", "DStream", "by", "applying", "cogroup", "between", "RDDs", "of", "this", "DStream", "and", "other", "DStream", "." ]
python
train
jeffknupp/sandman2
sandman2/service.py
https://github.com/jeffknupp/sandman2/blob/1ce21d6f7a6df77fa96fab694b0f9bb8469c166b/sandman2/service.py#L112-L129
def patch(self, resource_id): """Return an HTTP response object resulting from an HTTP PATCH call. :returns: ``HTTP 200`` if the resource already exists :returns: ``HTTP 400`` if the request is malformed :returns: ``HTTP 404`` if the resource is not found :param resource_id: The value of the resource's primary key """ resource = self._resource(resource_id) error_message = is_valid_method(self.__model__, resource) if error_message: raise BadRequestException(error_message) if not request.json: raise BadRequestException('No JSON data received') resource.update(request.json) db.session().merge(resource) db.session().commit() return jsonify(resource)
[ "def", "patch", "(", "self", ",", "resource_id", ")", ":", "resource", "=", "self", ".", "_resource", "(", "resource_id", ")", "error_message", "=", "is_valid_method", "(", "self", ".", "__model__", ",", "resource", ")", "if", "error_message", ":", "raise", ...
Return an HTTP response object resulting from an HTTP PATCH call. :returns: ``HTTP 200`` if the resource already exists :returns: ``HTTP 400`` if the request is malformed :returns: ``HTTP 404`` if the resource is not found :param resource_id: The value of the resource's primary key
[ "Return", "an", "HTTP", "response", "object", "resulting", "from", "an", "HTTP", "PATCH", "call", "." ]
python
train
facelessuser/backrefs
backrefs/_bre_parse.py
https://github.com/facelessuser/backrefs/blob/3b3d60f5d57b02044f880aa29c9c5add0e31a34f/backrefs/_bre_parse.py#L624-L634
def main_group(self, i): """The main group: group 0.""" current = [] while True: try: t = next(i) current.extend(self.normal(t, i)) except StopIteration: break return current
[ "def", "main_group", "(", "self", ",", "i", ")", ":", "current", "=", "[", "]", "while", "True", ":", "try", ":", "t", "=", "next", "(", "i", ")", "current", ".", "extend", "(", "self", ".", "normal", "(", "t", ",", "i", ")", ")", "except", "...
The main group: group 0.
[ "The", "main", "group", ":", "group", "0", "." ]
python
train
saltstack/salt
salt/states/zone.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zone.py#L202-L253
def property_absent(name, property): ''' Ensure property is absent name : string name of the zone property : string name of property .. note:: This does a zoneacfg clear call. So the property may be reset to a default value! Does has the side effect of always having to be called. ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} zones = __salt__['zoneadm.list'](installed=True, configured=True) if name in zones: ## zone exists zonecfg = __salt__['zonecfg.info'](name, show_all=True) if property in zonecfg: if __opts__['test']: ret['result'] = True else: # clear property zonecfg_res = __salt__['zonecfg.clear_property'](name, property) zonecfg_new = __salt__['zonecfg.info'](name, show_all=True) ret['result'] = zonecfg_res['status'] if 'messages' in zonecfg_res: ret['comment'] = zonecfg_res['message'] if ret['result']: if property not in zonecfg_new: ret['changes'][property] = None elif zonecfg[property] != zonecfg_new[property]: ret['changes'][property] = zonecfg_new[property] if ret['comment'] == '': ret['comment'] = 'The property {0} was cleared!'.format(property) elif ret['comment'] == '': if ret['comment'] == '': ret['comment'] = 'The property {0} did not get cleared!'.format(property) else: ret['result'] = True ret['comment'] = 'The property {0} does not exist!'.format(property) else: ## zone does not exist ret['result'] = False ret['comment'] = 'The zone {0} is not in the configured, installed, or booted state.'.format(name) return ret
[ "def", "property_absent", "(", "name", ",", "property", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "None", ",", "'comment'", ":", "''", "}", "zones", "=", "__salt__", "[", "'zoneadm.list'", ...
Ensure property is absent name : string name of the zone property : string name of property .. note:: This does a zoneacfg clear call. So the property may be reset to a default value! Does has the side effect of always having to be called.
[ "Ensure", "property", "is", "absent" ]
python
train
astooke/gtimer
gtimer/public/io.py
https://github.com/astooke/gtimer/blob/2146dab459e5d959feb291821733d3d3ba7c523c/gtimer/public/io.py#L170-L193
def load_pkl(filenames): """ Unpickle file contents. Args: filenames (str): Can be one or a list or tuple of filenames to retrieve. Returns: Times: A single object, or from a collection of filenames, a list of Times objects. Raises: TypeError: If any loaded object is not a Times object. """ if not isinstance(filenames, (list, tuple)): filenames = [filenames] times = [] for name in filenames: name = str(name) with open(name, 'rb') as file: loaded_obj = pickle.load(file) if not isinstance(loaded_obj, Times): raise TypeError("At least one loaded object is not a Times data object.") times.append(loaded_obj) return times if len(times) > 1 else times[0]
[ "def", "load_pkl", "(", "filenames", ")", ":", "if", "not", "isinstance", "(", "filenames", ",", "(", "list", ",", "tuple", ")", ")", ":", "filenames", "=", "[", "filenames", "]", "times", "=", "[", "]", "for", "name", "in", "filenames", ":", "name",...
Unpickle file contents. Args: filenames (str): Can be one or a list or tuple of filenames to retrieve. Returns: Times: A single object, or from a collection of filenames, a list of Times objects. Raises: TypeError: If any loaded object is not a Times object.
[ "Unpickle", "file", "contents", "." ]
python
train
rigetti/pyquil
pyquil/api/_job.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_job.py#L162-L174
def _get_metadata(self, key): """ If the server returned a metadata dictionary, retrieve a particular key from it. If no metadata exists, or the key does not exist, return None. :param key: Metadata key, e.g., "gate_depth" :return: The associated metadata. :rtype: Optional[Any] """ if not self.is_done(): raise ValueError("Cannot get metadata for a program that isn't completed.") return self._raw.get("metadata", {}).get(key, None)
[ "def", "_get_metadata", "(", "self", ",", "key", ")", ":", "if", "not", "self", ".", "is_done", "(", ")", ":", "raise", "ValueError", "(", "\"Cannot get metadata for a program that isn't completed.\"", ")", "return", "self", ".", "_raw", ".", "get", "(", "\"me...
If the server returned a metadata dictionary, retrieve a particular key from it. If no metadata exists, or the key does not exist, return None. :param key: Metadata key, e.g., "gate_depth" :return: The associated metadata. :rtype: Optional[Any]
[ "If", "the", "server", "returned", "a", "metadata", "dictionary", "retrieve", "a", "particular", "key", "from", "it", ".", "If", "no", "metadata", "exists", "or", "the", "key", "does", "not", "exist", "return", "None", "." ]
python
train
mar10/pyftpsync
ftpsync/util.py
https://github.com/mar10/pyftpsync/blob/bbdc94186975cdc1cc4f678474bdce08bce7bb76/ftpsync/util.py#L202-L285
def get_credentials_for_url(url, opts, force_user=None): """Lookup credentials for a given target in keyring and .netrc. Optionally prompts for credentials if not found. Returns: 2-tuple (username, password) or None """ creds = None verbose = int(opts.get("verbose")) force_prompt = opts.get("prompt", False) allow_prompt = not opts.get("no_prompt", True) allow_keyring = not opts.get("no_keyring", False) and not force_user allow_netrc = not opts.get("no_netrc", False) and not force_user # print("get_credentials_for_url", force_user, allow_prompt) if force_user and not allow_prompt: raise RuntimeError( "Cannot get credentials for a distinct user ({}) from keyring or .netrc and " "prompting is disabled.".format(force_user) ) # Lookup our own pyftpsync 1.x credential store. This is deprecated with 2.x home_path = os.path.expanduser("~") file_path = os.path.join(home_path, DEFAULT_CREDENTIAL_STORE) if os.path.isfile(file_path): raise RuntimeError( "Custom password files are no longer supported. Delete {} and use .netrc instead.".format( file_path ) ) # Query keyring database if creds is None and keyring and allow_keyring: try: # Note: we pass the url as `username` and username:password as `password` c = keyring.get_password("pyftpsync", url) if c is not None: creds = c.split(":", 1) write( "Using credentials from keyring('pyftpsync', '{}'): {}:***.".format( url, creds[0] ) ) else: if verbose >= 4: write( "No credentials found in keyring('pyftpsync', '{}').".format( url ) ) # except keyring.errors.TransientKeyringError: except Exception as e: # e.g. 
user clicked 'no' write_error("Could not get password from keyring {}".format(e)) # Query .netrc file # print(opts) if creds is None and allow_netrc: try: authenticators = None authenticators = netrc.netrc().authenticators(url) except CompatFileNotFoundError: if verbose >= 4: write("Could not get password (no .netrc file).") except Exception as e: write_error("Could not read .netrc: {}.".format(e)) if authenticators: creds = (authenticators[0], authenticators[2]) write("Using credentials from .netrc file: {}:***.".format(creds[0])) else: if verbose >= 4: write("Could not find entry for '{}' in .netrc file.".format(url)) # Prompt for password if we don't have credentials yet, or --prompt was set. if allow_prompt: if creds is None: creds = prompt_for_password(url) elif force_prompt: # --prompt was set but we can provide a default for the user name creds = prompt_for_password(url, default_user=creds[0]) return creds
[ "def", "get_credentials_for_url", "(", "url", ",", "opts", ",", "force_user", "=", "None", ")", ":", "creds", "=", "None", "verbose", "=", "int", "(", "opts", ".", "get", "(", "\"verbose\"", ")", ")", "force_prompt", "=", "opts", ".", "get", "(", "\"pr...
Lookup credentials for a given target in keyring and .netrc. Optionally prompts for credentials if not found. Returns: 2-tuple (username, password) or None
[ "Lookup", "credentials", "for", "a", "given", "target", "in", "keyring", "and", ".", "netrc", "." ]
python
train
pseudonym117/Riot-Watcher
src/riotwatcher/_apis/SummonerApiV4.py
https://github.com/pseudonym117/Riot-Watcher/blob/21ab12453a0d824d67e30f5514d02a5c5a411dea/src/riotwatcher/_apis/SummonerApiV4.py#L62-L74
def by_id(self, region, encrypted_summoner_id): """ Get a summoner by summoner ID. :param string region: The region to execute this request on :param string encrypted_summoner_id: Summoner ID :returns: SummonerDTO: represents a summoner """ url, query = SummonerApiV4Urls.by_id( region=region, encrypted_summoner_id=encrypted_summoner_id ) return self._raw_request(self.by_id.__name__, region, url, query)
[ "def", "by_id", "(", "self", ",", "region", ",", "encrypted_summoner_id", ")", ":", "url", ",", "query", "=", "SummonerApiV4Urls", ".", "by_id", "(", "region", "=", "region", ",", "encrypted_summoner_id", "=", "encrypted_summoner_id", ")", "return", "self", "....
Get a summoner by summoner ID. :param string region: The region to execute this request on :param string encrypted_summoner_id: Summoner ID :returns: SummonerDTO: represents a summoner
[ "Get", "a", "summoner", "by", "summoner", "ID", "." ]
python
train
IDSIA/sacred
sacred/commandline_options.py
https://github.com/IDSIA/sacred/blob/72633776bed9b5bddf93ae7d215188e61970973a/sacred/commandline_options.py#L159-L165
def gather_command_line_options(filter_disabled=None): """Get a sorted list of all CommandLineOption subclasses.""" if filter_disabled is None: filter_disabled = not SETTINGS.COMMAND_LINE.SHOW_DISABLED_OPTIONS options = [opt for opt in get_inheritors(CommandLineOption) if not filter_disabled or opt._enabled] return sorted(options, key=lambda opt: opt.__name__)
[ "def", "gather_command_line_options", "(", "filter_disabled", "=", "None", ")", ":", "if", "filter_disabled", "is", "None", ":", "filter_disabled", "=", "not", "SETTINGS", ".", "COMMAND_LINE", ".", "SHOW_DISABLED_OPTIONS", "options", "=", "[", "opt", "for", "opt",...
Get a sorted list of all CommandLineOption subclasses.
[ "Get", "a", "sorted", "list", "of", "all", "CommandLineOption", "subclasses", "." ]
python
train
edx/edx-search
search/elastic.py
https://github.com/edx/edx-search/blob/476cf02b71ceba34ae7d8b798f36d60692317c55/search/elastic.py#L172-L185
def _process_facet_terms(facet_terms): """ We have a list of terms with which we return facets """ elastic_facets = {} for facet in facet_terms: facet_term = {"field": facet} if facet_terms[facet]: for facet_option in facet_terms[facet]: facet_term[facet_option] = facet_terms[facet][facet_option] elastic_facets[facet] = { "terms": facet_term } return elastic_facets
[ "def", "_process_facet_terms", "(", "facet_terms", ")", ":", "elastic_facets", "=", "{", "}", "for", "facet", "in", "facet_terms", ":", "facet_term", "=", "{", "\"field\"", ":", "facet", "}", "if", "facet_terms", "[", "facet", "]", ":", "for", "facet_option"...
We have a list of terms with which we return facets
[ "We", "have", "a", "list", "of", "terms", "with", "which", "we", "return", "facets" ]
python
valid
robertpeteuil/multi-cloud-control
mcc/cldcnct.py
https://github.com/robertpeteuil/multi-cloud-control/blob/f1565af1c0b6ed465ff312d3ccc592ba0609f4a2/mcc/cldcnct.py#L288-L299
def adj_nodes_ali(ali_nodes): """Adjust details specific to AliCloud.""" for node in ali_nodes: node.cloud = "alicloud" node.cloud_disp = "AliCloud" node.private_ips = ip_to_str(node.extra['vpc_attributes']['private_ip_address']) node.public_ips = ip_to_str(node.public_ips) node.zone = node.extra['zone_id'] node.size = node.extra['instance_type'] if node.size.startswith('ecs.'): node.size = node.size[len('ecs.'):] return ali_nodes
[ "def", "adj_nodes_ali", "(", "ali_nodes", ")", ":", "for", "node", "in", "ali_nodes", ":", "node", ".", "cloud", "=", "\"alicloud\"", "node", ".", "cloud_disp", "=", "\"AliCloud\"", "node", ".", "private_ips", "=", "ip_to_str", "(", "node", ".", "extra", "...
Adjust details specific to AliCloud.
[ "Adjust", "details", "specific", "to", "AliCloud", "." ]
python
train
hyperledger/indy-sdk
vcx/wrappers/python3/vcx/api/proof.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/proof.py#L55-L70
async def deserialize(data: dict): """ Builds a Proof object with defined attributes. Attributes are provided by a previous call to the serialize function. :param data: Example: name = "proof name" requested_attrs = [{"name": "age", "restrictions": [{"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766" } ] }, { "name":"name", "restrictions": [ { "schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}] proof = await Proof.create(source_id, name, requested_attrs) data = proof.serialize() proof2 = await Proof.deserialize(data) :return: Proof Object """ return await Proof._deserialize("vcx_proof_deserialize", json.dumps(data), data.get('data').get('source_id'))
[ "async", "def", "deserialize", "(", "data", ":", "dict", ")", ":", "return", "await", "Proof", ".", "_deserialize", "(", "\"vcx_proof_deserialize\"", ",", "json", ".", "dumps", "(", "data", ")", ",", "data", ".", "get", "(", "'data'", ")", ".", "get", ...
Builds a Proof object with defined attributes. Attributes are provided by a previous call to the serialize function. :param data: Example: name = "proof name" requested_attrs = [{"name": "age", "restrictions": [{"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766" } ] }, { "name":"name", "restrictions": [ { "schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}] proof = await Proof.create(source_id, name, requested_attrs) data = proof.serialize() proof2 = await Proof.deserialize(data) :return: Proof Object
[ "Builds", "a", "Proof", "object", "with", "defined", "attributes", ".", "Attributes", "are", "provided", "by", "a", "previous", "call", "to", "the", "serialize", "function", ".", ":", "param", "data", ":", "Example", ":", "name", "=", "proof", "name", "req...
python
train
gwpy/gwpy
gwpy/segments/flag.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/segments/flag.py#L457-L522
def query_dqsegdb(cls, flag, *args, **kwargs): """Query the advanced LIGO DQSegDB for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately. """ # parse arguments qsegs = _parse_query_segments(args, cls.query_dqsegdb) # get server url = kwargs.pop('url', DEFAULT_SEGMENT_SERVER) # parse flag out = cls(name=flag) if out.ifo is None or out.tag is None: raise ValueError("Cannot parse ifo or tag (name) for flag %r" % flag) # process query for start, end in qsegs: # handle infinities if float(end) == +inf: end = to_gps('now').seconds # query try: data = query_segments(flag, int(start), int(end), host=url) except HTTPError as exc: if exc.code == 404: # if not found, annotate flag name exc.msg += ' [{0}]'.format(flag) raise # read from json buffer new = cls.read( BytesIO(json.dumps(data).encode('utf-8')), format='json', ) # restrict to query segments segl = SegmentList([Segment(start, end)]) new.known &= segl new.active &= segl out += new # replace metadata out.description = new.description out.isgood = new.isgood return out
[ "def", "query_dqsegdb", "(", "cls", ",", "flag", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# parse arguments", "qsegs", "=", "_parse_query_segments", "(", "args", ",", "cls", ".", "query_dqsegdb", ")", "# get server", "url", "=", "kwargs", ".",...
Query the advanced LIGO DQSegDB for the given flag Parameters ---------- flag : `str` The name of the flag for which to query *args Either, two `float`-like numbers indicating the GPS [start, stop) interval, or a `SegmentList` defining a number of summary segments url : `str`, optional URL of the segment database, defaults to ``$DEFAULT_SEGMENT_SERVER`` environment variable, or ``'https://segments.ligo.org'`` Returns ------- flag : `DataQualityFlag` A new `DataQualityFlag`, with the `known` and `active` lists filled appropriately.
[ "Query", "the", "advanced", "LIGO", "DQSegDB", "for", "the", "given", "flag" ]
python
train
pallets/werkzeug
src/werkzeug/utils.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/src/werkzeug/utils.py#L669-L707
def bind_arguments(func, args, kwargs): """Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments. """ ( args, kwargs, missing, extra, extra_positional, arg_spec, vararg_var, kwarg_var, ) = _parse_signature(func)(args, kwargs) values = {} for (name, _has_default, _default), value in zip(arg_spec, args): values[name] = value if vararg_var is not None: values[vararg_var] = tuple(extra_positional) elif extra_positional: raise TypeError("too many positional arguments") if kwarg_var is not None: multikw = set(extra) & set([x[0] for x in arg_spec]) if multikw: raise TypeError( "got multiple values for keyword argument " + repr(next(iter(multikw))) ) values[kwarg_var] = extra elif extra: raise TypeError("got unexpected keyword argument " + repr(next(iter(extra)))) return values
[ "def", "bind_arguments", "(", "func", ",", "args", ",", "kwargs", ")", ":", "(", "args", ",", "kwargs", ",", "missing", ",", "extra", ",", "extra_positional", ",", "arg_spec", ",", "vararg_var", ",", "kwarg_var", ",", ")", "=", "_parse_signature", "(", "...
Bind the arguments provided into a dict. When passed a function, a tuple of arguments and a dict of keyword arguments `bind_arguments` returns a dict of names as the function would see it. This can be useful to implement a cache decorator that uses the function arguments to build the cache key based on the values of the arguments. :param func: the function the arguments should be bound for. :param args: tuple of positional arguments. :param kwargs: a dict of keyword arguments. :return: a :class:`dict` of bound keyword arguments.
[ "Bind", "the", "arguments", "provided", "into", "a", "dict", ".", "When", "passed", "a", "function", "a", "tuple", "of", "arguments", "and", "a", "dict", "of", "keyword", "arguments", "bind_arguments", "returns", "a", "dict", "of", "names", "as", "the", "f...
python
train
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L89-L109
def request(self, method, url, **kwargs): """Constructs a :class:`requests.Request`, prepares it and sends it. Raises HTTPErrors by default. :param method: method for the new :class:`Request` object. :type method: :class:`str` :param url: URL for the new :class:`Request` object. :type url: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError` """ if oauthlib.oauth2.is_secure_transport(url): m = super(OAuthSession, self).request else: m = super(requests_oauthlib.OAuth2Session, self).request log.debug("%s \"%s\" with %s", method, url, kwargs) response = m(method, url, **kwargs) response.raise_for_status() return response
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "if", "oauthlib", ".", "oauth2", ".", "is_secure_transport", "(", "url", ")", ":", "m", "=", "super", "(", "OAuthSession", ",", "self", ")", ".", "request", ...
Constructs a :class:`requests.Request`, prepares it and sends it. Raises HTTPErrors by default. :param method: method for the new :class:`Request` object. :type method: :class:`str` :param url: URL for the new :class:`Request` object. :type url: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a resonse object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError`
[ "Constructs", "a", ":", "class", ":", "requests", ".", "Request", "prepares", "it", "and", "sends", "it", ".", "Raises", "HTTPErrors", "by", "default", "." ]
python
train
draperjames/qtpandas
qtpandas/models/DataFrameModel.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/models/DataFrameModel.py#L621-L632
def enableEditing(self, editable=True): """ Sets the DataFrameModel and columnDtypeModel's editable properties. :param editable: bool defaults to True, False disables most editing methods. :return: None """ self.editable = editable self._columnDtypeModel.setEditable(self.editable)
[ "def", "enableEditing", "(", "self", ",", "editable", "=", "True", ")", ":", "self", ".", "editable", "=", "editable", "self", ".", "_columnDtypeModel", ".", "setEditable", "(", "self", ".", "editable", ")" ]
Sets the DataFrameModel and columnDtypeModel's editable properties. :param editable: bool defaults to True, False disables most editing methods. :return: None
[ "Sets", "the", "DataFrameModel", "and", "columnDtypeModel", "s", "editable", "properties", ".", ":", "param", "editable", ":", "bool", "defaults", "to", "True", "False", "disables", "most", "editing", "methods", ".", ":", "return", ":", "None" ]
python
train
iotaledger/iota.lib.py
iota/adapter/sandbox.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/iota/adapter/sandbox.py#L191-L205
def get_jobs_url(self, job_id): # type: (Text) -> Text """ Returns the URL to check job status. :param job_id: The ID of the job to check. """ return compat.urllib_parse.urlunsplit(( self.uri.scheme, self.uri.netloc, self.uri.path.rstrip('/') + '/jobs/' + job_id, self.uri.query, self.uri.fragment, ))
[ "def", "get_jobs_url", "(", "self", ",", "job_id", ")", ":", "# type: (Text) -> Text", "return", "compat", ".", "urllib_parse", ".", "urlunsplit", "(", "(", "self", ".", "uri", ".", "scheme", ",", "self", ".", "uri", ".", "netloc", ",", "self", ".", "uri...
Returns the URL to check job status. :param job_id: The ID of the job to check.
[ "Returns", "the", "URL", "to", "check", "job", "status", "." ]
python
test
zimeon/iiif
iiif/generators/check.py
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/generators/check.py#L31-L55
def pixel(self, x, y, size=None, red=0): """Return color for a pixel. The paremeter size is used to recursively follow down to smaller and smaller middle squares (number 5). The paremeter red is used to shade from black to red going toward the middle of the figure. """ if (size is None): size = self.sz # Have we go to the smallest element? if (size <= 3): if (_num(x, y) % 2): return (red, 0, 0) else: return None divisor = size // 3 n = _num(x // divisor, y // divisor) if (n == 5): # Middle square further divided return self.pixel(x % divisor, y % divisor, divisor, min(red + 25, 255)) elif (n % 2): return (red, 0, 0) else: return None
[ "def", "pixel", "(", "self", ",", "x", ",", "y", ",", "size", "=", "None", ",", "red", "=", "0", ")", ":", "if", "(", "size", "is", "None", ")", ":", "size", "=", "self", ".", "sz", "# Have we go to the smallest element?", "if", "(", "size", "<=", ...
Return color for a pixel. The paremeter size is used to recursively follow down to smaller and smaller middle squares (number 5). The paremeter red is used to shade from black to red going toward the middle of the figure.
[ "Return", "color", "for", "a", "pixel", "." ]
python
train
szastupov/aiotg
aiotg/bot.py
https://github.com/szastupov/aiotg/blob/eed81a6a728c02120f1d730a6e8b8fe50263c010/aiotg/bot.py#L489-L504
def edit_message_reply_markup(self, chat_id, message_id, reply_markup, **options): """ Edit a reply markup of message in a chat :param int chat_id: ID of the chat the message to edit is in :param int message_id: ID of the message to edit :param str reply_markup: New inline keyboard markup for the message :param options: Additional API options """ return self.api_call( "editMessageReplyMarkup", chat_id=chat_id, message_id=message_id, reply_markup=reply_markup, **options )
[ "def", "edit_message_reply_markup", "(", "self", ",", "chat_id", ",", "message_id", ",", "reply_markup", ",", "*", "*", "options", ")", ":", "return", "self", ".", "api_call", "(", "\"editMessageReplyMarkup\"", ",", "chat_id", "=", "chat_id", ",", "message_id", ...
Edit a reply markup of message in a chat :param int chat_id: ID of the chat the message to edit is in :param int message_id: ID of the message to edit :param str reply_markup: New inline keyboard markup for the message :param options: Additional API options
[ "Edit", "a", "reply", "markup", "of", "message", "in", "a", "chat" ]
python
train
nosegae/NoseGAE
nosegae.py
https://github.com/nosegae/NoseGAE/blob/fca9fab22b480bb9721ecaa0967a636107648d92/nosegae.py#L251-L253
def _init_stub(self, stub_init, **stub_kwargs): """Initializes all other stubs for consistency's sake""" getattr(self.testbed, stub_init, lambda **kwargs: None)(**stub_kwargs)
[ "def", "_init_stub", "(", "self", ",", "stub_init", ",", "*", "*", "stub_kwargs", ")", ":", "getattr", "(", "self", ".", "testbed", ",", "stub_init", ",", "lambda", "*", "*", "kwargs", ":", "None", ")", "(", "*", "*", "stub_kwargs", ")" ]
Initializes all other stubs for consistency's sake
[ "Initializes", "all", "other", "stubs", "for", "consistency", "s", "sake" ]
python
train
bcbio/bcbio-nextgen
bcbio/workflow/template.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/workflow/template.py#L279-L308
def _parse_metadata(in_handle): """Reads metadata from a simple CSV structured input file. samplename,batch,phenotype ERR256785,batch1,normal """ metadata = {} reader = csv.reader(in_handle) while 1: header = next(reader) if not header[0].startswith("#"): break keys = [x.strip() for x in header[1:]] for sinfo in (x for x in reader if x and not x[0].startswith("#")): sinfo = [_strip_and_convert_lists(x) for x in sinfo] sample = sinfo[0] if isinstance(sample, list): sample = tuple(sample) # sanity check to avoid duplicate rows if sample in metadata: raise ValueError("Sample %s present multiple times in metadata file.\n" "If you need to specify multiple attributes as a list " "use a semi-colon to separate them on a single line.\n" "https://bcbio-nextgen.readthedocs.org/en/latest/" "contents/configuration.html#automated-sample-configuration\n" "Duplicate line is %s" % (sample, sinfo)) vals = [_clean_string(v, sinfo) for v in sinfo[1:]] metadata[sample] = dict(zip(keys, vals)) metadata, global_vars = _set_global_vars(metadata) return metadata, global_vars
[ "def", "_parse_metadata", "(", "in_handle", ")", ":", "metadata", "=", "{", "}", "reader", "=", "csv", ".", "reader", "(", "in_handle", ")", "while", "1", ":", "header", "=", "next", "(", "reader", ")", "if", "not", "header", "[", "0", "]", ".", "s...
Reads metadata from a simple CSV structured input file. samplename,batch,phenotype ERR256785,batch1,normal
[ "Reads", "metadata", "from", "a", "simple", "CSV", "structured", "input", "file", "." ]
python
train
pkgw/pwkit
pwkit/contours.py
https://github.com/pkgw/pwkit/blob/d40957a1c3d2ea34e7ceac2267ee9635135f2793/pwkit/contours.py#L22-L278
def analytic_2d (f, df, x0, y0, maxiters=5000, defeta=0.05, netastep=12, vtol1=1e-3, vtol2=1e-8, maxnewt=20, dorder=7, goright=False): """Sample a contour in a 2D analytic function. Arguments: f A function, mapping (x, y) -> z. df The partial derivative: df (x, y) -> [dz/dx, dz/dy]. If None, the derivative of f is approximated numerically with scipy.derivative. x0 Initial x value. Should be of "typical" size for the problem; avoid 0. y0 Initial y value. Should be of "typical" size for the problem; avoid 0. Optional arguments: maxiters Maximum number of points to create. Default 5000. defeta Initially offset by distances of defeta*[df/dx, df/dy] Default 0.05. netastep Number of steps between defeta and the machine resolution in which we test eta values for goodness. (OMG FIXME doc). Default 12. vtol1 Tolerance for constancy in the value of the function in the initial offset step. The value is only allowed to vary by ``f(x0,y0) * vtol1``. Default 1e-3. vtol2 Tolerance for constancy in the value of the function in the along the contour. The value is only allowed to vary by ``f(x0,y0) * vtol2``. Default 1e-8. maxnewt Maximum number of Newton's method steps to take when attempting to hone in on the desired function value. Default 20. dorder Number of function evaluations to perform when evaluating the derivative of f numerically. Must be an odd integer greater than 1. Default 7. goright If True, trace the contour rightward (as looking uphill), rather than leftward (the default). """ # Coerce argument types. 
if not callable (f): raise ValueError ('f') if df is not None and not callable (df): raise ValueError ('df') x0 = float (x0) if x0 == 0.: raise ValueError ('x0') y0 = float (y0) if y0 == 0.: raise ValueError ('y0') maxiters = int (maxiters) if maxiters < 3: raise ValueError ('maxiters') defeta = float (defeta) if defeta <= 0: raise ValueError ('defeta') netastep = int (netastep) if netastep < 2: raise ValueError ('netastep') vtol1 = float (vtol1) if vtol1 <= 0: raise ValueError ('vtol1') vtol2 = float (vtol2) if vtol2 >= vtol1: raise ValueError ('vtol2') maxnewt = int (maxnewt) if maxnewt < 1: raise ValueError ('maxnewt') # What value are we contouring? v = f (x0, y0) # If no derivative is given, use a numerical approximation. if df is None: derivx = abs (x0 * 0.025) derivy = abs (y0 * 0.025) from scipy import derivative if dorder == 2: # simple derivative def df (x1, y1): z0 = f (x1, y1) dx = max (abs (x1) * 1e-5, 1e-8) dy = max (abs (y1) * 1e-5, 1e-8) dzdx = (f (x1 + dx, y1) - z0) / dx dzdy = (f (x1, y1 + dy) - z0) / dy return [dzdx, dzdy] else: def df (x1, y1): dx = derivative (lambda x: f (x, y1), x1, derivx, order=dorder) dy = derivative (lambda y: f (x1, y), y1, derivy, order=dorder) return [dx, dy] # Init eta progression. rez = np.finfo (np.double).resolution if rez > defeta: raise PKError ('defeta below resolution!') eta_scale = np.exp ((np.log (rez) - np.log (defeta)) / netastep) # Init data storage n = 1 pts = np.empty ((maxiters, 2)) pts[0] = (x0, y0) x = x0 y = y0 # Quitflag: 0 if first iteration # 1 if inited but not yet ok to quit (definition of this below) # 2 if ok to quit # initquad: 0 if x > 0, y > 0 # 1 if x < 0, y > 0 # 2 if x < 0, y < 0 # 3 if x > 0, y < 0 # We invert these senses in the in-loop test to make comparison easy. quitflag = 0 initquad = -1 # Start finding contours. while n < maxiters: dfdx, dfdy = df (x, y) # If we're booting up, remember the quadrant that df/dx points in. 
# Once we've rotated around to the other direction, it is safe to quit # once we return close to the original point, since we must have # completed a circle. if quitflag == 0: if dfdx > 0: if dfdy > 0: initquad = 0 else: initquad = 3 else: if dfdy > 0: initquad = 1 else: initquad = 2 quitflag = 1 elif quitflag == 1: if dfdx > 0: if dfdy > 0: curquad = 2 else: curquad = 1 else: if dfdy > 0: curquad = 3 else: curquad = 0 if curquad == initquad: quitflag = 2 # We will move perpendicular to [df/dx, df/dy], rotating to the left # (arbitrarily) from that direction. We need to figure out how far we # can safely move in this direction. if goright: dx = dfdy * defeta dy = -dfdx * defeta else: dx = -dfdy * defeta dy = dfdx * defeta i = 0 while i < netastep: nx = x + dx ny = y + dy nv = f (nx, ny) # Is the value of the function sufficently close to what # we're aiming for? if abs (nv / v - 1) < vtol1: break # No. Try a smaller dx/dy. dx *= eta_scale dy *= eta_scale i += 1 else: # Failed to find a sufficiently small eta (did not break out of # loop) raise PKError ('failed to find sufficiently small eta: xy %g,%g; ' 'dv %g; df %g,%g; dxy %g,%g; defeta %g; eta_scale ' '%g' % (x, y, nv - v, dfdx, dfdy, dx, dy, defeta, eta_scale)) # Now compute a new [df/dx, df/dy], and move along it, finding our way # back to the desired value, 'v'. Newton's method should suffice. This # loop usually exits after one iteration. i = 0 while i < maxnewt: dfdx, dfdy = df (nx, ny) df2 = dfdx**2 + dfdy**2 dv = nv - v nx -= dv * dfdx / df2 ny -= dv * dfdy / df2 nv = f (nx, ny) if abs (nv/v - 1) < vtol2: break i += 1 else: # Did not break out of loop. raise PKError ('failed to converge with Newton\'s method') # Ok, we found our next value. pts[n] = (nx, ny) x = nx y = ny n += 1 # Time to stop? Make sure we've gone at least a half-turn so that we # don't just exit on the first iteration. 
if quitflag == 2: dist2 = (x/x0 - 1)**2 + (y/y0 - 1)**2 if dist2 < 3 * (dx**2 + dy**2): break else: raise PKError ('needed too many points to close contour') # Woohoo! All done. return pts[:n]
[ "def", "analytic_2d", "(", "f", ",", "df", ",", "x0", ",", "y0", ",", "maxiters", "=", "5000", ",", "defeta", "=", "0.05", ",", "netastep", "=", "12", ",", "vtol1", "=", "1e-3", ",", "vtol2", "=", "1e-8", ",", "maxnewt", "=", "20", ",", "dorder",...
Sample a contour in a 2D analytic function. Arguments: f A function, mapping (x, y) -> z. df The partial derivative: df (x, y) -> [dz/dx, dz/dy]. If None, the derivative of f is approximated numerically with scipy.derivative. x0 Initial x value. Should be of "typical" size for the problem; avoid 0. y0 Initial y value. Should be of "typical" size for the problem; avoid 0. Optional arguments: maxiters Maximum number of points to create. Default 5000. defeta Initially offset by distances of defeta*[df/dx, df/dy] Default 0.05. netastep Number of steps between defeta and the machine resolution in which we test eta values for goodness. (OMG FIXME doc). Default 12. vtol1 Tolerance for constancy in the value of the function in the initial offset step. The value is only allowed to vary by ``f(x0,y0) * vtol1``. Default 1e-3. vtol2 Tolerance for constancy in the value of the function in the along the contour. The value is only allowed to vary by ``f(x0,y0) * vtol2``. Default 1e-8. maxnewt Maximum number of Newton's method steps to take when attempting to hone in on the desired function value. Default 20. dorder Number of function evaluations to perform when evaluating the derivative of f numerically. Must be an odd integer greater than 1. Default 7. goright If True, trace the contour rightward (as looking uphill), rather than leftward (the default).
[ "Sample", "a", "contour", "in", "a", "2D", "analytic", "function", ".", "Arguments", ":" ]
python
train
materialsproject/pymatgen-db
matgendb/builders/incr.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/builders/incr.py#L355-L371
def save(self, mark):
    """Save a position in this collection.

    :param mark: The position to save
    :type mark: Mark
    :raises: DBError, NoTrackingCollection
    """
    self._check_exists()
    record = mark.as_dict()
    # Upsert keyed on field name and operation (but not the position), so a
    # repeated save replaces the previous mark instead of accumulating rows.
    selector = {key: record[key] for key in (mark.FLD_FLD, mark.FLD_OP)}
    _log.debug("save: upsert-spec={} upsert-obj={}".format(selector, record))
    try:
        self._track.update(selector, record, upsert=True)
    except pymongo.errors.PyMongoError as err:
        raise DBError("{}".format(err))
[ "def", "save", "(", "self", ",", "mark", ")", ":", "self", ".", "_check_exists", "(", ")", "obj", "=", "mark", ".", "as_dict", "(", ")", "try", ":", "# Make a 'filter' to find/update existing record, which uses", "# the field name and operation (but not the position).",...
Save a position in this collection. :param mark: The position to save :type mark: Mark :raises: DBError, NoTrackingCollection
[ "Save", "a", "position", "in", "this", "collection", "." ]
python
train
svartalf/python-opus
opus/api/ctl.py
https://github.com/svartalf/python-opus/blob/a3c1d556d2772b5be659ddd08c033ddd4d566b3a/opus/api/ctl.py#L19-L30
def query(request):
    """Build a CTL query callable for an encoder/decoder request value.

    :param request: the Opus CTL request constant to issue.
    :return: a function ``inner(func, obj)`` that calls ``func(obj, request)``
        and returns the result code, raising ``OpusError`` on failure.
    """
    def inner(func, obj):
        result_code = func(obj, request)
        # Compare by value, not identity: `is not` only happened to work
        # because CPython caches small integers; result codes are plain ints.
        if result_code != constants.OK:
            raise OpusError(result_code)
        return result_code

    return inner
[ "def", "query", "(", "request", ")", ":", "def", "inner", "(", "func", ",", "obj", ")", ":", "result_code", "=", "func", "(", "obj", ",", "request", ")", "if", "result_code", "is", "not", "constants", ".", "OK", ":", "raise", "OpusError", "(", "resul...
Query encoder/decoder with a request value
[ "Query", "encoder", "/", "decoder", "with", "a", "request", "value" ]
python
train
lotabout/pymustache
pymustache/mustache.py
https://github.com/lotabout/pymustache/blob/d4089e49cda01fc11bab0c986d95e25150a60bac/pymustache/mustache.py#L263-L269
def _escape(self, text): """Escape text according to self.escape""" ret = EMPTYSTRING if text is None else str(text) if self.escape: return html_escape(ret) else: return ret
[ "def", "_escape", "(", "self", ",", "text", ")", ":", "ret", "=", "EMPTYSTRING", "if", "text", "is", "None", "else", "str", "(", "text", ")", "if", "self", ".", "escape", ":", "return", "html_escape", "(", "ret", ")", "else", ":", "return", "ret" ]
Escape text according to self.escape
[ "Escape", "text", "according", "to", "self", ".", "escape" ]
python
train
google/fleetspeak
fleetspeak/src/client/daemonservice/client/client.py
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L58-L71
def _EnvOpen(var, mode): """Open a file descriptor identified by an environment variable.""" value = os.getenv(var) if value is None: raise ValueError("%s is not set" % var) fd = int(value) # If running on Windows, convert the file handle to a C file descriptor; see: # https://groups.google.com/forum/#!topic/dev-python/GeN5bFJWfJ4 if _WINDOWS: fd = msvcrt.open_osfhandle(fd, 0) return os.fdopen(fd, mode)
[ "def", "_EnvOpen", "(", "var", ",", "mode", ")", ":", "value", "=", "os", ".", "getenv", "(", "var", ")", "if", "value", "is", "None", ":", "raise", "ValueError", "(", "\"%s is not set\"", "%", "var", ")", "fd", "=", "int", "(", "value", ")", "# If...
Open a file descriptor identified by an environment variable.
[ "Open", "a", "file", "descriptor", "identified", "by", "an", "environment", "variable", "." ]
python
train
alex-kostirin/pyatomac
atomac/ldtpd/combo_box.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/combo_box.py#L31-L83
def selectitem(self, window_name, object_name, item_name):
    """
    Select combo box / layered pane item

    @param window_name: Window name to type in, either full name,
    LDTP's name convention, or a Unix glob.
    @type window_name: string
    @param object_name: Object name to type in, either full name,
    LDTP's name convention, or a Unix glob. 
    @type object_name: string
    @param item_name: Item name to select
    @type item_name: string

    @return: 1 on success.
    @rtype: integer
    """
    object_handle = self._get_object_handle(window_name, object_name)
    if not object_handle.AXEnabled:
        raise LdtpServerException(u"Object %s state disabled" % object_name)
    # Bring the owning window forward before interacting with the combo box.
    self._grabfocus(object_handle.AXWindow)
    try:
        object_handle.Press()
    except AttributeError:
        # AXPress doesn't work with Instruments
        # So did the following work around
        x, y, width, height = self._getobjectsize(object_handle)
        # Mouse left click on the object
        # Note: x + width/2, y + height / 2 doesn't work
        self.generatemouseevent(x + 5, y + 5, "b1c")
        self.wait(5)
        # Locate the popup entry matching item_name and click it directly.
        handle = self._get_sub_menu_handle(object_handle, item_name)
        x, y, width, height = self._getobjectsize(handle)
        # on OSX 10.7 default "b1c" doesn't work
        # so using "b1d", verified with Fusion test, this works
        self.generatemouseevent(x + 5, y + 5, "b1d")
        # Mouse fallback path is done; skip the accessibility-menu path below.
        return 1

    # Required for menuitem to appear in accessibility list
    self.wait(1)
    # item_name may address a nested menu as "menu;submenu;item".
    menu_list = re.split(";", item_name)
    try:
        menu_handle = self._internal_menu_handler(object_handle, menu_list,
                                                  True)
        # Required for menuitem to appear in accessibility list
        self.wait(1)
        if not menu_handle.AXEnabled:
            raise LdtpServerException(u"Object %s state disabled" % \
                                      menu_list[-1])
        menu_handle.Press()
    except LdtpServerException:
        # Close the still-open popup (ESC) before propagating the failure,
        # so the UI is not left in a half-open state.
        object_handle.activate()
        object_handle.sendKey(AXKeyCodeConstants.ESCAPE)
        raise
    return 1
[ "def", "selectitem", "(", "self", ",", "window_name", ",", "object_name", ",", "item_name", ")", ":", "object_handle", "=", "self", ".", "_get_object_handle", "(", "window_name", ",", "object_name", ")", "if", "not", "object_handle", ".", "AXEnabled", ":", "ra...
Select combo box / layered pane item @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @param item_name: Item name to select @type object_name: string @return: 1 on success. @rtype: integer
[ "Select", "combo", "box", "/", "layered", "pane", "item", "@param", "window_name", ":", "Window", "name", "to", "type", "in", "either", "full", "name", "LDTP", "s", "name", "convention", "or", "a", "Unix", "glob", ".", "@type", "window_name", ":", "string"...
python
valid
rigetti/pyquil
pyquil/wavefunction.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/wavefunction.py#L230-L247
def _octet_bits(o): """ Get the bits of an octet. :param o: The octets. :return: The bits as a list in LSB-to-MSB order. :rtype: list """ if not isinstance(o, integer_types): raise TypeError("o should be an int") if not (0 <= o <= 255): raise ValueError("o should be between 0 and 255 inclusive") bits = [0] * 8 for i in range(8): if 1 == o & 1: bits[i] = 1 o = o >> 1 return bits
[ "def", "_octet_bits", "(", "o", ")", ":", "if", "not", "isinstance", "(", "o", ",", "integer_types", ")", ":", "raise", "TypeError", "(", "\"o should be an int\"", ")", "if", "not", "(", "0", "<=", "o", "<=", "255", ")", ":", "raise", "ValueError", "("...
Get the bits of an octet. :param o: The octets. :return: The bits as a list in LSB-to-MSB order. :rtype: list
[ "Get", "the", "bits", "of", "an", "octet", "." ]
python
train
saltstack/salt
salt/modules/rh_service.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rh_service.py#L186-L204
def _chkconfig_is_enabled(name, runlevel=None):
    '''
    Return ``True`` if the service is enabled according to chkconfig;
    otherwise return ``False``.  If ``runlevel`` is ``None``, then use the
    current runlevel.
    '''
    output = __salt__['cmd.run_all'](
        '/sbin/chkconfig --list {0}'.format(name), python_shell=False)

    if runlevel is None:
        runlevel = _runlevel()
    if output['retcode'] != 0:
        return False

    enabled_marker = '{0}:on'.format(runlevel)
    for line in output['stdout'].splitlines():
        tokens = line.split()
        # SysV-style row: "<name>   0:off  1:off ... <runlevel>:on ..."
        if enabled_marker in line and tokens[0] == name:
            return True
        # xinetd-style row: "<name>    on"
        if enabled_marker not in line and tokens == [name, 'on']:
            return True
    return False
[ "def", "_chkconfig_is_enabled", "(", "name", ",", "runlevel", "=", "None", ")", ":", "cmdline", "=", "'/sbin/chkconfig --list {0}'", ".", "format", "(", "name", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmdline", ",", "python_shell", "="...
Return ``True`` if the service is enabled according to chkconfig; otherwise return ``False``. If ``runlevel`` is ``None``, then use the current runlevel.
[ "Return", "True", "if", "the", "service", "is", "enabled", "according", "to", "chkconfig", ";", "otherwise", "return", "False", ".", "If", "runlevel", "is", "None", "then", "use", "the", "current", "runlevel", "." ]
python
train
StackStorm/pybind
pybind/nos/v6_0_2f/brocade_vcs_rpc/show_vcs/output/vcs_nodes/vcs_node_info/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/brocade_vcs_rpc/show_vcs/output/vcs_nodes/vcs_node_info/__init__.py#L267-L290
def _set_node_hw_sync_state(self, v, load=False):
    """
    Setter method for node_hw_sync_state, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_nodes/vcs_node_info/node_hw_sync_state (node-hw-sync-state-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_node_hw_sync_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_node_hw_sync_state() directly.

    YANG Description: Node hardware synchronization state
    """
    # NOTE(review): this looks like auto-generated YANG-binding code
    # (pyangbind-style) — edit the YANG model/generator, not this body.
    # If the incoming value provides a _utype coercion hook, apply it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value as a typed leaf; the restriction_arg dict limits it to
        # the five enum strings (node-unknown .. node-out-of-sync).
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'node-in-sync': {'value': 4}, u'node-uninitialized': {'value': 2}, u'node-unknown': {'value': 1}, u'node-synchronizing': {'value': 3}, u'node-out-of-sync': {'value': 5}},), is_leaf=True, yang_name="node-hw-sync-state", rest_name="node-hw-sync-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='node-hw-sync-state-type', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured message describing the expected type.
        raise ValueError({
            'error-string': """node_hw_sync_state must be of a type compatible with node-hw-sync-state-type""",
            'defined-type': "brocade-vcs:node-hw-sync-state-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'node-in-sync': {'value': 4}, u'node-uninitialized': {'value': 2}, u'node-unknown': {'value': 1}, u'node-synchronizing': {'value': 3}, u'node-out-of-sync': {'value': 5}},), is_leaf=True, yang_name="node-hw-sync-state", rest_name="node-hw-sync-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-vcs', defining_module='brocade-vcs', yang_type='node-hw-sync-state-type', is_config=True)""",
        })

    # Store the typed value, then run the post-set hook if the instance has one.
    self.__node_hw_sync_state = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_node_hw_sync_state", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ","...
Setter method for node_hw_sync_state, mapped from YANG variable /brocade_vcs_rpc/show_vcs/output/vcs_nodes/vcs_node_info/node_hw_sync_state (node-hw-sync-state-type) If this variable is read-only (config: false) in the source YANG file, then _set_node_hw_sync_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_node_hw_sync_state() directly. YANG Description: Node hardware synchronization state
[ "Setter", "method", "for", "node_hw_sync_state", "mapped", "from", "YANG", "variable", "/", "brocade_vcs_rpc", "/", "show_vcs", "/", "output", "/", "vcs_nodes", "/", "vcs_node_info", "/", "node_hw_sync_state", "(", "node", "-", "hw", "-", "sync", "-", "state", ...
python
train
quantumlib/Cirq
cirq/sim/wave_function.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/sim/wave_function.py#L82-L116
def density_matrix_of(self, qubits: List[ops.Qid] = None) -> np.ndarray:
    r"""Returns the density matrix of the state.

    Calculate the density matrix for the system on the list, qubits.
    Any qubits not in the list that are present in self.state_vector() will
    be traced out. If qubits is None the full density matrix for
    self.state_vector() is returned, given self.state_vector() follows
    standard Kronecker convention of numpy.kron.

    For example:
        self.state_vector() = np.array([1/np.sqrt(2), 1/np.sqrt(2)],
            dtype=np.complex64)
        qubits = None
        gives us
            \rho = \begin{bmatrix}
                        0.5 & 0.5
                        0.5 & 0.5
                    \end{bmatrix}

    Args:
        qubits: list containing qubit IDs that you would like
            to include in the density matrix (i.e.) qubits that WON'T
            be traced out.

    Returns:
        A numpy array representing the density matrix.

    Raises:
        ValueError: if the size of the state represents more than 25 qubits.
        IndexError: if the indices are out of range for the number of qubits
            corresponding to the state.
    """
    # Translate qubit IDs to wavefunction indices; None means "keep all".
    if qubits is None:
        keep_indices = None
    else:
        keep_indices = [self.qubit_map[q] for q in qubits]
    return density_matrix_from_state_vector(self.state_vector(), keep_indices)
[ "def", "density_matrix_of", "(", "self", ",", "qubits", ":", "List", "[", "ops", ".", "Qid", "]", "=", "None", ")", "->", "np", ".", "ndarray", ":", "return", "density_matrix_from_state_vector", "(", "self", ".", "state_vector", "(", ")", ",", "[", "self...
r"""Returns the density matrix of the state. Calculate the density matrix for the system on the list, qubits. Any qubits not in the list that are present in self.state_vector() will be traced out. If qubits is None the full density matrix for self.state_vector() is returned, given self.state_vector() follows standard Kronecker convention of numpy.kron. For example: self.state_vector() = np.array([1/np.sqrt(2), 1/np.sqrt(2)], dtype=np.complex64) qubits = None gives us \rho = \begin{bmatrix} 0.5 & 0.5 0.5 & 0.5 \end{bmatrix} Args: qubits: list containing qubit IDs that you would like to include in the density matrix (i.e.) qubits that WON'T be traced out. Returns: A numpy array representing the density matrix. Raises: ValueError: if the size of the state represents more than 25 qubits. IndexError: if the indices are out of range for the number of qubits corresponding to the state.
[ "r", "Returns", "the", "density", "matrix", "of", "the", "state", "." ]
python
train
osrg/ryu
ryu/lib/ovs/bridge.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/ovs/bridge.py#L318-L328
def db_get_map(self, table, record, column):
    """
    Gets dict type value of 'column' in 'record' in 'table'.

    This method is corresponding to the following ovs-vsctl command::

        $ ovs-vsctl get TBL REC COL
    """
    mapping = self.db_get_val(table, record, column)
    # The column must hold an OVSDB map; anything else is a caller error.
    assert isinstance(mapping, dict)
    return mapping
[ "def", "db_get_map", "(", "self", ",", "table", ",", "record", ",", "column", ")", ":", "val", "=", "self", ".", "db_get_val", "(", "table", ",", "record", ",", "column", ")", "assert", "isinstance", "(", "val", ",", "dict", ")", "return", "val" ]
Gets dict type value of 'column' in 'record' in 'table'. This method is corresponding to the following ovs-vsctl command:: $ ovs-vsctl get TBL REC COL
[ "Gets", "dict", "type", "value", "of", "column", "in", "record", "in", "table", "." ]
python
train
adamrehn/ue4cli
ue4cli/ThirdPartyLibraryDetails.py
https://github.com/adamrehn/ue4cli/blob/f1c34502c96059e36757b7433da7e98760a75a6f/ue4cli/ThirdPartyLibraryDetails.py#L71-L81
def getCompilerFlags(self, engineRoot, fmt):
    """
    Constructs the compiler flags string for building against this library
    """
    # Collect definition flags, include-directory flags, and raw C++ flags,
    # then join them using the formatter's delimiter and quoting rules.
    defineFlags = self.prefixedStrings(self.definitionPrefix, self.definitions, engineRoot)
    includeFlags = self.prefixedStrings(self.includeDirPrefix, self.includeDirs, engineRoot)
    cxxFlags = self.resolveRoot(self.cxxFlags, engineRoot)
    return Utility.join(fmt.delim, defineFlags + includeFlags + cxxFlags, fmt.quotes)
[ "def", "getCompilerFlags", "(", "self", ",", "engineRoot", ",", "fmt", ")", ":", "return", "Utility", ".", "join", "(", "fmt", ".", "delim", ",", "self", ".", "prefixedStrings", "(", "self", ".", "definitionPrefix", ",", "self", ".", "definitions", ",", ...
Constructs the compiler flags string for building against this library
[ "Constructs", "the", "compiler", "flags", "string", "for", "building", "against", "this", "library" ]
python
train
transifex/transifex-python-library
txlib/api/base.py
https://github.com/transifex/transifex-python-library/blob/9fea86b718973de35ccca6d54bd1f445c9632406/txlib/api/base.py#L264-L269
def _update(self, **kwargs): """Update a resource in a remote Transifex server.""" path = self._construct_path_to_item() if not kwargs: return return self._http.put(path, json.dumps(kwargs))
[ "def", "_update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_construct_path_to_item", "(", ")", "if", "not", "kwargs", ":", "return", "return", "self", ".", "_http", ".", "put", "(", "path", ",", "json", ".", "dumps",...
Update a resource in a remote Transifex server.
[ "Update", "a", "resource", "in", "a", "remote", "Transifex", "server", "." ]
python
train
markovmodel/msmtools
msmtools/estimation/sparse/newton/linsolve.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/estimation/sparse/newton/linsolve.py#L183-L228
def solve_factorized_aug(z, Fval, LU, G, A):
    """Solve the KKT system for one primal-dual search direction.

    Uses the precomputed factorization ``LU`` of the condensed (augmented)
    system to obtain the Newton step ``dz`` at the interior-point iterate
    ``z``.  (The original body used bare string literals as comments —
    runtime no-op statements, and no actual docstring — and carried unused
    locals ``x``, ``nu`` and ``m``; both issues are fixed here.)

    Parameters
    ----------
    z : ndarray
        Current iterate, packed as ``[x, nu, l, s]`` (primal variables,
        equality multipliers, inequality multipliers, slacks).
    Fval : ndarray
        KKT residual at ``z``, packed as ``[rd, rp1, rp2, rc]`` (dual
        infeasibility, equality/inequality primal infeasibility, centrality).
    LU : object
        Factorization of the condensed system accepted by ``mysolve``.
    G : matrix, shape (M, N)
        Inequality-constraint matrix.
    A : matrix, shape (P, N)
        Equality-constraint matrix.

    Returns
    -------
    ndarray
        Search direction ``dz`` packed like ``z``.
    """
    M, N = G.shape
    P, N = A.shape

    # Multipliers for inequality constraints and the slacks.
    l = z[N+P:N+P+M]
    s = z[N+P+M:]

    # Residual components: dual infeasibility, primal infeasibility
    # (equalities then inequalities), and centrality.
    rd = Fval[0:N]
    rp1 = Fval[N:N+P]
    rp2 = Fval[N+P:N+P+M]
    rc = Fval[N+P+M:]

    # Sigma matrix: SIG = diag(l / s).
    SIG = diags(l/s, 0)

    # Right-hand side of the condensed system in (dx, dnu).
    b1 = -rd - mydot(G.T, mydot(SIG, rp2)) + mydot(G.T, rc/s)
    b2 = -rp1
    b = np.hstack((b1, b2))
    dxnu = mysolve(LU, b)
    dx = dxnu[0:N]
    dnu = dxnu[N:]

    # Recover the slack and inequality-multiplier directions from dx.
    ds = -rp2 - mydot(G, dx)
    dl = -mydot(SIG, ds) - rc/s

    return np.hstack((dx, dnu, dl, ds))
[ "def", "solve_factorized_aug", "(", "z", ",", "Fval", ",", "LU", ",", "G", ",", "A", ")", ":", "M", ",", "N", "=", "G", ".", "shape", "P", ",", "N", "=", "A", ".", "shape", "m", "=", "M", "\"\"\"Primal variable\"\"\"", "x", "=", "z", "[", "0", ...
Total number of inequality constraints
[ "Total", "number", "of", "inequality", "constraints" ]
python
train
cherrypy/cheroot
cheroot/server.py
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/server.py#L1083-L1118
def simple_response(self, status, msg=''):
    """Write a simple response back to the client.

    Builds a minimal plain-text HTTP response (status line, Content-Length,
    Content-Type, optional body) and writes it straight to the connection,
    ignoring socket errors the server treats as benign.

    :param status: status code/reason, e.g. ``'404 Not Found'`` (stringified).
    :param msg: optional response body; text is encoded as ISO-8859-1.
    """
    status = str(status)
    proto_status = '%s %s\r\n' % (self.server.protocol, status)
    content_length = 'Content-Length: %s\r\n' % len(msg)
    content_type = 'Content-Type: text/plain\r\n'
    buf = [
        proto_status.encode('ISO-8859-1'),
        content_length.encode('ISO-8859-1'),
        content_type.encode('ISO-8859-1'),
    ]

    if status[:3] in ('413', '414'):
        # Request Entity Too Large / Request-URI Too Long
        self.close_connection = True
        if self.response_protocol == 'HTTP/1.1':
            # This will not be true for 414, since read_request_line
            # usually raises 414 before reading the whole line, and we
            # therefore cannot know the proper response_protocol.
            buf.append(b'Connection: close\r\n')
        else:
            # HTTP/1.0 had no 413/414 status nor Connection header.
            # Emit 400 instead and trust the message body is enough.
            status = '400 Bad Request'
            # Fix: previously this reassignment had no effect because the
            # status line was already encoded into buf above; rebuild it so
            # the client actually sees 400 as the comment intends.
            buf[0] = ('%s %s\r\n' % (self.server.protocol,
                                     status)).encode('ISO-8859-1')

    buf.append(CRLF)
    if msg:
        if isinstance(msg, six.text_type):
            msg = msg.encode('ISO-8859-1')
        buf.append(msg)

    try:
        self.conn.wfile.write(EMPTY.join(buf))
    except socket.error as ex:
        # Benign disconnects (client went away) are swallowed; anything
        # else propagates.
        if ex.args[0] not in errors.socket_errors_to_ignore:
            raise
[ "def", "simple_response", "(", "self", ",", "status", ",", "msg", "=", "''", ")", ":", "status", "=", "str", "(", "status", ")", "proto_status", "=", "'%s %s\\r\\n'", "%", "(", "self", ".", "server", ".", "protocol", ",", "status", ")", "content_length",...
Write a simple response back to the client.
[ "Write", "a", "simple", "response", "back", "to", "the", "client", "." ]
python
train
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L530-L542
def line_nbr_from_position(self, y_pos):
    """
    Returns the line number from the y_pos.

    :param y_pos: Y pos in the editor
    :return: Line number (0 based), -1 if out of range
    """
    line_height = self._editor.fontMetrics().height()
    # Scan the currently visible blocks for the one whose vertical band
    # contains y_pos.
    for block_top, line_number, _block in self._editor.visible_blocks:
        if block_top <= y_pos <= block_top + line_height:
            return line_number
    return -1
[ "def", "line_nbr_from_position", "(", "self", ",", "y_pos", ")", ":", "editor", "=", "self", ".", "_editor", "height", "=", "editor", ".", "fontMetrics", "(", ")", ".", "height", "(", ")", "for", "top", ",", "line", ",", "block", "in", "editor", ".", ...
Returns the line number from the y_pos. :param y_pos: Y pos in the editor :return: Line number (0 based), -1 if out of range
[ "Returns", "the", "line", "number", "from", "the", "y_pos", "." ]
python
train
tensorflow/mesh
mesh_tensorflow/transformer/transformer.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/transformer.py#L946-L955
def make_layer_stack(layers=gin.REQUIRED, num_layers=6):
  """Configurable layer stack.

  Args:
    layers: a list of subclasses of TransformerLayer
    num_layers: an integer
  Returns:
    a LayerStack
  """
  # Instantiate each layer class exactly once; list multiplication then
  # repeats references to those same instances num_layers times.
  single_pass = [layer_cls() for layer_cls in layers]
  return LayerStack(single_pass * num_layers)
[ "def", "make_layer_stack", "(", "layers", "=", "gin", ".", "REQUIRED", ",", "num_layers", "=", "6", ")", ":", "return", "LayerStack", "(", "[", "cls", "(", ")", "for", "cls", "in", "layers", "]", "*", "num_layers", ")" ]
Configurable layer stack. Args: layers: a list of subclasses of TransformerLayer num_layers: an integer Returns: a LayerStack
[ "Configurable", "layer", "stack", "." ]
python
train
juanifioren/django-oidc-provider
oidc_provider/lib/utils/token.py
https://github.com/juanifioren/django-oidc-provider/blob/f0daed07b2ac7608565b80d4c80ccf04d8c416a8/oidc_provider/lib/utils/token.py#L72-L79
def encode_id_token(payload, client):
    """
    Represent the ID Token as a JSON Web Token (JWT).

    Return a hash.
    """
    # Fetch the client's signing keys first, then sign the payload with the
    # algorithm configured on the client.
    signing_keys = get_client_alg_keys(client)
    signer = JWS(payload, alg=client.jwt_alg)
    return signer.sign_compact(signing_keys)
[ "def", "encode_id_token", "(", "payload", ",", "client", ")", ":", "keys", "=", "get_client_alg_keys", "(", "client", ")", "_jws", "=", "JWS", "(", "payload", ",", "alg", "=", "client", ".", "jwt_alg", ")", "return", "_jws", ".", "sign_compact", "(", "ke...
Represent the ID Token as a JSON Web Token (JWT). Return a hash.
[ "Represent", "the", "ID", "Token", "as", "a", "JSON", "Web", "Token", "(", "JWT", ")", ".", "Return", "a", "hash", "." ]
python
train