code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def filter_pem(data): '''Processes the bytes for PEM certificates. Returns: ``set`` containing each certificate ''' assert isinstance(data, bytes), 'Expect bytes. Got {}.'.format(type(data)) certs = set() new_list = [] in_pem_block = False for line in re.split(br'[\r\n]+', data): if line == b'-----BEGIN CERTIFICATE-----': assert not in_pem_block in_pem_block = True elif line == b'-----END CERTIFICATE-----': assert in_pem_block in_pem_block = False content = b''.join(new_list) content = rewrap_bytes(content) certs.add(b'-----BEGIN CERTIFICATE-----\n' + content + b'\n-----END CERTIFICATE-----\n') new_list = [] elif in_pem_block: new_list.append(line) return certs
Processes the bytes for PEM certificates. Returns: ``set`` containing each certificate
Below is the the instruction that describes the task: ### Input: Processes the bytes for PEM certificates. Returns: ``set`` containing each certificate ### Response: def filter_pem(data): '''Processes the bytes for PEM certificates. Returns: ``set`` containing each certificate ''' assert isinstance(data, bytes), 'Expect bytes. Got {}.'.format(type(data)) certs = set() new_list = [] in_pem_block = False for line in re.split(br'[\r\n]+', data): if line == b'-----BEGIN CERTIFICATE-----': assert not in_pem_block in_pem_block = True elif line == b'-----END CERTIFICATE-----': assert in_pem_block in_pem_block = False content = b''.join(new_list) content = rewrap_bytes(content) certs.add(b'-----BEGIN CERTIFICATE-----\n' + content + b'\n-----END CERTIFICATE-----\n') new_list = [] elif in_pem_block: new_list.append(line) return certs
def vlink(s_expnum, s_ccd, s_version, s_ext, l_expnum, l_ccd, l_version, l_ext, s_prefix=None, l_prefix=None): """make a link between two version of a file. @param s_expnum: @param s_ccd: @param s_version: @param s_ext: @param l_expnum: @param l_ccd: @param l_version: @param l_ext: @param s_prefix: @param l_prefix: @return: """ source_uri = get_uri(s_expnum, ccd=s_ccd, version=s_version, ext=s_ext, prefix=s_prefix) link_uri = get_uri(l_expnum, ccd=l_ccd, version=l_version, ext=l_ext, prefix=l_prefix) return client.link(source_uri, link_uri)
make a link between two version of a file. @param s_expnum: @param s_ccd: @param s_version: @param s_ext: @param l_expnum: @param l_ccd: @param l_version: @param l_ext: @param s_prefix: @param l_prefix: @return:
Below is the the instruction that describes the task: ### Input: make a link between two version of a file. @param s_expnum: @param s_ccd: @param s_version: @param s_ext: @param l_expnum: @param l_ccd: @param l_version: @param l_ext: @param s_prefix: @param l_prefix: @return: ### Response: def vlink(s_expnum, s_ccd, s_version, s_ext, l_expnum, l_ccd, l_version, l_ext, s_prefix=None, l_prefix=None): """make a link between two version of a file. @param s_expnum: @param s_ccd: @param s_version: @param s_ext: @param l_expnum: @param l_ccd: @param l_version: @param l_ext: @param s_prefix: @param l_prefix: @return: """ source_uri = get_uri(s_expnum, ccd=s_ccd, version=s_version, ext=s_ext, prefix=s_prefix) link_uri = get_uri(l_expnum, ccd=l_ccd, version=l_version, ext=l_ext, prefix=l_prefix) return client.link(source_uri, link_uri)
def handleStatus(self, version, status, message): """Save the status code for processing when we get to the end of the headers.""" self.status = status self.message = message
Save the status code for processing when we get to the end of the headers.
Below is the the instruction that describes the task: ### Input: Save the status code for processing when we get to the end of the headers. ### Response: def handleStatus(self, version, status, message): """Save the status code for processing when we get to the end of the headers.""" self.status = status self.message = message
def sg_aconv1d(tensor, opt): r"""Applies 1-D atrous (or dilated) convolution. Args: tensor: A 3-D `Tensor` (automatically passed by decorator). opt: causal: Boolean. If True, zeros are padded before the time axis such that each activation unit doesn't have receptive neurons beyond the equivalent time step. size: A positive `integer` representing `[kernel width]`. As a default it is set to 2 if causal is True, 3 otherwise. rate: A positive `integer`. The stride with which we sample input values across the `height` and `width` dimensions. Default is 1. in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. pad: Either `SAME` (Default) or `VALID`. bias: Boolean. If True, biases are added. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization summary: If True, summaries are added. The default is True. Returns: A `Tensor` with the same type as `tensor`. """ # default options opt += tf.sg_opt(size=(2 if opt.causal else 3), rate=1, pad='SAME') # parameter tf.sg_initializer w = tf.sg_initializer.he_uniform('W', (1, opt.size, opt.in_dim, opt.dim), regularizer=opt.regularizer, summary=opt.summary) b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) if opt.bias else 0 if opt.causal: # pre-padding for causality if opt.pad == 'SAME': pad_len = (opt.size - 1) * opt.rate # padding size x = tf.pad(tensor, [[0, 0], [pad_len, 0], [0, 0]]).sg_expand_dims(axis=1) else: x = tensor.sg_expand_dims(axis=1) # apply 2d convolution out = tf.nn.atrous_conv2d(x, w, rate=opt.rate, padding='VALID') + b else: # apply 2d convolution out = tf.nn.atrous_conv2d(tensor.sg_expand_dims(axis=1), w, rate=opt.rate, padding=opt.pad) + b # reduce dimension # noinspection PyUnresolvedReferences out = out.sg_squeeze(axis=1) return out
r"""Applies 1-D atrous (or dilated) convolution. Args: tensor: A 3-D `Tensor` (automatically passed by decorator). opt: causal: Boolean. If True, zeros are padded before the time axis such that each activation unit doesn't have receptive neurons beyond the equivalent time step. size: A positive `integer` representing `[kernel width]`. As a default it is set to 2 if causal is True, 3 otherwise. rate: A positive `integer`. The stride with which we sample input values across the `height` and `width` dimensions. Default is 1. in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. pad: Either `SAME` (Default) or `VALID`. bias: Boolean. If True, biases are added. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization summary: If True, summaries are added. The default is True. Returns: A `Tensor` with the same type as `tensor`.
Below is the the instruction that describes the task: ### Input: r"""Applies 1-D atrous (or dilated) convolution. Args: tensor: A 3-D `Tensor` (automatically passed by decorator). opt: causal: Boolean. If True, zeros are padded before the time axis such that each activation unit doesn't have receptive neurons beyond the equivalent time step. size: A positive `integer` representing `[kernel width]`. As a default it is set to 2 if causal is True, 3 otherwise. rate: A positive `integer`. The stride with which we sample input values across the `height` and `width` dimensions. Default is 1. in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. pad: Either `SAME` (Default) or `VALID`. bias: Boolean. If True, biases are added. regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization summary: If True, summaries are added. The default is True. Returns: A `Tensor` with the same type as `tensor`. ### Response: def sg_aconv1d(tensor, opt): r"""Applies 1-D atrous (or dilated) convolution. Args: tensor: A 3-D `Tensor` (automatically passed by decorator). opt: causal: Boolean. If True, zeros are padded before the time axis such that each activation unit doesn't have receptive neurons beyond the equivalent time step. size: A positive `integer` representing `[kernel width]`. As a default it is set to 2 if causal is True, 3 otherwise. rate: A positive `integer`. The stride with which we sample input values across the `height` and `width` dimensions. Default is 1. in_dim: A positive `integer`. The size of input dimension. dim: A positive `integer`. The size of output dimension. pad: Either `SAME` (Default) or `VALID`. bias: Boolean. If True, biases are added. 
regularizer: A (Tensor -> Tensor or None) function; the result of applying it on a newly created variable will be added to the collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used for regularization summary: If True, summaries are added. The default is True. Returns: A `Tensor` with the same type as `tensor`. """ # default options opt += tf.sg_opt(size=(2 if opt.causal else 3), rate=1, pad='SAME') # parameter tf.sg_initializer w = tf.sg_initializer.he_uniform('W', (1, opt.size, opt.in_dim, opt.dim), regularizer=opt.regularizer, summary=opt.summary) b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) if opt.bias else 0 if opt.causal: # pre-padding for causality if opt.pad == 'SAME': pad_len = (opt.size - 1) * opt.rate # padding size x = tf.pad(tensor, [[0, 0], [pad_len, 0], [0, 0]]).sg_expand_dims(axis=1) else: x = tensor.sg_expand_dims(axis=1) # apply 2d convolution out = tf.nn.atrous_conv2d(x, w, rate=opt.rate, padding='VALID') + b else: # apply 2d convolution out = tf.nn.atrous_conv2d(tensor.sg_expand_dims(axis=1), w, rate=opt.rate, padding=opt.pad) + b # reduce dimension # noinspection PyUnresolvedReferences out = out.sg_squeeze(axis=1) return out
def do_debug(self, args, arguments): """ :: Usage: debug on debug off Turns the debug log level on and off. """ filename = path_expand("~/.cloudmesh/cmd3.yaml") config = ConfigDict(filename=filename) if arguments['on']: self.set_debug(True) elif arguments['off']: self.set_debug(False)
:: Usage: debug on debug off Turns the debug log level on and off.
Below is the the instruction that describes the task: ### Input: :: Usage: debug on debug off Turns the debug log level on and off. ### Response: def do_debug(self, args, arguments): """ :: Usage: debug on debug off Turns the debug log level on and off. """ filename = path_expand("~/.cloudmesh/cmd3.yaml") config = ConfigDict(filename=filename) if arguments['on']: self.set_debug(True) elif arguments['off']: self.set_debug(False)
def parse_token_kwargs(parser, token, allowed_kwargs=None, compile_args=True, compile_kwargs=True): """ Allow the template tag arguments to be like a normal Python function, with *args and **kwargs. :param parser: The "parser" object that ``@register.tag`` provides. :type parser: :class:`~django.template.Parser` :param token: The "token" object that ``@register.tag`` provides. :type token: :class:`~django.template.Token` or splitted bits :param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check. :type allowed_kwargs: tuple :return: The tag name, arguments and keyword arguments. :rtype: tuple(tag_name, args, kwargs) """ if isinstance(token, Token): bits = token.split_contents() else: bits = token expect_kwarg = False args = [] kwargs = {} prev_bit = None tag_name = bits[0] for bit in bits[1::]: kwarg_match = kwarg_re.match(bit) if kwarg_match: # Keyword argument expect_kwarg = True (name, expr) = bit.split('=', 2) kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr else: # Still at positioned arguments. if expect_kwarg: raise TemplateSyntaxError("{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(bits[0], bit, prev_bit)) args.append(parser.compile_filter(bit) if compile_args else bit) prev_bit = bit # Validate the allowed arguments, to make things easier for template developers if allowed_kwargs is not None and kwargs: if not allowed_kwargs: raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nNo keyword arguments are allowed.") for name in kwargs: if name not in allowed_kwargs: raise TemplateSyntaxError("The option %s=... 
cannot be used in '%s'.\nPossible options are: %s." % (name, bits[0], ", ".join(allowed_kwargs))) return tag_name, args, kwargs
Allow the template tag arguments to be like a normal Python function, with *args and **kwargs. :param parser: The "parser" object that ``@register.tag`` provides. :type parser: :class:`~django.template.Parser` :param token: The "token" object that ``@register.tag`` provides. :type token: :class:`~django.template.Token` or splitted bits :param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check. :type allowed_kwargs: tuple :return: The tag name, arguments and keyword arguments. :rtype: tuple(tag_name, args, kwargs)
Below is the the instruction that describes the task: ### Input: Allow the template tag arguments to be like a normal Python function, with *args and **kwargs. :param parser: The "parser" object that ``@register.tag`` provides. :type parser: :class:`~django.template.Parser` :param token: The "token" object that ``@register.tag`` provides. :type token: :class:`~django.template.Token` or splitted bits :param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check. :type allowed_kwargs: tuple :return: The tag name, arguments and keyword arguments. :rtype: tuple(tag_name, args, kwargs) ### Response: def parse_token_kwargs(parser, token, allowed_kwargs=None, compile_args=True, compile_kwargs=True): """ Allow the template tag arguments to be like a normal Python function, with *args and **kwargs. :param parser: The "parser" object that ``@register.tag`` provides. :type parser: :class:`~django.template.Parser` :param token: The "token" object that ``@register.tag`` provides. :type token: :class:`~django.template.Token` or splitted bits :param compile_args: Whether the arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param compile_kwargs: Whether the keyword arguments should be compiled using :func:`parser.compile_filter <django.template.Parser.compile_filter>`. :param allowed_kwargs: A list of allowed keyword arguments. A value of ``None`` disables the check. :type allowed_kwargs: tuple :return: The tag name, arguments and keyword arguments. 
:rtype: tuple(tag_name, args, kwargs) """ if isinstance(token, Token): bits = token.split_contents() else: bits = token expect_kwarg = False args = [] kwargs = {} prev_bit = None tag_name = bits[0] for bit in bits[1::]: kwarg_match = kwarg_re.match(bit) if kwarg_match: # Keyword argument expect_kwarg = True (name, expr) = bit.split('=', 2) kwargs[name] = parser.compile_filter(expr) if compile_kwargs else expr else: # Still at positioned arguments. if expect_kwarg: raise TemplateSyntaxError("{0} tag may not have a non-keyword argument ({1}) after a keyword argument ({2}).".format(bits[0], bit, prev_bit)) args.append(parser.compile_filter(bit) if compile_args else bit) prev_bit = bit # Validate the allowed arguments, to make things easier for template developers if allowed_kwargs is not None and kwargs: if not allowed_kwargs: raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nNo keyword arguments are allowed.") for name in kwargs: if name not in allowed_kwargs: raise TemplateSyntaxError("The option %s=... cannot be used in '%s'.\nPossible options are: %s." % (name, bits[0], ", ".join(allowed_kwargs))) return tag_name, args, kwargs
def mode_assignment(arg): """ Translates arg to enforce proper assignment """ arg = arg.upper() stream_args = ('STREAM', 'CONSOLE', 'STDOUT') try: if arg in stream_args: return 'STREAM' else: return arg except Exception: return None
Translates arg to enforce proper assignment
Below is the the instruction that describes the task: ### Input: Translates arg to enforce proper assignment ### Response: def mode_assignment(arg): """ Translates arg to enforce proper assignment """ arg = arg.upper() stream_args = ('STREAM', 'CONSOLE', 'STDOUT') try: if arg in stream_args: return 'STREAM' else: return arg except Exception: return None
def process_docstring(app, what, name, obj, options, lines): """Process the docstring for a given python object. Called when autodoc has read and processed a docstring. `lines` is a list of docstring lines that `_process_docstring` modifies in place to change what Sphinx outputs. The following settings in conf.py control what styles of docstrings will be parsed: * ``napoleon_google_docstring`` -- parse Google style docstrings * ``napoleon_numpy_docstring`` -- parse NumPy style docstrings Parameters ---------- app : sphinx.application.Sphinx Application object representing the Sphinx process. what : str A string specifying the type of the object to which the docstring belongs. Valid values: "module", "class", "exception", "function", "method", "attribute". name : str The fully qualified name of the object. obj : module, class, exception, function, method, or attribute The object to which the docstring belongs. options : sphinx.ext.autodoc.Options The options given to the directive: an object with attributes inherited_members, undoc_members, show_inheritance and noindex that are True if the flag option of same name was given to the auto directive. lines : list of str The lines of the docstring, see above. .. note:: `lines` is modified *in place* Notes ----- This function is (to most parts) taken from the :mod:`sphinx.ext.napoleon` module, sphinx version 1.3.1, and adapted to the classes defined here""" result_lines = lines if app.config.napoleon_numpy_docstring: docstring = ExtendedNumpyDocstring( result_lines, app.config, app, what, name, obj, options) result_lines = docstring.lines() if app.config.napoleon_google_docstring: docstring = ExtendedGoogleDocstring( result_lines, app.config, app, what, name, obj, options) result_lines = docstring.lines() lines[:] = result_lines[:]
Process the docstring for a given python object. Called when autodoc has read and processed a docstring. `lines` is a list of docstring lines that `_process_docstring` modifies in place to change what Sphinx outputs. The following settings in conf.py control what styles of docstrings will be parsed: * ``napoleon_google_docstring`` -- parse Google style docstrings * ``napoleon_numpy_docstring`` -- parse NumPy style docstrings Parameters ---------- app : sphinx.application.Sphinx Application object representing the Sphinx process. what : str A string specifying the type of the object to which the docstring belongs. Valid values: "module", "class", "exception", "function", "method", "attribute". name : str The fully qualified name of the object. obj : module, class, exception, function, method, or attribute The object to which the docstring belongs. options : sphinx.ext.autodoc.Options The options given to the directive: an object with attributes inherited_members, undoc_members, show_inheritance and noindex that are True if the flag option of same name was given to the auto directive. lines : list of str The lines of the docstring, see above. .. note:: `lines` is modified *in place* Notes ----- This function is (to most parts) taken from the :mod:`sphinx.ext.napoleon` module, sphinx version 1.3.1, and adapted to the classes defined here
Below is the the instruction that describes the task: ### Input: Process the docstring for a given python object. Called when autodoc has read and processed a docstring. `lines` is a list of docstring lines that `_process_docstring` modifies in place to change what Sphinx outputs. The following settings in conf.py control what styles of docstrings will be parsed: * ``napoleon_google_docstring`` -- parse Google style docstrings * ``napoleon_numpy_docstring`` -- parse NumPy style docstrings Parameters ---------- app : sphinx.application.Sphinx Application object representing the Sphinx process. what : str A string specifying the type of the object to which the docstring belongs. Valid values: "module", "class", "exception", "function", "method", "attribute". name : str The fully qualified name of the object. obj : module, class, exception, function, method, or attribute The object to which the docstring belongs. options : sphinx.ext.autodoc.Options The options given to the directive: an object with attributes inherited_members, undoc_members, show_inheritance and noindex that are True if the flag option of same name was given to the auto directive. lines : list of str The lines of the docstring, see above. .. note:: `lines` is modified *in place* Notes ----- This function is (to most parts) taken from the :mod:`sphinx.ext.napoleon` module, sphinx version 1.3.1, and adapted to the classes defined here ### Response: def process_docstring(app, what, name, obj, options, lines): """Process the docstring for a given python object. Called when autodoc has read and processed a docstring. `lines` is a list of docstring lines that `_process_docstring` modifies in place to change what Sphinx outputs. 
The following settings in conf.py control what styles of docstrings will be parsed: * ``napoleon_google_docstring`` -- parse Google style docstrings * ``napoleon_numpy_docstring`` -- parse NumPy style docstrings Parameters ---------- app : sphinx.application.Sphinx Application object representing the Sphinx process. what : str A string specifying the type of the object to which the docstring belongs. Valid values: "module", "class", "exception", "function", "method", "attribute". name : str The fully qualified name of the object. obj : module, class, exception, function, method, or attribute The object to which the docstring belongs. options : sphinx.ext.autodoc.Options The options given to the directive: an object with attributes inherited_members, undoc_members, show_inheritance and noindex that are True if the flag option of same name was given to the auto directive. lines : list of str The lines of the docstring, see above. .. note:: `lines` is modified *in place* Notes ----- This function is (to most parts) taken from the :mod:`sphinx.ext.napoleon` module, sphinx version 1.3.1, and adapted to the classes defined here""" result_lines = lines if app.config.napoleon_numpy_docstring: docstring = ExtendedNumpyDocstring( result_lines, app.config, app, what, name, obj, options) result_lines = docstring.lines() if app.config.napoleon_google_docstring: docstring = ExtendedGoogleDocstring( result_lines, app.config, app, what, name, obj, options) result_lines = docstring.lines() lines[:] = result_lines[:]
def add_val(self, val): """add value in form of dict""" if not isinstance(val, type({})): raise ValueError(type({})) self.read() self.config.update(val) self.save()
add value in form of dict
Below is the the instruction that describes the task: ### Input: add value in form of dict ### Response: def add_val(self, val): """add value in form of dict""" if not isinstance(val, type({})): raise ValueError(type({})) self.read() self.config.update(val) self.save()
def _read_source(self) -> None: """ Reads the source file. """ with open(self.full_path, "rt") as f: for linenum, line_with_nl in enumerate(f.readlines(), start=1): line_without_newline = ( line_with_nl[:-1] if line_with_nl.endswith(NL) else line_with_nl ) if TAB in line_without_newline: self._warn("Tab character at line {}".format(linenum)) if CR in line_without_newline: self._warn("Carriage return character at line {} " "(Windows CR+LF endings?)".format(linenum)) self.source_lines.append(line_without_newline)
Reads the source file.
Below is the the instruction that describes the task: ### Input: Reads the source file. ### Response: def _read_source(self) -> None: """ Reads the source file. """ with open(self.full_path, "rt") as f: for linenum, line_with_nl in enumerate(f.readlines(), start=1): line_without_newline = ( line_with_nl[:-1] if line_with_nl.endswith(NL) else line_with_nl ) if TAB in line_without_newline: self._warn("Tab character at line {}".format(linenum)) if CR in line_without_newline: self._warn("Carriage return character at line {} " "(Windows CR+LF endings?)".format(linenum)) self.source_lines.append(line_without_newline)
def _get_udev_rules(self, channel_read, channel_write, channel_data): """construct udev rules info.""" sub_str = '%(read)s %%k %(read)s %(write)s %(data)s qeth' % { 'read': channel_read, 'read': channel_read, 'write': channel_write, 'data': channel_data} rules_str = '# Configure qeth device at' rules_str += ' %(read)s/%(write)s/%(data)s\n' % { 'read': channel_read, 'write': channel_write, 'data': channel_data} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==' '\"qeth\", IMPORT{program}=\"collect %s\"\n') % sub_str rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(read)s\", IMPORT{program}="collect %(channel)s\"\n') % { 'read': channel_read, 'channel': sub_str} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(write)s\", IMPORT{program}=\"collect %(channel)s\"\n') % { 'write': channel_write, 'channel': sub_str} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(data)s\", IMPORT{program}=\"collect %(channel)s\"\n') % { 'data': channel_data, 'channel': sub_str} rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"drivers\", KERNEL==\"' 'qeth\", IMPORT{program}=\"collect --remove %s\"\n') % sub_str rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(read)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n' ) % {'read': channel_read, 'channel': sub_str} rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(write)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n' ) % {'write': channel_write, 'channel': sub_str} rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(data)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n' ) % {'data': channel_data, 'channel': sub_str} rules_str += ('TEST==\"[ccwgroup/%(read)s]\", GOTO=\"qeth-%(read)s' '-end\"\n') % {'read': channel_read, 'read': channel_read} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", ENV{COLLECT_' '%(read)s}==\"0\", ATTR{[drivers/ccwgroup:qeth]group}=\"' '%(read)s,%(write)s,%(data)s\"\n') 
% { 'read': channel_read, 'read': channel_read, 'write': channel_write, 'data': channel_data} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==\"qeth' '\", ENV{COLLECT_%(read)s}==\"0\", ATTR{[drivers/' 'ccwgroup:qeth]group}=\"%(read)s,%(write)s,%(data)s\"\n' 'LABEL=\"qeth-%(read)s-end\"\n') % { 'read': channel_read, 'read': channel_read, 'write': channel_write, 'data': channel_data, 'read': channel_read} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL==' '\"%s\", ATTR{layer2}=\"1\"\n') % channel_read rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL==' '\"%s\", ATTR{online}=\"1\"\n') % channel_read return rules_str
construct udev rules info.
Below is the the instruction that describes the task: ### Input: construct udev rules info. ### Response: def _get_udev_rules(self, channel_read, channel_write, channel_data): """construct udev rules info.""" sub_str = '%(read)s %%k %(read)s %(write)s %(data)s qeth' % { 'read': channel_read, 'read': channel_read, 'write': channel_write, 'data': channel_data} rules_str = '# Configure qeth device at' rules_str += ' %(read)s/%(write)s/%(data)s\n' % { 'read': channel_read, 'write': channel_write, 'data': channel_data} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==' '\"qeth\", IMPORT{program}=\"collect %s\"\n') % sub_str rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(read)s\", IMPORT{program}="collect %(channel)s\"\n') % { 'read': channel_read, 'channel': sub_str} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(write)s\", IMPORT{program}=\"collect %(channel)s\"\n') % { 'write': channel_write, 'channel': sub_str} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(data)s\", IMPORT{program}=\"collect %(channel)s\"\n') % { 'data': channel_data, 'channel': sub_str} rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"drivers\", KERNEL==\"' 'qeth\", IMPORT{program}=\"collect --remove %s\"\n') % sub_str rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(read)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n' ) % {'read': channel_read, 'channel': sub_str} rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(write)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n' ) % {'write': channel_write, 'channel': sub_str} rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"' '%(data)s\", IMPORT{program}=\"collect --remove %(channel)s\"\n' ) % {'data': channel_data, 'channel': sub_str} rules_str += ('TEST==\"[ccwgroup/%(read)s]\", GOTO=\"qeth-%(read)s' '-end\"\n') % {'read': channel_read, 'read': channel_read} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", 
ENV{COLLECT_' '%(read)s}==\"0\", ATTR{[drivers/ccwgroup:qeth]group}=\"' '%(read)s,%(write)s,%(data)s\"\n') % { 'read': channel_read, 'read': channel_read, 'write': channel_write, 'data': channel_data} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==\"qeth' '\", ENV{COLLECT_%(read)s}==\"0\", ATTR{[drivers/' 'ccwgroup:qeth]group}=\"%(read)s,%(write)s,%(data)s\"\n' 'LABEL=\"qeth-%(read)s-end\"\n') % { 'read': channel_read, 'read': channel_read, 'write': channel_write, 'data': channel_data, 'read': channel_read} rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL==' '\"%s\", ATTR{layer2}=\"1\"\n') % channel_read rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL==' '\"%s\", ATTR{online}=\"1\"\n') % channel_read return rules_str
def randomChildElement(self, node): """choose a random child element of a node This is a utility method used by do_xref and do_choice. """ choices = [e for e in node.childNodes if e.nodeType == e.ELEMENT_NODE] chosen = random.choice(choices) if _debug: sys.stderr.write('%s available choices: %s\n' % \ (len(choices), [e.toxml() for e in choices])) sys.stderr.write('Chosen: %s\n' % chosen.toxml()) return chosen
choose a random child element of a node This is a utility method used by do_xref and do_choice.
Below is the the instruction that describes the task: ### Input: choose a random child element of a node This is a utility method used by do_xref and do_choice. ### Response: def randomChildElement(self, node): """choose a random child element of a node This is a utility method used by do_xref and do_choice. """ choices = [e for e in node.childNodes if e.nodeType == e.ELEMENT_NODE] chosen = random.choice(choices) if _debug: sys.stderr.write('%s available choices: %s\n' % \ (len(choices), [e.toxml() for e in choices])) sys.stderr.write('Chosen: %s\n' % chosen.toxml()) return chosen
def percentile(x, q, axis=None, interpolation=None, keep_dims=False, validate_args=False, preserve_gradients=True, name=None): """Compute the `q`-th percentile(s) of `x`. Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the way from the minimum to the maximum in a sorted copy of `x`. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of `q` exactly. This function is the same as the median if `q = 50`, the same as the minimum if `q = 0` and the same as the maximum if `q = 100`. Multiple percentiles can be computed at once by using `1-D` vector `q`. Dimension zero of the returned `Tensor` will index the different percentiles. Compare to `numpy.percentile`. Args: x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`, `x` must have statically known number of dimensions. q: Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s). axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The axis that index independent samples over which to return the desired percentile. If `None` (the default), treat every dimension as a sample dimension, returning a scalar. interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}. Default value: 'nearest'. This specifies the interpolation method to use when the desired quantile lies between two data points `i < j`: * linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j. * lower: `i`. * higher: `j`. * nearest: `i` or `j`, whichever is nearest. * midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not work with integer dtypes. keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1 If `False`, the last dimension is removed from the output shape. validate_args: Whether to add runtime checks of argument validity. 
If False, and arguments are incorrect, correct behavior is not guaranteed. preserve_gradients: Python `bool`. If `True`, ensure that gradient w.r.t the percentile `q` is preserved in the case of linear interpolation. If `False`, the gradient will be (incorrectly) zero when `q` corresponds to a point in `x`. name: A Python string name to give this `Op`. Default is 'percentile' Returns: A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if `axis` is `None`, a `rank(q)` `Tensor`. The first `rank(q)` dimensions index quantiles for different values of `q`. Raises: ValueError: If argument 'interpolation' is not an allowed type. ValueError: If interpolation type not compatible with `dtype`. #### Examples ```python # Get 30th percentile with default ('nearest') interpolation. x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=30.) ==> 2.0 # Get 30th percentile with 'linear' interpolation. x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=30., interpolation='linear') ==> 1.9 # Get 30th and 70th percentiles with 'lower' interpolation x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=[30., 70.], interpolation='lower') ==> [1., 3.] # Get 100th percentile (maximum). By default, this is computed over every dim x = [[1., 2.] [3., 4.]] tfp.stats.percentile(x, q=100.) ==> 4. # Treat the leading dim as indexing samples, and find the 100th quantile (max) # over all such samples. x = [[1., 2.] [3., 4.]] tfp.stats.percentile(x, q=100., axis=[0]) ==> [3., 4.] ``` """ name = name or 'percentile' allowed_interpolations = {'linear', 'lower', 'higher', 'nearest', 'midpoint'} if interpolation is None: interpolation = 'nearest' else: if interpolation not in allowed_interpolations: raise ValueError('Argument `interpolation` must be in %s. 
Found %s' % (allowed_interpolations, interpolation)) with tf.compat.v1.name_scope(name, values=[x, q]): x = tf.convert_to_tensor(value=x, name='x') if interpolation in {'linear', 'midpoint'} and x.dtype.is_integer: raise TypeError('{} interpolation not allowed with dtype {}'.format( interpolation, x.dtype)) # Double is needed here and below, else we get the wrong index if the array # is huge along axis. q = tf.cast(q, tf.float64) _get_static_ndims(q, expect_ndims_no_more_than=1) if validate_args: q = distribution_util.with_dependencies([ tf.compat.v1.assert_rank_in(q, [0, 1]), tf.compat.v1.assert_greater_equal(q, tf.cast(0., tf.float64)), tf.compat.v1.assert_less_equal(q, tf.cast(100., tf.float64)) ], q) # Move `axis` dims of `x` to the rightmost, call it `y`. if axis is None: y = tf.reshape(x, [-1]) else: x_ndims = _get_static_ndims( x, expect_static=True, expect_ndims_at_least=1) axis = _make_static_axis_non_negative_list(axis, x_ndims) y = _move_dims_to_flat_end(x, axis, x_ndims, right_end=True) frac_at_q_or_above = 1. - q / 100. # Sort everything, not just the top 'k' entries, which allows multiple calls # to sort only once (under the hood) and use CSE. sorted_y = _sort_tensor(y) d = tf.cast(tf.shape(input=y)[-1], tf.float64) def _get_indices(interp_type): """Get values of y at the indices implied by interp_type.""" # Note `lower` <--> ceiling. Confusing, huh? Due to the fact that # _sort_tensor sorts highest to lowest, tf.ceil corresponds to the higher # index, but the lower value of y! if interp_type == 'lower': indices = tf.math.ceil((d - 1) * frac_at_q_or_above) elif interp_type == 'higher': indices = tf.floor((d - 1) * frac_at_q_or_above) elif interp_type == 'nearest': indices = tf.round((d - 1) * frac_at_q_or_above) # d - 1 will be distinct from d in int32, but not necessarily double. # So clip to avoid out of bounds errors. 
return tf.clip_by_value( tf.cast(indices, tf.int32), 0, tf.shape(input=y)[-1] - 1) if interpolation in ['nearest', 'lower', 'higher']: gathered_y = tf.gather(sorted_y, _get_indices(interpolation), axis=-1) elif interpolation == 'midpoint': gathered_y = 0.5 * ( tf.gather(sorted_y, _get_indices('lower'), axis=-1) + tf.gather(sorted_y, _get_indices('higher'), axis=-1)) elif interpolation == 'linear': # Copy-paste of docstring on interpolation: # linear: i + (j - i) * fraction, where fraction is the fractional part # of the index surrounded by i and j. larger_y_idx = _get_indices('lower') exact_idx = (d - 1) * frac_at_q_or_above if preserve_gradients: # If q corresponds to a point in x, we will initially have # larger_y_idx == smaller_y_idx. # This results in the gradient w.r.t. fraction being zero (recall `q` # enters only through `fraction`...and see that things cancel). # The fix is to ensure that smaller_y_idx and larger_y_idx are always # separated by exactly 1. smaller_y_idx = tf.maximum(larger_y_idx - 1, 0) larger_y_idx = tf.minimum(smaller_y_idx + 1, tf.shape(input=y)[-1] - 1) fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx else: smaller_y_idx = _get_indices('higher') fraction = tf.math.ceil((d - 1) * frac_at_q_or_above) - exact_idx fraction = tf.cast(fraction, y.dtype) gathered_y = ( tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction) + tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction) # Propagate NaNs if x.dtype in (tf.bfloat16, tf.float16, tf.float32, tf.float64): # Apparently tf.is_nan doesn't like other dtypes nan_batch_members = tf.reduce_any( input_tensor=tf.math.is_nan(x), axis=axis) right_rank_matched_shape = tf.pad( tensor=tf.shape(input=nan_batch_members), paddings=[[0, tf.rank(input=q)]], constant_values=1) nan_batch_members = tf.reshape( nan_batch_members, shape=right_rank_matched_shape) shape_gathered_y = tf.shape(input=gathered_y) nan = np.array(np.nan, gathered_y.dtype.as_numpy_dtype) gathered_y = tf.where( 
tf.broadcast_to(nan_batch_members, shape_gathered_y), tf.fill(shape_gathered_y, nan), gathered_y) # Expand dimensions if requested if keep_dims: if axis is None: ones_vec = tf.ones( shape=[_get_best_effort_ndims(x) + _get_best_effort_ndims(q)], dtype=tf.int32) gathered_y *= tf.ones(ones_vec, dtype=x.dtype) else: gathered_y = _insert_back_keep_dims(gathered_y, axis) # If q is a scalar, then result has the right shape. # If q is a vector, then result has trailing dim of shape q.shape, which # needs to be rotated to dim 0. return distribution_util.rotate_transpose(gathered_y, tf.rank(q))
Compute the `q`-th percentile(s) of `x`. Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the way from the minimum to the maximum in a sorted copy of `x`. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of `q` exactly. This function is the same as the median if `q = 50`, the same as the minimum if `q = 0` and the same as the maximum if `q = 100`. Multiple percentiles can be computed at once by using `1-D` vector `q`. Dimension zero of the returned `Tensor` will index the different percentiles. Compare to `numpy.percentile`. Args: x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`, `x` must have statically known number of dimensions. q: Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s). axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The axis that index independent samples over which to return the desired percentile. If `None` (the default), treat every dimension as a sample dimension, returning a scalar. interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}. Default value: 'nearest'. This specifies the interpolation method to use when the desired quantile lies between two data points `i < j`: * linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j. * lower: `i`. * higher: `j`. * nearest: `i` or `j`, whichever is nearest. * midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not work with integer dtypes. keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1 If `False`, the last dimension is removed from the output shape. validate_args: Whether to add runtime checks of argument validity. If False, and arguments are incorrect, correct behavior is not guaranteed. preserve_gradients: Python `bool`. 
If `True`, ensure that gradient w.r.t the percentile `q` is preserved in the case of linear interpolation. If `False`, the gradient will be (incorrectly) zero when `q` corresponds to a point in `x`. name: A Python string name to give this `Op`. Default is 'percentile' Returns: A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if `axis` is `None`, a `rank(q)` `Tensor`. The first `rank(q)` dimensions index quantiles for different values of `q`. Raises: ValueError: If argument 'interpolation' is not an allowed type. ValueError: If interpolation type not compatible with `dtype`. #### Examples ```python # Get 30th percentile with default ('nearest') interpolation. x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=30.) ==> 2.0 # Get 30th percentile with 'linear' interpolation. x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=30., interpolation='linear') ==> 1.9 # Get 30th and 70th percentiles with 'lower' interpolation x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=[30., 70.], interpolation='lower') ==> [1., 3.] # Get 100th percentile (maximum). By default, this is computed over every dim x = [[1., 2.] [3., 4.]] tfp.stats.percentile(x, q=100.) ==> 4. # Treat the leading dim as indexing samples, and find the 100th quantile (max) # over all such samples. x = [[1., 2.] [3., 4.]] tfp.stats.percentile(x, q=100., axis=[0]) ==> [3., 4.] ```
Below is the the instruction that describes the task: ### Input: Compute the `q`-th percentile(s) of `x`. Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the way from the minimum to the maximum in a sorted copy of `x`. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of `q` exactly. This function is the same as the median if `q = 50`, the same as the minimum if `q = 0` and the same as the maximum if `q = 100`. Multiple percentiles can be computed at once by using `1-D` vector `q`. Dimension zero of the returned `Tensor` will index the different percentiles. Compare to `numpy.percentile`. Args: x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`, `x` must have statically known number of dimensions. q: Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s). axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The axis that index independent samples over which to return the desired percentile. If `None` (the default), treat every dimension as a sample dimension, returning a scalar. interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}. Default value: 'nearest'. This specifies the interpolation method to use when the desired quantile lies between two data points `i < j`: * linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j. * lower: `i`. * higher: `j`. * nearest: `i` or `j`, whichever is nearest. * midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not work with integer dtypes. keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1 If `False`, the last dimension is removed from the output shape. validate_args: Whether to add runtime checks of argument validity. If False, and arguments are incorrect, correct behavior is not guaranteed. preserve_gradients: Python `bool`. 
If `True`, ensure that gradient w.r.t the percentile `q` is preserved in the case of linear interpolation. If `False`, the gradient will be (incorrectly) zero when `q` corresponds to a point in `x`. name: A Python string name to give this `Op`. Default is 'percentile' Returns: A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if `axis` is `None`, a `rank(q)` `Tensor`. The first `rank(q)` dimensions index quantiles for different values of `q`. Raises: ValueError: If argument 'interpolation' is not an allowed type. ValueError: If interpolation type not compatible with `dtype`. #### Examples ```python # Get 30th percentile with default ('nearest') interpolation. x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=30.) ==> 2.0 # Get 30th percentile with 'linear' interpolation. x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=30., interpolation='linear') ==> 1.9 # Get 30th and 70th percentiles with 'lower' interpolation x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=[30., 70.], interpolation='lower') ==> [1., 3.] # Get 100th percentile (maximum). By default, this is computed over every dim x = [[1., 2.] [3., 4.]] tfp.stats.percentile(x, q=100.) ==> 4. # Treat the leading dim as indexing samples, and find the 100th quantile (max) # over all such samples. x = [[1., 2.] [3., 4.]] tfp.stats.percentile(x, q=100., axis=[0]) ==> [3., 4.] ``` ### Response: def percentile(x, q, axis=None, interpolation=None, keep_dims=False, validate_args=False, preserve_gradients=True, name=None): """Compute the `q`-th percentile(s) of `x`. Given a vector `x`, the `q`-th percentile of `x` is the value `q / 100` of the way from the minimum to the maximum in a sorted copy of `x`. The values and distances of the two nearest neighbors as well as the `interpolation` parameter will determine the percentile if the normalized ranking does not match the location of `q` exactly. 
This function is the same as the median if `q = 50`, the same as the minimum if `q = 0` and the same as the maximum if `q = 100`. Multiple percentiles can be computed at once by using `1-D` vector `q`. Dimension zero of the returned `Tensor` will index the different percentiles. Compare to `numpy.percentile`. Args: x: Numeric `N-D` `Tensor` with `N > 0`. If `axis` is not `None`, `x` must have statically known number of dimensions. q: Scalar or vector `Tensor` with values in `[0, 100]`. The percentile(s). axis: Optional `0-D` or `1-D` integer `Tensor` with constant values. The axis that index independent samples over which to return the desired percentile. If `None` (the default), treat every dimension as a sample dimension, returning a scalar. interpolation : {'nearest', 'linear', 'lower', 'higher', 'midpoint'}. Default value: 'nearest'. This specifies the interpolation method to use when the desired quantile lies between two data points `i < j`: * linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j. * lower: `i`. * higher: `j`. * nearest: `i` or `j`, whichever is nearest. * midpoint: (i + j) / 2. `linear` and `midpoint` interpolation do not work with integer dtypes. keep_dims: Python `bool`. If `True`, the last dimension is kept with size 1 If `False`, the last dimension is removed from the output shape. validate_args: Whether to add runtime checks of argument validity. If False, and arguments are incorrect, correct behavior is not guaranteed. preserve_gradients: Python `bool`. If `True`, ensure that gradient w.r.t the percentile `q` is preserved in the case of linear interpolation. If `False`, the gradient will be (incorrectly) zero when `q` corresponds to a point in `x`. name: A Python string name to give this `Op`. Default is 'percentile' Returns: A `(rank(q) + N - len(axis))` dimensional `Tensor` of same dtype as `x`, or, if `axis` is `None`, a `rank(q)` `Tensor`. 
The first `rank(q)` dimensions index quantiles for different values of `q`. Raises: ValueError: If argument 'interpolation' is not an allowed type. ValueError: If interpolation type not compatible with `dtype`. #### Examples ```python # Get 30th percentile with default ('nearest') interpolation. x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=30.) ==> 2.0 # Get 30th percentile with 'linear' interpolation. x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=30., interpolation='linear') ==> 1.9 # Get 30th and 70th percentiles with 'lower' interpolation x = [1., 2., 3., 4.] tfp.stats.percentile(x, q=[30., 70.], interpolation='lower') ==> [1., 3.] # Get 100th percentile (maximum). By default, this is computed over every dim x = [[1., 2.] [3., 4.]] tfp.stats.percentile(x, q=100.) ==> 4. # Treat the leading dim as indexing samples, and find the 100th quantile (max) # over all such samples. x = [[1., 2.] [3., 4.]] tfp.stats.percentile(x, q=100., axis=[0]) ==> [3., 4.] ``` """ name = name or 'percentile' allowed_interpolations = {'linear', 'lower', 'higher', 'nearest', 'midpoint'} if interpolation is None: interpolation = 'nearest' else: if interpolation not in allowed_interpolations: raise ValueError('Argument `interpolation` must be in %s. Found %s' % (allowed_interpolations, interpolation)) with tf.compat.v1.name_scope(name, values=[x, q]): x = tf.convert_to_tensor(value=x, name='x') if interpolation in {'linear', 'midpoint'} and x.dtype.is_integer: raise TypeError('{} interpolation not allowed with dtype {}'.format( interpolation, x.dtype)) # Double is needed here and below, else we get the wrong index if the array # is huge along axis. 
q = tf.cast(q, tf.float64) _get_static_ndims(q, expect_ndims_no_more_than=1) if validate_args: q = distribution_util.with_dependencies([ tf.compat.v1.assert_rank_in(q, [0, 1]), tf.compat.v1.assert_greater_equal(q, tf.cast(0., tf.float64)), tf.compat.v1.assert_less_equal(q, tf.cast(100., tf.float64)) ], q) # Move `axis` dims of `x` to the rightmost, call it `y`. if axis is None: y = tf.reshape(x, [-1]) else: x_ndims = _get_static_ndims( x, expect_static=True, expect_ndims_at_least=1) axis = _make_static_axis_non_negative_list(axis, x_ndims) y = _move_dims_to_flat_end(x, axis, x_ndims, right_end=True) frac_at_q_or_above = 1. - q / 100. # Sort everything, not just the top 'k' entries, which allows multiple calls # to sort only once (under the hood) and use CSE. sorted_y = _sort_tensor(y) d = tf.cast(tf.shape(input=y)[-1], tf.float64) def _get_indices(interp_type): """Get values of y at the indices implied by interp_type.""" # Note `lower` <--> ceiling. Confusing, huh? Due to the fact that # _sort_tensor sorts highest to lowest, tf.ceil corresponds to the higher # index, but the lower value of y! if interp_type == 'lower': indices = tf.math.ceil((d - 1) * frac_at_q_or_above) elif interp_type == 'higher': indices = tf.floor((d - 1) * frac_at_q_or_above) elif interp_type == 'nearest': indices = tf.round((d - 1) * frac_at_q_or_above) # d - 1 will be distinct from d in int32, but not necessarily double. # So clip to avoid out of bounds errors. 
return tf.clip_by_value( tf.cast(indices, tf.int32), 0, tf.shape(input=y)[-1] - 1) if interpolation in ['nearest', 'lower', 'higher']: gathered_y = tf.gather(sorted_y, _get_indices(interpolation), axis=-1) elif interpolation == 'midpoint': gathered_y = 0.5 * ( tf.gather(sorted_y, _get_indices('lower'), axis=-1) + tf.gather(sorted_y, _get_indices('higher'), axis=-1)) elif interpolation == 'linear': # Copy-paste of docstring on interpolation: # linear: i + (j - i) * fraction, where fraction is the fractional part # of the index surrounded by i and j. larger_y_idx = _get_indices('lower') exact_idx = (d - 1) * frac_at_q_or_above if preserve_gradients: # If q corresponds to a point in x, we will initially have # larger_y_idx == smaller_y_idx. # This results in the gradient w.r.t. fraction being zero (recall `q` # enters only through `fraction`...and see that things cancel). # The fix is to ensure that smaller_y_idx and larger_y_idx are always # separated by exactly 1. smaller_y_idx = tf.maximum(larger_y_idx - 1, 0) larger_y_idx = tf.minimum(smaller_y_idx + 1, tf.shape(input=y)[-1] - 1) fraction = tf.cast(larger_y_idx, tf.float64) - exact_idx else: smaller_y_idx = _get_indices('higher') fraction = tf.math.ceil((d - 1) * frac_at_q_or_above) - exact_idx fraction = tf.cast(fraction, y.dtype) gathered_y = ( tf.gather(sorted_y, larger_y_idx, axis=-1) * (1 - fraction) + tf.gather(sorted_y, smaller_y_idx, axis=-1) * fraction) # Propagate NaNs if x.dtype in (tf.bfloat16, tf.float16, tf.float32, tf.float64): # Apparently tf.is_nan doesn't like other dtypes nan_batch_members = tf.reduce_any( input_tensor=tf.math.is_nan(x), axis=axis) right_rank_matched_shape = tf.pad( tensor=tf.shape(input=nan_batch_members), paddings=[[0, tf.rank(input=q)]], constant_values=1) nan_batch_members = tf.reshape( nan_batch_members, shape=right_rank_matched_shape) shape_gathered_y = tf.shape(input=gathered_y) nan = np.array(np.nan, gathered_y.dtype.as_numpy_dtype) gathered_y = tf.where( 
tf.broadcast_to(nan_batch_members, shape_gathered_y), tf.fill(shape_gathered_y, nan), gathered_y) # Expand dimensions if requested if keep_dims: if axis is None: ones_vec = tf.ones( shape=[_get_best_effort_ndims(x) + _get_best_effort_ndims(q)], dtype=tf.int32) gathered_y *= tf.ones(ones_vec, dtype=x.dtype) else: gathered_y = _insert_back_keep_dims(gathered_y, axis) # If q is a scalar, then result has the right shape. # If q is a vector, then result has trailing dim of shape q.shape, which # needs to be rotated to dim 0. return distribution_util.rotate_transpose(gathered_y, tf.rank(q))
def get_next_question(self, question_id, answered=None, reverse=False, honor_sequential=True): """Inspects question map to return the next available question. if answered == False: only return next unanswered question if answered == True: only return next answered question if answered in None: return next question whether answered or not if reverse == True: go backwards - effectively get_previous_question if honor_sequential == True: only return questions if section or part is set to sequential items """ self._update_questions() # Make sure questions list is current question_map = self._get_question_map(question_id) # will raise NotFound() questions = list(self._my_map['questions']) if reverse: questions = questions[::-1] error_text = ' previous ' else: if 'missingResponse' in question_map: if self._is_question_sequential(question_map) and honor_sequential: raise errors.IllegalState('Next question is not yet available') error_text = ' next ' if questions[-1] == question_map: raise errors.IllegalState('No ' + error_text + ' questions available') index = questions.index(question_map) + 1 for question_map in questions[index:]: latest_question_response = question_map['responses'][0] question_answered = False # take missingResponse == UNANSWERED or NULL_RESPONSE as an unanswered question if 'missingResponse' not in latest_question_response: question_answered = True if answered is None or question_answered == answered: return self.get_question(question_map=question_map) raise errors.IllegalState('No ' + error_text + ' question matching parameters was found')
Inspects question map to return the next available question. if answered == False: only return next unanswered question if answered == True: only return next answered question if answered in None: return next question whether answered or not if reverse == True: go backwards - effectively get_previous_question if honor_sequential == True: only return questions if section or part is set to sequential items
Below is the the instruction that describes the task: ### Input: Inspects question map to return the next available question. if answered == False: only return next unanswered question if answered == True: only return next answered question if answered in None: return next question whether answered or not if reverse == True: go backwards - effectively get_previous_question if honor_sequential == True: only return questions if section or part is set to sequential items ### Response: def get_next_question(self, question_id, answered=None, reverse=False, honor_sequential=True): """Inspects question map to return the next available question. if answered == False: only return next unanswered question if answered == True: only return next answered question if answered in None: return next question whether answered or not if reverse == True: go backwards - effectively get_previous_question if honor_sequential == True: only return questions if section or part is set to sequential items """ self._update_questions() # Make sure questions list is current question_map = self._get_question_map(question_id) # will raise NotFound() questions = list(self._my_map['questions']) if reverse: questions = questions[::-1] error_text = ' previous ' else: if 'missingResponse' in question_map: if self._is_question_sequential(question_map) and honor_sequential: raise errors.IllegalState('Next question is not yet available') error_text = ' next ' if questions[-1] == question_map: raise errors.IllegalState('No ' + error_text + ' questions available') index = questions.index(question_map) + 1 for question_map in questions[index:]: latest_question_response = question_map['responses'][0] question_answered = False # take missingResponse == UNANSWERED or NULL_RESPONSE as an unanswered question if 'missingResponse' not in latest_question_response: question_answered = True if answered is None or question_answered == answered: return self.get_question(question_map=question_map) raise 
errors.IllegalState('No ' + error_text + ' question matching parameters was found')
def _feature_to_fields(f, jsonify=True): """ Convert feature to tuple, for faster sqlite3 import """ x = [] for k in constants._keys: v = getattr(f, k) if jsonify and (k in ('attributes', 'extra')): x.append(_jsonify(v)) else: x.append(v) return tuple(x)
Convert feature to tuple, for faster sqlite3 import
Below is the the instruction that describes the task: ### Input: Convert feature to tuple, for faster sqlite3 import ### Response: def _feature_to_fields(f, jsonify=True): """ Convert feature to tuple, for faster sqlite3 import """ x = [] for k in constants._keys: v = getattr(f, k) if jsonify and (k in ('attributes', 'extra')): x.append(_jsonify(v)) else: x.append(v) return tuple(x)
def fix_module(job): """ Fix for tasks without a module. Provides backwards compatibility with < 0.1.5 """ modules = settings.RQ_JOBS_MODULE if not type(modules) == tuple: modules = [modules] for module in modules: try: module_match = importlib.import_module(module) if hasattr(module_match, job.task): job.task = '{}.{}'.format(module, job.task) break except ImportError: continue return job
Fix for tasks without a module. Provides backwards compatibility with < 0.1.5
Below is the the instruction that describes the task: ### Input: Fix for tasks without a module. Provides backwards compatibility with < 0.1.5 ### Response: def fix_module(job): """ Fix for tasks without a module. Provides backwards compatibility with < 0.1.5 """ modules = settings.RQ_JOBS_MODULE if not type(modules) == tuple: modules = [modules] for module in modules: try: module_match = importlib.import_module(module) if hasattr(module_match, job.task): job.task = '{}.{}'.format(module, job.task) break except ImportError: continue return job
def _call_one_middleware(self, middleware): ''' Evaluate arguments and execute the middleware function ''' args = {} for arg in middleware['args']: if hasattr(self, arg): # same as eval() but safer for arbitrary code execution args[arg] = reduce(getattr, arg.split('.'), self) self.logger.debug('calling middleware event {}' .format(middleware['name'])) middleware['call'](**args)
Evaluate arguments and execute the middleware function
Below is the the instruction that describes the task: ### Input: Evaluate arguments and execute the middleware function ### Response: def _call_one_middleware(self, middleware): ''' Evaluate arguments and execute the middleware function ''' args = {} for arg in middleware['args']: if hasattr(self, arg): # same as eval() but safer for arbitrary code execution args[arg] = reduce(getattr, arg.split('.'), self) self.logger.debug('calling middleware event {}' .format(middleware['name'])) middleware['call'](**args)
def getTrustForJID(self, bare_jid): """ All-in-one trust information for all devices of a bare jid. The result is structured like this: { "active" : { device: int => trust_info } "inactive" : { device: int => trust_info } } where trust_info is the structure returned by getTrustForDevice. """ result = { "active" : {}, "inactive" : {} } devices = yield self.__loadActiveDevices(bare_jid) for device in devices: result["active"][device] = yield self.getTrustForDevice(bare_jid, device) devices = yield self.__loadInactiveDevices(bare_jid) for device in devices: result["inactive"][device] = yield self.getTrustForDevice(bare_jid, device) promise.returnValue(result)
All-in-one trust information for all devices of a bare jid. The result is structured like this: { "active" : { device: int => trust_info } "inactive" : { device: int => trust_info } } where trust_info is the structure returned by getTrustForDevice.
Below is the the instruction that describes the task: ### Input: All-in-one trust information for all devices of a bare jid. The result is structured like this: { "active" : { device: int => trust_info } "inactive" : { device: int => trust_info } } where trust_info is the structure returned by getTrustForDevice. ### Response: def getTrustForJID(self, bare_jid): """ All-in-one trust information for all devices of a bare jid. The result is structured like this: { "active" : { device: int => trust_info } "inactive" : { device: int => trust_info } } where trust_info is the structure returned by getTrustForDevice. """ result = { "active" : {}, "inactive" : {} } devices = yield self.__loadActiveDevices(bare_jid) for device in devices: result["active"][device] = yield self.getTrustForDevice(bare_jid, device) devices = yield self.__loadInactiveDevices(bare_jid) for device in devices: result["inactive"][device] = yield self.getTrustForDevice(bare_jid, device) promise.returnValue(result)
def dfs(graph, func, head, reverse=None): """ DEPTH FIRST SEARCH IF func RETURNS FALSE, THEN PATH IS NO LONGER TAKEN IT'S EXPECTED func TAKES 3 ARGUMENTS node - THE CURRENT NODE IN THE path - PATH FROM head TO node graph - THE WHOLE GRAPH """ todo = deque() todo.append(head) path = deque() done = set() while todo: node = todo.popleft() if node in done: path.pop() continue done.add(node) path.append(node) result = func(node, path, graph) if result: if reverse: children = graph.get_parents(node) else: children = graph.get_children(node) todo.extend(children)
DEPTH FIRST SEARCH IF func RETURNS FALSE, THEN PATH IS NO LONGER TAKEN IT'S EXPECTED func TAKES 3 ARGUMENTS node - THE CURRENT NODE IN THE path - PATH FROM head TO node graph - THE WHOLE GRAPH
Below is the the instruction that describes the task: ### Input: DEPTH FIRST SEARCH IF func RETURNS FALSE, THEN PATH IS NO LONGER TAKEN IT'S EXPECTED func TAKES 3 ARGUMENTS node - THE CURRENT NODE IN THE path - PATH FROM head TO node graph - THE WHOLE GRAPH ### Response: def dfs(graph, func, head, reverse=None): """ DEPTH FIRST SEARCH IF func RETURNS FALSE, THEN PATH IS NO LONGER TAKEN IT'S EXPECTED func TAKES 3 ARGUMENTS node - THE CURRENT NODE IN THE path - PATH FROM head TO node graph - THE WHOLE GRAPH """ todo = deque() todo.append(head) path = deque() done = set() while todo: node = todo.popleft() if node in done: path.pop() continue done.add(node) path.append(node) result = func(node, path, graph) if result: if reverse: children = graph.get_parents(node) else: children = graph.get_children(node) todo.extend(children)
def PrintRanges(type, name, ranges): """Print the ranges as an array of type named name.""" print "static const %s %s[] = {" % (type, name,) for lo, hi in ranges: print "\t{ %d, %d }," % (lo, hi) print "};"
Print the ranges as an array of type named name.
Below is the the instruction that describes the task: ### Input: Print the ranges as an array of type named name. ### Response: def PrintRanges(type, name, ranges): """Print the ranges as an array of type named name.""" print "static const %s %s[] = {" % (type, name,) for lo, hi in ranges: print "\t{ %d, %d }," % (lo, hi) print "};"
def nlmsg_type(self, value): """Message content setter.""" self.bytearray[self._get_slicers(1)] = bytearray(c_uint16(value or 0))
Message content setter.
Below is the the instruction that describes the task: ### Input: Message content setter. ### Response: def nlmsg_type(self, value): """Message content setter.""" self.bytearray[self._get_slicers(1)] = bytearray(c_uint16(value or 0))
def fuzz_elements(self, element): """ Fuzz all elements inside the object """ try: if type(element) == dict: tmp_element = {} for key in element: if len(self.config.parameters) > 0: if self.config.exclude_parameters: fuzz = key not in self.config.parameters else: fuzz = key in self.config.parameters else: fuzz = True if fuzz: if type(element[key]) == dict: tmp_element.update({key: self.fuzz_elements(element[key])}) elif type(element[key]) == list: tmp_element.update({key: self.fuzz_elements(element[key])}) else: tmp_element.update({key: self.mutator.fuzz(element[key])}) else: tmp_element.update({key: self.fuzz_elements(element[key])}) element = tmp_element del tmp_element elif type(element) == list: arr = [] for key in element: if type(key) == dict: arr.append(self.fuzz_elements(key)) elif type(key) == list: arr.append(self.fuzz_elements(key)) else: if len(self.config.parameters) <= 0: arr.append(self.mutator.fuzz(key)) else: arr.append(key) element = arr del arr except Exception as e: raise PJFBaseException(e.message if hasattr(e, "message") else str(e)) return element
Fuzz all elements inside the object
Below is the the instruction that describes the task: ### Input: Fuzz all elements inside the object ### Response: def fuzz_elements(self, element): """ Fuzz all elements inside the object """ try: if type(element) == dict: tmp_element = {} for key in element: if len(self.config.parameters) > 0: if self.config.exclude_parameters: fuzz = key not in self.config.parameters else: fuzz = key in self.config.parameters else: fuzz = True if fuzz: if type(element[key]) == dict: tmp_element.update({key: self.fuzz_elements(element[key])}) elif type(element[key]) == list: tmp_element.update({key: self.fuzz_elements(element[key])}) else: tmp_element.update({key: self.mutator.fuzz(element[key])}) else: tmp_element.update({key: self.fuzz_elements(element[key])}) element = tmp_element del tmp_element elif type(element) == list: arr = [] for key in element: if type(key) == dict: arr.append(self.fuzz_elements(key)) elif type(key) == list: arr.append(self.fuzz_elements(key)) else: if len(self.config.parameters) <= 0: arr.append(self.mutator.fuzz(key)) else: arr.append(key) element = arr del arr except Exception as e: raise PJFBaseException(e.message if hasattr(e, "message") else str(e)) return element
def project_data_dir(self, *args) -> str: """ Directory where to store data """ return os.path.normpath(os.path.join(self.project_dir, 'data', *args))
Directory where to store data
Below is the the instruction that describes the task: ### Input: Directory where to store data ### Response: def project_data_dir(self, *args) -> str: """ Directory where to store data """ return os.path.normpath(os.path.join(self.project_dir, 'data', *args))
def prepare(self): """When connecting start the next connection step and schedule next `prepare` call, when connected return `HandlerReady()` """ with self._lock: if self._socket: self._socket.listen(SOMAXCONN) self._socket.setblocking(False) return HandlerReady()
When connecting start the next connection step and schedule next `prepare` call, when connected return `HandlerReady()`
Below is the the instruction that describes the task: ### Input: When connecting start the next connection step and schedule next `prepare` call, when connected return `HandlerReady()` ### Response: def prepare(self): """When connecting start the next connection step and schedule next `prepare` call, when connected return `HandlerReady()` """ with self._lock: if self._socket: self._socket.listen(SOMAXCONN) self._socket.setblocking(False) return HandlerReady()
def render_image1(self, rgbobj, dst_x, dst_y): """Render the image represented by (rgbobj) at dst_x, dst_y in the pixel space. NOTE: this version uses a Figure.FigImage to render the image. """ self.logger.debug("redraw surface") if self.figure is None: return ## left, bottom, width, height = self.ax_img.bbox.bounds ## self._imgwin_wd, self._imgwin_ht = width, height # Grab the RGB array for the current image and place it in the # matplotlib figure axis data = self.getwin_array(order=self.rgb_order, dtype=np.uint8) dst_x = dst_y = 0 # fill background color ## rect = self.figure.patch ## rect.set_facecolor(self.img_bg) # attempt 1: using a FigureImage (non-scaled) if self.mpimage is None: self.mpimage = self.figure.figimage(data, xo=dst_x, yo=dst_y, origin='upper') else: # note: this is not a typo--these offsets have a different # attribute name than in the constructor ('ox' vs. 'xo') self.mpimage.ox = dst_x self.mpimage.oy = dst_y self.mpimage.set_data(data)
Render the image represented by (rgbobj) at dst_x, dst_y in the pixel space. NOTE: this version uses a Figure.FigImage to render the image.
Below is the the instruction that describes the task: ### Input: Render the image represented by (rgbobj) at dst_x, dst_y in the pixel space. NOTE: this version uses a Figure.FigImage to render the image. ### Response: def render_image1(self, rgbobj, dst_x, dst_y): """Render the image represented by (rgbobj) at dst_x, dst_y in the pixel space. NOTE: this version uses a Figure.FigImage to render the image. """ self.logger.debug("redraw surface") if self.figure is None: return ## left, bottom, width, height = self.ax_img.bbox.bounds ## self._imgwin_wd, self._imgwin_ht = width, height # Grab the RGB array for the current image and place it in the # matplotlib figure axis data = self.getwin_array(order=self.rgb_order, dtype=np.uint8) dst_x = dst_y = 0 # fill background color ## rect = self.figure.patch ## rect.set_facecolor(self.img_bg) # attempt 1: using a FigureImage (non-scaled) if self.mpimage is None: self.mpimage = self.figure.figimage(data, xo=dst_x, yo=dst_y, origin='upper') else: # note: this is not a typo--these offsets have a different # attribute name than in the constructor ('ox' vs. 'xo') self.mpimage.ox = dst_x self.mpimage.oy = dst_y self.mpimage.set_data(data)
def add_pipers(self, pipers, *args, **kwargs): """ Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the specified order. Takes optional arguments for ``Dagger.add_piper``. Arguments: - pipers(sequence of valid ``add_piper`` arguments) Sequence of ``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to the ``Dagger`` in the left to right order. """ for piper in pipers: self.add_piper(piper, *args, **kwargs)
Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the specified order. Takes optional arguments for ``Dagger.add_piper``. Arguments: - pipers(sequence of valid ``add_piper`` arguments) Sequence of ``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to the ``Dagger`` in the left to right order.
Below is the the instruction that describes the task: ### Input: Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the specified order. Takes optional arguments for ``Dagger.add_piper``. Arguments: - pipers(sequence of valid ``add_piper`` arguments) Sequence of ``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to the ``Dagger`` in the left to right order. ### Response: def add_pipers(self, pipers, *args, **kwargs): """ Adds a sequence of ``Pipers`` instances to the ``Dagger`` in the specified order. Takes optional arguments for ``Dagger.add_piper``. Arguments: - pipers(sequence of valid ``add_piper`` arguments) Sequence of ``Pipers`` or valid ``Dagger.add_piper`` arguments to be added to the ``Dagger`` in the left to right order. """ for piper in pipers: self.add_piper(piper, *args, **kwargs)
def force_vertical_padding_after( self, index: int, padding: Union[int, float]) -> None: """Change the padding after the given row.""" self.vertical_padding[index] = padding
Change the padding after the given row.
Below is the the instruction that describes the task: ### Input: Change the padding after the given row. ### Response: def force_vertical_padding_after( self, index: int, padding: Union[int, float]) -> None: """Change the padding after the given row.""" self.vertical_padding[index] = padding
def members(group_id): """List user group members.""" page = request.args.get('page', 1, type=int) per_page = request.args.get('per_page', 5, type=int) q = request.args.get('q', '') s = request.args.get('s', '') group = Group.query.get_or_404(group_id) if group.can_see_members(current_user): members = Membership.query_by_group(group_id, with_invitations=True) if q: members = Membership.search(members, q) if s: members = Membership.order(members, Membership.state, s) members = members.paginate(page, per_page=per_page) return render_template( "invenio_groups/members.html", group=group, members=members, page=page, per_page=per_page, q=q, s=s, ) flash( _( 'You are not allowed to see members of this group %(group_name)s.', group_name=group.name ), 'error' ) return redirect(url_for('.index'))
List user group members.
Below is the the instruction that describes the task: ### Input: List user group members. ### Response: def members(group_id): """List user group members.""" page = request.args.get('page', 1, type=int) per_page = request.args.get('per_page', 5, type=int) q = request.args.get('q', '') s = request.args.get('s', '') group = Group.query.get_or_404(group_id) if group.can_see_members(current_user): members = Membership.query_by_group(group_id, with_invitations=True) if q: members = Membership.search(members, q) if s: members = Membership.order(members, Membership.state, s) members = members.paginate(page, per_page=per_page) return render_template( "invenio_groups/members.html", group=group, members=members, page=page, per_page=per_page, q=q, s=s, ) flash( _( 'You are not allowed to see members of this group %(group_name)s.', group_name=group.name ), 'error' ) return redirect(url_for('.index'))
def _get_key_values(self, name): """Return a dict containing key / values items for a given key, used for items like filters, page, etc. :param str name: name of the querystring parameter :return dict: a dict of key / values items """ results = {} for key, value in self.qs.items(): try: if not key.startswith(name): continue key_start = key.index('[') + 1 key_end = key.index(']') item_key = key[key_start:key_end] if ',' in value: item_value = value.split(',') else: item_value = value results.update({item_key: item_value}) except Exception: raise BadRequest("Parse error", source={'parameter': key}) return results
Return a dict containing key / values items for a given key, used for items like filters, page, etc. :param str name: name of the querystring parameter :return dict: a dict of key / values items
Below is the the instruction that describes the task: ### Input: Return a dict containing key / values items for a given key, used for items like filters, page, etc. :param str name: name of the querystring parameter :return dict: a dict of key / values items ### Response: def _get_key_values(self, name): """Return a dict containing key / values items for a given key, used for items like filters, page, etc. :param str name: name of the querystring parameter :return dict: a dict of key / values items """ results = {} for key, value in self.qs.items(): try: if not key.startswith(name): continue key_start = key.index('[') + 1 key_end = key.index(']') item_key = key[key_start:key_end] if ',' in value: item_value = value.split(',') else: item_value = value results.update({item_key: item_value}) except Exception: raise BadRequest("Parse error", source={'parameter': key}) return results
def print_device_info(dev_name): """Prints information about the given device. Usage: print_device_info("Dev1") """ string_buffer = ctypes.create_string_buffer(1024) attributes = [pydaq.DAQmx_Dev_ProductType, pydaq.DAQmx_Dev_SerialNum, pydaq.DAQmx_Dev_AO_PhysicalChans, pydaq.DAQmx_Dev_CI_PhysicalChans, pydaq.DAQmx_Dev_CO_PhysicalChans, pydaq.DAQmx_Dev_DO_Lines] attribute_names = ['DAQmx_Dev_ProductType', 'DAQmx_Dev_SerialNum', 'DAQmx_Dev_AO_PhysicalChans', 'DAQmx_Dev_CI_PhysicalChans', 'DAQmx_Dev_CO_PhysicalChans', 'DAQmx_Dev_DO_Lines'] ret_values = [] for a in attributes: pydaq.DAQmxGetDeviceAttribute(dev_name, a, string_buffer) ret_values.append(str(string_buffer.value)) print('Device Name:\t' + dev_name) for n, v in zip(attribute_names, ret_values): print '\t' + n + ':\t' + v
Prints information about the given device. Usage: print_device_info("Dev1")
Below is the the instruction that describes the task: ### Input: Prints information about the given device. Usage: print_device_info("Dev1") ### Response: def print_device_info(dev_name): """Prints information about the given device. Usage: print_device_info("Dev1") """ string_buffer = ctypes.create_string_buffer(1024) attributes = [pydaq.DAQmx_Dev_ProductType, pydaq.DAQmx_Dev_SerialNum, pydaq.DAQmx_Dev_AO_PhysicalChans, pydaq.DAQmx_Dev_CI_PhysicalChans, pydaq.DAQmx_Dev_CO_PhysicalChans, pydaq.DAQmx_Dev_DO_Lines] attribute_names = ['DAQmx_Dev_ProductType', 'DAQmx_Dev_SerialNum', 'DAQmx_Dev_AO_PhysicalChans', 'DAQmx_Dev_CI_PhysicalChans', 'DAQmx_Dev_CO_PhysicalChans', 'DAQmx_Dev_DO_Lines'] ret_values = [] for a in attributes: pydaq.DAQmxGetDeviceAttribute(dev_name, a, string_buffer) ret_values.append(str(string_buffer.value)) print('Device Name:\t' + dev_name) for n, v in zip(attribute_names, ret_values): print '\t' + n + ':\t' + v
def _compile_scalar(schema): """A scalar value. The schema can either be a value or a type. >>> _compile_scalar(int)([], 1) 1 >>> with raises(er.Invalid, 'expected float'): ... _compile_scalar(float)([], '1') Callables have >>> _compile_scalar(lambda v: float(v))([], '1') 1.0 As a convenience, ValueError's are trapped: >>> with raises(er.Invalid, 'not a valid value'): ... _compile_scalar(lambda v: float(v))([], 'a') """ if inspect.isclass(schema): def validate_instance(path, data): if isinstance(data, schema): return data else: msg = 'expected %s' % schema.__name__ raise er.TypeInvalid(msg, path) return validate_instance if callable(schema): def validate_callable(path, data): try: return schema(data) except ValueError as e: raise er.ValueInvalid('not a valid value', path) except er.Invalid as e: e.prepend(path) raise return validate_callable def validate_value(path, data): if data != schema: raise er.ScalarInvalid('not a valid value', path) return data return validate_value
A scalar value. The schema can either be a value or a type. >>> _compile_scalar(int)([], 1) 1 >>> with raises(er.Invalid, 'expected float'): ... _compile_scalar(float)([], '1') Callables have >>> _compile_scalar(lambda v: float(v))([], '1') 1.0 As a convenience, ValueError's are trapped: >>> with raises(er.Invalid, 'not a valid value'): ... _compile_scalar(lambda v: float(v))([], 'a')
Below is the the instruction that describes the task: ### Input: A scalar value. The schema can either be a value or a type. >>> _compile_scalar(int)([], 1) 1 >>> with raises(er.Invalid, 'expected float'): ... _compile_scalar(float)([], '1') Callables have >>> _compile_scalar(lambda v: float(v))([], '1') 1.0 As a convenience, ValueError's are trapped: >>> with raises(er.Invalid, 'not a valid value'): ... _compile_scalar(lambda v: float(v))([], 'a') ### Response: def _compile_scalar(schema): """A scalar value. The schema can either be a value or a type. >>> _compile_scalar(int)([], 1) 1 >>> with raises(er.Invalid, 'expected float'): ... _compile_scalar(float)([], '1') Callables have >>> _compile_scalar(lambda v: float(v))([], '1') 1.0 As a convenience, ValueError's are trapped: >>> with raises(er.Invalid, 'not a valid value'): ... _compile_scalar(lambda v: float(v))([], 'a') """ if inspect.isclass(schema): def validate_instance(path, data): if isinstance(data, schema): return data else: msg = 'expected %s' % schema.__name__ raise er.TypeInvalid(msg, path) return validate_instance if callable(schema): def validate_callable(path, data): try: return schema(data) except ValueError as e: raise er.ValueInvalid('not a valid value', path) except er.Invalid as e: e.prepend(path) raise return validate_callable def validate_value(path, data): if data != schema: raise er.ScalarInvalid('not a valid value', path) return data return validate_value
def write_data(self, variable_id, value, task): """ write values to the device """ variable = self._variables[variable_id] if task.property_name != '': # write the property to VariableProperty use that for later read vp = VariableProperty.objects.update_or_create_property(variable=variable, name='VISA:%s' % task.property_name.upper(), value=value, value_class='FLOAT64') return True return False i = 0 j = 0 while i < 10: try: self.inst.read_termination = '\n' self.inst.query('*IDN?') i = 12 j = 1 except: self.connect() time.sleep(1) i += 1 logger.error("Keithley connect error i : %s" %i) if j == 0: logger.error("Keithley-Instrument not connected") return False # if variable_id == 'present_value': if task.variable.visavariable.device_property.upper() == 'PRESENT_VALUE': i = 0 while i < 10: Vseff = "" try: Vseff = self.parse_value(self.inst.query(':READ?')) except: Vseff = "" if Vseff is None or Vseff is "": i += 1 logger.error("Keithley - Error Read - i : %s" %i) self.inst.write('*CLS') else: i = 12 # Call Phase Osc # cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Find_Phase_Osc').id, value=Vseff, start=time.time()) # cwt.save() logger.info("Variable %s - task.property_name : %s - value %s" %(variable, task.property_name.upper(), value)) vp = VariableProperty.objects.update_or_create_property(variable=variable, name='VISA:%s' % task.property_name.upper(), value=value, value_class='FLOAT64') #vp = VariableProperty.objects.update_or_create_property(variable=variable, # name='VISA:%s' % task.property_name.upper()) return Vseff if variable_instance.visavariable.device_property.upper() == 'SET_AC_RANGE_RES': # if variable_id == 'set_ac_range_res': CMD = str('*RST;:FUNC "VOLTage:AC";:VOLTage:AC:RANGe:AUTO 1;:VOLTage:AC:RESolution MIN;:TRIG:DEL MIN') self.inst.write(CMD) return True else: logger.error("Keithley - variable_id : %s" %variable_id) return self.parse_value(self.inst.query(str(variable_id)+' '+str(value)))
write values to the device
Below is the the instruction that describes the task: ### Input: write values to the device ### Response: def write_data(self, variable_id, value, task): """ write values to the device """ variable = self._variables[variable_id] if task.property_name != '': # write the property to VariableProperty use that for later read vp = VariableProperty.objects.update_or_create_property(variable=variable, name='VISA:%s' % task.property_name.upper(), value=value, value_class='FLOAT64') return True return False i = 0 j = 0 while i < 10: try: self.inst.read_termination = '\n' self.inst.query('*IDN?') i = 12 j = 1 except: self.connect() time.sleep(1) i += 1 logger.error("Keithley connect error i : %s" %i) if j == 0: logger.error("Keithley-Instrument not connected") return False # if variable_id == 'present_value': if task.variable.visavariable.device_property.upper() == 'PRESENT_VALUE': i = 0 while i < 10: Vseff = "" try: Vseff = self.parse_value(self.inst.query(':READ?')) except: Vseff = "" if Vseff is None or Vseff is "": i += 1 logger.error("Keithley - Error Read - i : %s" %i) self.inst.write('*CLS') else: i = 12 # Call Phase Osc # cwt = DeviceWriteTask(variable_id=Variable.objects.get(name='Find_Phase_Osc').id, value=Vseff, start=time.time()) # cwt.save() logger.info("Variable %s - task.property_name : %s - value %s" %(variable, task.property_name.upper(), value)) vp = VariableProperty.objects.update_or_create_property(variable=variable, name='VISA:%s' % task.property_name.upper(), value=value, value_class='FLOAT64') #vp = VariableProperty.objects.update_or_create_property(variable=variable, # name='VISA:%s' % task.property_name.upper()) return Vseff if variable_instance.visavariable.device_property.upper() == 'SET_AC_RANGE_RES': # if variable_id == 'set_ac_range_res': CMD = str('*RST;:FUNC "VOLTage:AC";:VOLTage:AC:RANGe:AUTO 1;:VOLTage:AC:RESolution MIN;:TRIG:DEL MIN') self.inst.write(CMD) return True else: logger.error("Keithley - variable_id : %s" %variable_id) return 
self.parse_value(self.inst.query(str(variable_id)+' '+str(value)))
def is_all_field_none(self): """ :rtype: bool """ if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._time_responded is not None: return False if self._time_expiry is not None: return False if self._time_refund_requested is not None: return False if self._time_refunded is not None: return False if self._user_refund_requested is not None: return False if self._monetary_account_id is not None: return False if self._amount_inquired is not None: return False if self._amount_responded is not None: return False if self._status is not None: return False if self._description is not None: return False if self._alias is not None: return False if self._counterparty_alias is not None: return False if self._attachment is not None: return False if self._minimum_age is not None: return False if self._require_address is not None: return False if self._geolocation is not None: return False if self._type_ is not None: return False if self._sub_type is not None: return False if self._redirect_url is not None: return False if self._address_billing is not None: return False if self._address_shipping is not None: return False if self._allow_chat is not None: return False if self._credit_scheme_identifier is not None: return False if self._mandate_identifier is not None: return False if self._eligible_whitelist_id is not None: return False if self._request_reference_split_the_bill is not None: return False return True
:rtype: bool
Below is the the instruction that describes the task: ### Input: :rtype: bool ### Response: def is_all_field_none(self): """ :rtype: bool """ if self._id_ is not None: return False if self._created is not None: return False if self._updated is not None: return False if self._time_responded is not None: return False if self._time_expiry is not None: return False if self._time_refund_requested is not None: return False if self._time_refunded is not None: return False if self._user_refund_requested is not None: return False if self._monetary_account_id is not None: return False if self._amount_inquired is not None: return False if self._amount_responded is not None: return False if self._status is not None: return False if self._description is not None: return False if self._alias is not None: return False if self._counterparty_alias is not None: return False if self._attachment is not None: return False if self._minimum_age is not None: return False if self._require_address is not None: return False if self._geolocation is not None: return False if self._type_ is not None: return False if self._sub_type is not None: return False if self._redirect_url is not None: return False if self._address_billing is not None: return False if self._address_shipping is not None: return False if self._allow_chat is not None: return False if self._credit_scheme_identifier is not None: return False if self._mandate_identifier is not None: return False if self._eligible_whitelist_id is not None: return False if self._request_reference_split_the_bill is not None: return False return True
async def call(self, method, **params): """ Call an Slack Web API method :param method: Slack Web API method to call :param params: {str: object} parameters to method :return: dict() """ url = self.SLACK_RPC_PREFIX + method data = FormData() data.add_fields(MultiDict(token=self.bot_token, charset='utf-8', **params)) response_body = await self.request( method='POST', url=url, data=data ) if 'warning' in response_body: logger.warning(f'Warnings received from API call {method}: {response_body["warning"]}') if 'ok' not in response_body: logger.error(f'No ok marker in slack API call {method} {params} => {response_body}') raise SlackCallException('There is no ok marker, ... strange', method=method) if not response_body['ok']: logger.error(f'Slack API call failed {method} {params} => {response_body}') raise SlackCallException(f'No OK response returned', method=method) return response_body
Call an Slack Web API method :param method: Slack Web API method to call :param params: {str: object} parameters to method :return: dict()
Below is the the instruction that describes the task: ### Input: Call an Slack Web API method :param method: Slack Web API method to call :param params: {str: object} parameters to method :return: dict() ### Response: async def call(self, method, **params): """ Call an Slack Web API method :param method: Slack Web API method to call :param params: {str: object} parameters to method :return: dict() """ url = self.SLACK_RPC_PREFIX + method data = FormData() data.add_fields(MultiDict(token=self.bot_token, charset='utf-8', **params)) response_body = await self.request( method='POST', url=url, data=data ) if 'warning' in response_body: logger.warning(f'Warnings received from API call {method}: {response_body["warning"]}') if 'ok' not in response_body: logger.error(f'No ok marker in slack API call {method} {params} => {response_body}') raise SlackCallException('There is no ok marker, ... strange', method=method) if not response_body['ok']: logger.error(f'Slack API call failed {method} {params} => {response_body}') raise SlackCallException(f'No OK response returned', method=method) return response_body
def binary_back_substitute(W: np.ndarray, s: np.ndarray) -> np.ndarray: """ Perform back substitution on a binary system of equations, i.e. it performs Gauss elimination over the field :math:`GF(2)`. It finds an :math:`\\mathbf{x}` such that :math:`\\mathbf{\\mathit{W}}\\mathbf{x}=\\mathbf{s}`, where all arithmetic is taken bitwise and modulo 2. :param W: A square :math:`n\\times n` matrix of 0s and 1s, in row-echelon (upper-triangle) form :param s: An :math:`n\\times 1` vector of 0s and 1s :return: The :math:`n\\times 1` vector of 0s and 1s that solves the above system of equations. """ # iterate backwards, starting from second to last row for back-substitution m = np.copy(s) n = len(s) for row_num in range(n - 2, -1, -1): row = W[row_num] for col_num in range(row_num + 1, n): if row[col_num] == 1: m[row_num] = xor(s[row_num], s[col_num]) return m[::-1]
Perform back substitution on a binary system of equations, i.e. it performs Gauss elimination over the field :math:`GF(2)`. It finds an :math:`\\mathbf{x}` such that :math:`\\mathbf{\\mathit{W}}\\mathbf{x}=\\mathbf{s}`, where all arithmetic is taken bitwise and modulo 2. :param W: A square :math:`n\\times n` matrix of 0s and 1s, in row-echelon (upper-triangle) form :param s: An :math:`n\\times 1` vector of 0s and 1s :return: The :math:`n\\times 1` vector of 0s and 1s that solves the above system of equations.
Below is the the instruction that describes the task: ### Input: Perform back substitution on a binary system of equations, i.e. it performs Gauss elimination over the field :math:`GF(2)`. It finds an :math:`\\mathbf{x}` such that :math:`\\mathbf{\\mathit{W}}\\mathbf{x}=\\mathbf{s}`, where all arithmetic is taken bitwise and modulo 2. :param W: A square :math:`n\\times n` matrix of 0s and 1s, in row-echelon (upper-triangle) form :param s: An :math:`n\\times 1` vector of 0s and 1s :return: The :math:`n\\times 1` vector of 0s and 1s that solves the above system of equations. ### Response: def binary_back_substitute(W: np.ndarray, s: np.ndarray) -> np.ndarray: """ Perform back substitution on a binary system of equations, i.e. it performs Gauss elimination over the field :math:`GF(2)`. It finds an :math:`\\mathbf{x}` such that :math:`\\mathbf{\\mathit{W}}\\mathbf{x}=\\mathbf{s}`, where all arithmetic is taken bitwise and modulo 2. :param W: A square :math:`n\\times n` matrix of 0s and 1s, in row-echelon (upper-triangle) form :param s: An :math:`n\\times 1` vector of 0s and 1s :return: The :math:`n\\times 1` vector of 0s and 1s that solves the above system of equations. """ # iterate backwards, starting from second to last row for back-substitution m = np.copy(s) n = len(s) for row_num in range(n - 2, -1, -1): row = W[row_num] for col_num in range(row_num + 1, n): if row[col_num] == 1: m[row_num] = xor(s[row_num], s[col_num]) return m[::-1]
def write_response_html_to_file(response,filename): """ An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly beneficial when developing the server-side API. This method will write the response HTML for viewing the error details in the browesr. Args: response: `requests.models.Response` instance. filename: `str`. The output file name. """ fout = open(filename,'w') if not str(response.status_code).startswith("2"): Model.debug_logger.debug(response.text) fout.write(response.text) fout.close()
An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly beneficial when developing the server-side API. This method will write the response HTML for viewing the error details in the browesr. Args: response: `requests.models.Response` instance. filename: `str`. The output file name.
Below is the the instruction that describes the task: ### Input: An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly beneficial when developing the server-side API. This method will write the response HTML for viewing the error details in the browesr. Args: response: `requests.models.Response` instance. filename: `str`. The output file name. ### Response: def write_response_html_to_file(response,filename): """ An aid in troubleshooting internal application errors, i.e. <Response [500]>, to be mainly beneficial when developing the server-side API. This method will write the response HTML for viewing the error details in the browesr. Args: response: `requests.models.Response` instance. filename: `str`. The output file name. """ fout = open(filename,'w') if not str(response.status_code).startswith("2"): Model.debug_logger.debug(response.text) fout.write(response.text) fout.close()
def writefile(filename, content): """ writes the content into the file :param filename: the filename :param content: the content :return: """ with open(path_expand(filename), 'w') as outfile: outfile.write(content)
writes the content into the file :param filename: the filename :param content: the content :return:
Below is the the instruction that describes the task: ### Input: writes the content into the file :param filename: the filename :param content: the content :return: ### Response: def writefile(filename, content): """ writes the content into the file :param filename: the filename :param content: the content :return: """ with open(path_expand(filename), 'w') as outfile: outfile.write(content)
def median(self, **kwargs): """ Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex """ try: return self._cython_agg_general('median', **kwargs) except GroupByError: raise except Exception: # pragma: no cover def f(x): if isinstance(x, np.ndarray): x = Series(x) return x.median(axis=self.axis, **kwargs) with _group_selection_context(self): return self._python_agg_general(f)
Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex
Below is the the instruction that describes the task: ### Input: Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex ### Response: def median(self, **kwargs): """ Compute median of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex """ try: return self._cython_agg_general('median', **kwargs) except GroupByError: raise except Exception: # pragma: no cover def f(x): if isinstance(x, np.ndarray): x = Series(x) return x.median(axis=self.axis, **kwargs) with _group_selection_context(self): return self._python_agg_general(f)
def _add_task(self, *args, **kwargs): """ Call ``self._scheduler.add_task``, but store the values too so we can implement :py:func:`luigi.execution_summary.summary`. """ task_id = kwargs['task_id'] status = kwargs['status'] runnable = kwargs['runnable'] task = self._scheduled_tasks.get(task_id) if task: self._add_task_history.append((task, status, runnable)) kwargs['owners'] = task._owner_list() if task_id in self._batch_running_tasks: for batch_task in self._batch_running_tasks.pop(task_id): self._add_task_history.append((batch_task, status, True)) if task and kwargs.get('params'): kwargs['param_visibilities'] = task._get_param_visibilities() self._scheduler.add_task(*args, **kwargs) logger.info('Informed scheduler that task %s has status %s', task_id, status)
Call ``self._scheduler.add_task``, but store the values too so we can implement :py:func:`luigi.execution_summary.summary`.
Below is the the instruction that describes the task: ### Input: Call ``self._scheduler.add_task``, but store the values too so we can implement :py:func:`luigi.execution_summary.summary`. ### Response: def _add_task(self, *args, **kwargs): """ Call ``self._scheduler.add_task``, but store the values too so we can implement :py:func:`luigi.execution_summary.summary`. """ task_id = kwargs['task_id'] status = kwargs['status'] runnable = kwargs['runnable'] task = self._scheduled_tasks.get(task_id) if task: self._add_task_history.append((task, status, runnable)) kwargs['owners'] = task._owner_list() if task_id in self._batch_running_tasks: for batch_task in self._batch_running_tasks.pop(task_id): self._add_task_history.append((batch_task, status, True)) if task and kwargs.get('params'): kwargs['param_visibilities'] = task._get_param_visibilities() self._scheduler.add_task(*args, **kwargs) logger.info('Informed scheduler that task %s has status %s', task_id, status)
def omim_terms(case_obj): """Extract all OMIM phenotypes available for the case Args: case_obj(dict): a scout case object Returns: disorders(list): a list of OMIM disorder objects """ LOG.info("Collecting OMIM disorders for case {}".format(case_obj.get('display_name'))) disorders = [] case_disorders = case_obj.get('diagnosis_phenotypes') # array of OMIM terms if case_disorders: for disorder in case_disorders: disorder_obj = { "id" : ':'.join([ 'MIM', str(disorder)]) } disorders.append(disorder_obj) return disorders
Extract all OMIM phenotypes available for the case Args: case_obj(dict): a scout case object Returns: disorders(list): a list of OMIM disorder objects
Below is the the instruction that describes the task: ### Input: Extract all OMIM phenotypes available for the case Args: case_obj(dict): a scout case object Returns: disorders(list): a list of OMIM disorder objects ### Response: def omim_terms(case_obj): """Extract all OMIM phenotypes available for the case Args: case_obj(dict): a scout case object Returns: disorders(list): a list of OMIM disorder objects """ LOG.info("Collecting OMIM disorders for case {}".format(case_obj.get('display_name'))) disorders = [] case_disorders = case_obj.get('diagnosis_phenotypes') # array of OMIM terms if case_disorders: for disorder in case_disorders: disorder_obj = { "id" : ':'.join([ 'MIM', str(disorder)]) } disorders.append(disorder_obj) return disorders
def pubkey(self, identity, ecdh=False): """Return public key.""" _verify_support(identity, ecdh) return trezor.Trezor.pubkey(self, identity=identity, ecdh=ecdh)
Return public key.
Below is the the instruction that describes the task: ### Input: Return public key. ### Response: def pubkey(self, identity, ecdh=False): """Return public key.""" _verify_support(identity, ecdh) return trezor.Trezor.pubkey(self, identity=identity, ecdh=ecdh)
def options(self, parser, env=os.environ): """Handle parsing additional command-line options""" super(PerfDumpPlugin, self).options(parser, env=env) parser.add_option("", "--perfdump-html", dest="perfdump_html_file", help="Set destination for HTML report output")
Handle parsing additional command-line options
Below is the the instruction that describes the task: ### Input: Handle parsing additional command-line options ### Response: def options(self, parser, env=os.environ): """Handle parsing additional command-line options""" super(PerfDumpPlugin, self).options(parser, env=env) parser.add_option("", "--perfdump-html", dest="perfdump_html_file", help="Set destination for HTML report output")
def toStringArray(name, a, width = 0): """ Returns an array (any sequence of floats, really) as a string. """ string = name + ": " cnt = 0 for i in a: string += "%4.2f " % i if width > 0 and (cnt + 1) % width == 0: string += '\n' cnt += 1 return string
Returns an array (any sequence of floats, really) as a string.
Below is the the instruction that describes the task: ### Input: Returns an array (any sequence of floats, really) as a string. ### Response: def toStringArray(name, a, width = 0): """ Returns an array (any sequence of floats, really) as a string. """ string = name + ": " cnt = 0 for i in a: string += "%4.2f " % i if width > 0 and (cnt + 1) % width == 0: string += '\n' cnt += 1 return string
def append_varint32(self, value): """Appends a signed 32-bit integer to the internal buffer, encoded as a varint. (Note that a negative varint32 will always require 10 bytes of space.) """ if not wire_format.INT32_MIN <= value <= wire_format.INT32_MAX: raise errors.EncodeError('Value out of range: %d' % value) self.append_varint64(value)
Appends a signed 32-bit integer to the internal buffer, encoded as a varint. (Note that a negative varint32 will always require 10 bytes of space.)
Below is the the instruction that describes the task: ### Input: Appends a signed 32-bit integer to the internal buffer, encoded as a varint. (Note that a negative varint32 will always require 10 bytes of space.) ### Response: def append_varint32(self, value): """Appends a signed 32-bit integer to the internal buffer, encoded as a varint. (Note that a negative varint32 will always require 10 bytes of space.) """ if not wire_format.INT32_MIN <= value <= wire_format.INT32_MAX: raise errors.EncodeError('Value out of range: %d' % value) self.append_varint64(value)
def trace_graph(graph): """Build a collection of "traces" for each package. A trace is a list of names that eventually leads to the package. For example, if A and B are root dependencies, A depends on C and D, B depends on C, and C depends on D, the return value would be like:: { None: [], "A": [None], "B": [None], "C": [[None, "A"], [None, "B"]], "D": [[None, "B", "C"], [None, "A"]], } """ result = {None: []} for vertex in graph: result[vertex] = [] for root in graph.iter_children(None): paths = [] _trace_visit_vertex(graph, root, vertex, {None}, [None], paths) result[vertex].extend(paths) return result
Build a collection of "traces" for each package. A trace is a list of names that eventually leads to the package. For example, if A and B are root dependencies, A depends on C and D, B depends on C, and C depends on D, the return value would be like:: { None: [], "A": [None], "B": [None], "C": [[None, "A"], [None, "B"]], "D": [[None, "B", "C"], [None, "A"]], }
Below is the the instruction that describes the task: ### Input: Build a collection of "traces" for each package. A trace is a list of names that eventually leads to the package. For example, if A and B are root dependencies, A depends on C and D, B depends on C, and C depends on D, the return value would be like:: { None: [], "A": [None], "B": [None], "C": [[None, "A"], [None, "B"]], "D": [[None, "B", "C"], [None, "A"]], } ### Response: def trace_graph(graph): """Build a collection of "traces" for each package. A trace is a list of names that eventually leads to the package. For example, if A and B are root dependencies, A depends on C and D, B depends on C, and C depends on D, the return value would be like:: { None: [], "A": [None], "B": [None], "C": [[None, "A"], [None, "B"]], "D": [[None, "B", "C"], [None, "A"]], } """ result = {None: []} for vertex in graph: result[vertex] = [] for root in graph.iter_children(None): paths = [] _trace_visit_vertex(graph, root, vertex, {None}, [None], paths) result[vertex].extend(paths) return result
def apk(actual, predicted, k=10): """ Computes the average precision at k. This function computes the average precision at k between two lists of items. Parameters ---------- actual : list A list of elements that are to be predicted (order doesn't matter) predicted : list A list of predicted elements (order does matter) k : int, optional The maximum number of predicted elements Returns ------- score : double The average precision at k over the input lists """ if len(predicted)>k: predicted = predicted[:k] score = 0.0 num_hits = 0.0 for i,p in enumerate(predicted): if p in actual and p not in predicted[:i]: num_hits += 1.0 score += num_hits / (i+1.0) if len(actual) == 0: return 0.0 return score / min(len(actual), k)
Computes the average precision at k. This function computes the average precision at k between two lists of items. Parameters ---------- actual : list A list of elements that are to be predicted (order doesn't matter) predicted : list A list of predicted elements (order does matter) k : int, optional The maximum number of predicted elements Returns ------- score : double The average precision at k over the input lists
Below is the the instruction that describes the task: ### Input: Computes the average precision at k. This function computes the average precision at k between two lists of items. Parameters ---------- actual : list A list of elements that are to be predicted (order doesn't matter) predicted : list A list of predicted elements (order does matter) k : int, optional The maximum number of predicted elements Returns ------- score : double The average precision at k over the input lists ### Response: def apk(actual, predicted, k=10): """ Computes the average precision at k. This function computes the average precision at k between two lists of items. Parameters ---------- actual : list A list of elements that are to be predicted (order doesn't matter) predicted : list A list of predicted elements (order does matter) k : int, optional The maximum number of predicted elements Returns ------- score : double The average precision at k over the input lists """ if len(predicted)>k: predicted = predicted[:k] score = 0.0 num_hits = 0.0 for i,p in enumerate(predicted): if p in actual and p not in predicted[:i]: num_hits += 1.0 score += num_hits / (i+1.0) if len(actual) == 0: return 0.0 return score / min(len(actual), k)
def random_state(state=None): """ Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState """ if is_integer(state): return np.random.RandomState(state) elif isinstance(state, np.random.RandomState): return state elif state is None: return np.random else: raise ValueError("random_state must be an integer, a numpy " "RandomState, or None")
Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState
Below is the the instruction that describes the task: ### Input: Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState ### Response: def random_state(state=None): """ Helper function for processing random_state arguments. Parameters ---------- state : int, np.random.RandomState, None. If receives an int, passes to np.random.RandomState() as seed. If receives an np.random.RandomState object, just returns object. If receives `None`, returns np.random. If receives anything else, raises an informative ValueError. Default None. Returns ------- np.random.RandomState """ if is_integer(state): return np.random.RandomState(state) elif isinstance(state, np.random.RandomState): return state elif state is None: return np.random else: raise ValueError("random_state must be an integer, a numpy " "RandomState, or None")
def is_abstract(self) -> bool: """ Whether or not the class-under-construction was declared as abstract (**NOTE:** this property is usable even *before* the :class:`MetaOptionsFactory` has run) """ meta_value = getattr(self.clsdict.get('Meta'), 'abstract', False) return self.clsdict.get(ABSTRACT_ATTR, meta_value) is True
Whether or not the class-under-construction was declared as abstract (**NOTE:** this property is usable even *before* the :class:`MetaOptionsFactory` has run)
Below is the the instruction that describes the task: ### Input: Whether or not the class-under-construction was declared as abstract (**NOTE:** this property is usable even *before* the :class:`MetaOptionsFactory` has run) ### Response: def is_abstract(self) -> bool: """ Whether or not the class-under-construction was declared as abstract (**NOTE:** this property is usable even *before* the :class:`MetaOptionsFactory` has run) """ meta_value = getattr(self.clsdict.get('Meta'), 'abstract', False) return self.clsdict.get(ABSTRACT_ATTR, meta_value) is True
def common_values_dict(): """Build a basic values object used in every create method. All our resources contain a same subset of value. Instead of redoing this code everytime, this method ensures it is done only at one place. """ now = datetime.datetime.utcnow().isoformat() etag = utils.gen_etag() values = { 'id': utils.gen_uuid(), 'created_at': now, 'updated_at': now, 'etag': etag } return values
Build a basic values object used in every create method. All our resources contain a same subset of value. Instead of redoing this code everytime, this method ensures it is done only at one place.
Below is the the instruction that describes the task: ### Input: Build a basic values object used in every create method. All our resources contain a same subset of value. Instead of redoing this code everytime, this method ensures it is done only at one place. ### Response: def common_values_dict(): """Build a basic values object used in every create method. All our resources contain a same subset of value. Instead of redoing this code everytime, this method ensures it is done only at one place. """ now = datetime.datetime.utcnow().isoformat() etag = utils.gen_etag() values = { 'id': utils.gen_uuid(), 'created_at': now, 'updated_at': now, 'etag': etag } return values
def available(software=True, drivers=True, summary=False, skip_installed=True, skip_hidden=True, skip_mandatory=False, skip_reboot=False, categories=None, severities=None,): ''' .. versionadded:: 2017.7.0 List updates that match the passed criteria. This allows for more filter options than :func:`list`. Good for finding a specific GUID or KB. Args: software (bool): Include software updates in the results (default is True) drivers (bool): Include driver updates in the results (default is True) summary (bool): - True: Return a summary of updates available for each category. - False (default): Return a detailed list of available updates. skip_installed (bool): Skip updates that are already installed. Default is False. skip_hidden (bool): Skip updates that have been hidden. Default is True. skip_mandatory (bool): Skip mandatory updates. Default is False. skip_reboot (bool): Skip updates that require a reboot. Default is False. categories (list): Specify the categories to list. Must be passed as a list. All categories returned by default. Categories include the following: * Critical Updates * Definition Updates * Drivers (make sure you set drivers=True) * Feature Packs * Security Updates * Update Rollups * Updates * Update Rollups * Windows 7 * Windows 8.1 * Windows 8.1 drivers * Windows 8.1 and later drivers * Windows Defender severities (list): Specify the severities to include. Must be passed as a list. All severities returned by default. Severities include the following: * Critical * Important Returns: dict: Returns a dict containing either a summary or a list of updates: .. 
code-block:: cfg List of Updates: {'<GUID>': {'Title': <title>, 'KB': <KB>, 'GUID': <the globally unique identifier for the update> 'Description': <description>, 'Downloaded': <has the update been downloaded>, 'Installed': <has the update been installed>, 'Mandatory': <is the update mandatory>, 'UserInput': <is user input required>, 'EULAAccepted': <has the EULA been accepted>, 'Severity': <update severity>, 'NeedsReboot': <is the update installed and awaiting reboot>, 'RebootBehavior': <will the update require a reboot>, 'Categories': [ '<category 1>', '<category 2>', ...] } } Summary of Updates: {'Total': <total number of updates returned>, 'Available': <updates that are not downloaded or installed>, 'Downloaded': <updates that are downloaded but not installed>, 'Installed': <updates installed (usually 0 unless installed=True)>, 'Categories': { <category 1>: <total for that category>, <category 2>: <total for category 2>, ... } } CLI Examples: .. code-block:: bash # Normal Usage (list all software updates) salt '*' win_wua.available # List all updates with categories of Critical Updates and Drivers salt '*' win_wua.available categories=["Critical Updates","Drivers"] # List all Critical Security Updates salt '*' win_wua.available categories=["Security Updates"] severities=["Critical"] # List all updates with a severity of Critical salt '*' win_wua.available severities=["Critical"] # A summary of all available updates salt '*' win_wua.available summary=True # A summary of all Feature Packs and Windows 8.1 Updates salt '*' win_wua.available categories=["Feature Packs","Windows 8.1"] summary=True ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() # Look for available updates = wua.available( skip_hidden=skip_hidden, skip_installed=skip_installed, skip_mandatory=skip_mandatory, skip_reboot=skip_reboot, software=software, drivers=drivers, categories=categories, severities=severities) # Return results as Summary or Details 
return updates.summary() if summary else updates.list()
.. versionadded:: 2017.7.0 List updates that match the passed criteria. This allows for more filter options than :func:`list`. Good for finding a specific GUID or KB. Args: software (bool): Include software updates in the results (default is True) drivers (bool): Include driver updates in the results (default is True) summary (bool): - True: Return a summary of updates available for each category. - False (default): Return a detailed list of available updates. skip_installed (bool): Skip updates that are already installed. Default is False. skip_hidden (bool): Skip updates that have been hidden. Default is True. skip_mandatory (bool): Skip mandatory updates. Default is False. skip_reboot (bool): Skip updates that require a reboot. Default is False. categories (list): Specify the categories to list. Must be passed as a list. All categories returned by default. Categories include the following: * Critical Updates * Definition Updates * Drivers (make sure you set drivers=True) * Feature Packs * Security Updates * Update Rollups * Updates * Update Rollups * Windows 7 * Windows 8.1 * Windows 8.1 drivers * Windows 8.1 and later drivers * Windows Defender severities (list): Specify the severities to include. Must be passed as a list. All severities returned by default. Severities include the following: * Critical * Important Returns: dict: Returns a dict containing either a summary or a list of updates: .. 
code-block:: cfg List of Updates: {'<GUID>': {'Title': <title>, 'KB': <KB>, 'GUID': <the globally unique identifier for the update> 'Description': <description>, 'Downloaded': <has the update been downloaded>, 'Installed': <has the update been installed>, 'Mandatory': <is the update mandatory>, 'UserInput': <is user input required>, 'EULAAccepted': <has the EULA been accepted>, 'Severity': <update severity>, 'NeedsReboot': <is the update installed and awaiting reboot>, 'RebootBehavior': <will the update require a reboot>, 'Categories': [ '<category 1>', '<category 2>', ...] } } Summary of Updates: {'Total': <total number of updates returned>, 'Available': <updates that are not downloaded or installed>, 'Downloaded': <updates that are downloaded but not installed>, 'Installed': <updates installed (usually 0 unless installed=True)>, 'Categories': { <category 1>: <total for that category>, <category 2>: <total for category 2>, ... } } CLI Examples: .. code-block:: bash # Normal Usage (list all software updates) salt '*' win_wua.available # List all updates with categories of Critical Updates and Drivers salt '*' win_wua.available categories=["Critical Updates","Drivers"] # List all Critical Security Updates salt '*' win_wua.available categories=["Security Updates"] severities=["Critical"] # List all updates with a severity of Critical salt '*' win_wua.available severities=["Critical"] # A summary of all available updates salt '*' win_wua.available summary=True # A summary of all Feature Packs and Windows 8.1 Updates salt '*' win_wua.available categories=["Feature Packs","Windows 8.1"] summary=True
Below is the the instruction that describes the task: ### Input: .. versionadded:: 2017.7.0 List updates that match the passed criteria. This allows for more filter options than :func:`list`. Good for finding a specific GUID or KB. Args: software (bool): Include software updates in the results (default is True) drivers (bool): Include driver updates in the results (default is True) summary (bool): - True: Return a summary of updates available for each category. - False (default): Return a detailed list of available updates. skip_installed (bool): Skip updates that are already installed. Default is False. skip_hidden (bool): Skip updates that have been hidden. Default is True. skip_mandatory (bool): Skip mandatory updates. Default is False. skip_reboot (bool): Skip updates that require a reboot. Default is False. categories (list): Specify the categories to list. Must be passed as a list. All categories returned by default. Categories include the following: * Critical Updates * Definition Updates * Drivers (make sure you set drivers=True) * Feature Packs * Security Updates * Update Rollups * Updates * Update Rollups * Windows 7 * Windows 8.1 * Windows 8.1 drivers * Windows 8.1 and later drivers * Windows Defender severities (list): Specify the severities to include. Must be passed as a list. All severities returned by default. Severities include the following: * Critical * Important Returns: dict: Returns a dict containing either a summary or a list of updates: .. 
code-block:: cfg List of Updates: {'<GUID>': {'Title': <title>, 'KB': <KB>, 'GUID': <the globally unique identifier for the update> 'Description': <description>, 'Downloaded': <has the update been downloaded>, 'Installed': <has the update been installed>, 'Mandatory': <is the update mandatory>, 'UserInput': <is user input required>, 'EULAAccepted': <has the EULA been accepted>, 'Severity': <update severity>, 'NeedsReboot': <is the update installed and awaiting reboot>, 'RebootBehavior': <will the update require a reboot>, 'Categories': [ '<category 1>', '<category 2>', ...] } } Summary of Updates: {'Total': <total number of updates returned>, 'Available': <updates that are not downloaded or installed>, 'Downloaded': <updates that are downloaded but not installed>, 'Installed': <updates installed (usually 0 unless installed=True)>, 'Categories': { <category 1>: <total for that category>, <category 2>: <total for category 2>, ... } } CLI Examples: .. code-block:: bash # Normal Usage (list all software updates) salt '*' win_wua.available # List all updates with categories of Critical Updates and Drivers salt '*' win_wua.available categories=["Critical Updates","Drivers"] # List all Critical Security Updates salt '*' win_wua.available categories=["Security Updates"] severities=["Critical"] # List all updates with a severity of Critical salt '*' win_wua.available severities=["Critical"] # A summary of all available updates salt '*' win_wua.available summary=True # A summary of all Feature Packs and Windows 8.1 Updates salt '*' win_wua.available categories=["Feature Packs","Windows 8.1"] summary=True ### Response: def available(software=True, drivers=True, summary=False, skip_installed=True, skip_hidden=True, skip_mandatory=False, skip_reboot=False, categories=None, severities=None,): ''' .. versionadded:: 2017.7.0 List updates that match the passed criteria. This allows for more filter options than :func:`list`. Good for finding a specific GUID or KB. 
Args: software (bool): Include software updates in the results (default is True) drivers (bool): Include driver updates in the results (default is True) summary (bool): - True: Return a summary of updates available for each category. - False (default): Return a detailed list of available updates. skip_installed (bool): Skip updates that are already installed. Default is False. skip_hidden (bool): Skip updates that have been hidden. Default is True. skip_mandatory (bool): Skip mandatory updates. Default is False. skip_reboot (bool): Skip updates that require a reboot. Default is False. categories (list): Specify the categories to list. Must be passed as a list. All categories returned by default. Categories include the following: * Critical Updates * Definition Updates * Drivers (make sure you set drivers=True) * Feature Packs * Security Updates * Update Rollups * Updates * Update Rollups * Windows 7 * Windows 8.1 * Windows 8.1 drivers * Windows 8.1 and later drivers * Windows Defender severities (list): Specify the severities to include. Must be passed as a list. All severities returned by default. Severities include the following: * Critical * Important Returns: dict: Returns a dict containing either a summary or a list of updates: .. code-block:: cfg List of Updates: {'<GUID>': {'Title': <title>, 'KB': <KB>, 'GUID': <the globally unique identifier for the update> 'Description': <description>, 'Downloaded': <has the update been downloaded>, 'Installed': <has the update been installed>, 'Mandatory': <is the update mandatory>, 'UserInput': <is user input required>, 'EULAAccepted': <has the EULA been accepted>, 'Severity': <update severity>, 'NeedsReboot': <is the update installed and awaiting reboot>, 'RebootBehavior': <will the update require a reboot>, 'Categories': [ '<category 1>', '<category 2>', ...] 
} } Summary of Updates: {'Total': <total number of updates returned>, 'Available': <updates that are not downloaded or installed>, 'Downloaded': <updates that are downloaded but not installed>, 'Installed': <updates installed (usually 0 unless installed=True)>, 'Categories': { <category 1>: <total for that category>, <category 2>: <total for category 2>, ... } } CLI Examples: .. code-block:: bash # Normal Usage (list all software updates) salt '*' win_wua.available # List all updates with categories of Critical Updates and Drivers salt '*' win_wua.available categories=["Critical Updates","Drivers"] # List all Critical Security Updates salt '*' win_wua.available categories=["Security Updates"] severities=["Critical"] # List all updates with a severity of Critical salt '*' win_wua.available severities=["Critical"] # A summary of all available updates salt '*' win_wua.available summary=True # A summary of all Feature Packs and Windows 8.1 Updates salt '*' win_wua.available categories=["Feature Packs","Windows 8.1"] summary=True ''' # Create a Windows Update Agent instance wua = salt.utils.win_update.WindowsUpdateAgent() # Look for available updates = wua.available( skip_hidden=skip_hidden, skip_installed=skip_installed, skip_mandatory=skip_mandatory, skip_reboot=skip_reboot, software=software, drivers=drivers, categories=categories, severities=severities) # Return results as Summary or Details return updates.summary() if summary else updates.list()
def _filter(self): """ delete the punctuation """ pattern = u"[\s+\.\!\-\/_,$%^*(+\"\']+|[+——!】【,。??:、:~@#¥%……&*“”()]+" self.m = re.sub(pattern, "", self.m)
delete the punctuation
Below is the the instruction that describes the task: ### Input: delete the punctuation ### Response: def _filter(self): """ delete the punctuation """ pattern = u"[\s+\.\!\-\/_,$%^*(+\"\']+|[+——!】【,。??:、:~@#¥%……&*“”()]+" self.m = re.sub(pattern, "", self.m)
def batch_keep_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11): """ Batch Keep Absolute (retrain) xlabel = "Fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 13 """ return __run_batch_abs_metric(measures.batch_keep_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
Batch Keep Absolute (retrain) xlabel = "Fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 13
Below is the the instruction that describes the task: ### Input: Batch Keep Absolute (retrain) xlabel = "Fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 13 ### Response: def batch_keep_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11): """ Batch Keep Absolute (retrain) xlabel = "Fraction of features kept" ylabel = "ROC AUC" transform = "identity" sort_order = 13 """ return __run_batch_abs_metric(measures.batch_keep_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
def transformer(self):
    """Class used to turn non-Entity inputs (e.g. dicts or binding objects)
    into Entity instances.

    The returned class -- the field's ``factory`` if set, otherwise its
    ``type_`` -- is expected to implement ``from_obj()`` and ``from_dict()``.

    Returns:
        The factory class, the type_ class, or None when neither is defined.
    """
    if self.factory:
        return self.factory
    if self.type_:
        return self.type_
    return None
Return the class for this field that transforms non-Entity objects (e.g., dicts or binding objects) into Entity instances. Any non-None value returned from this method should implement a from_obj() and from_dict() method. Returns: None if no type_ or factory is defined by the field. Return a class with from_dict and from_obj methods otherwise.
Below is the the instruction that describes the task: ### Input: Return the class for this field that transforms non-Entity objects (e.g., dicts or binding objects) into Entity instances. Any non-None value returned from this method should implement a from_obj() and from_dict() method. Returns: None if no type_ or factory is defined by the field. Return a class with from_dict and from_obj methods otherwise. ### Response: def transformer(self): """Return the class for this field that transforms non-Entity objects (e.g., dicts or binding objects) into Entity instances. Any non-None value returned from this method should implement a from_obj() and from_dict() method. Returns: None if no type_ or factory is defined by the field. Return a class with from_dict and from_obj methods otherwise. """ if self.factory: return self.factory elif self.type_: return self.type_ else: return None
def convert(self, value):
    """Best-effort conversion of *value* to this instance's format.

    Returns *value* unchanged when ``convert_to_format`` raises
    ``ValueError`` or ``TypeError``.
    """
    result = value
    try:
        result = convert_to_format(value, self._format)
    except (TypeError, ValueError):
        pass
    return result
Convert self value.
Below is the instruction that describes the task: ### Input: Convert self value. ### Response: def convert(self, value): """Convert self value.""" try: return convert_to_format(value, self._format) except (ValueError, TypeError): return value
def shapes(self):
    """|GroupShapes| object for this group.

    The |GroupShapes| object provides access to the group's member shapes
    and provides methods for adding new ones.
    """
    # Imported locally rather than at module level, presumably to avoid a
    # circular import between the shape modules -- confirm before hoisting.
    from pptx.shapes.shapetree import GroupShapes
    return GroupShapes(self._element, self)
|GroupShapes| object for this group. The |GroupShapes| object provides access to the group's member shapes and provides methods for adding new ones.
Below is the the instruction that describes the task: ### Input: |GroupShapes| object for this group. The |GroupShapes| object provides access to the group's member shapes and provides methods for adding new ones. ### Response: def shapes(self): """|GroupShapes| object for this group. The |GroupShapes| object provides access to the group's member shapes and provides methods for adding new ones. """ from pptx.shapes.shapetree import GroupShapes return GroupShapes(self._element, self)
def tcpip(self, port: "int | str" = 5555) -> None:
    '''Restart adb server listening on TCP on PORT.

    :param port: TCP port for adbd to listen on (default 5555); coerced to
        ``str`` before being passed to the adb CLI.
    '''
    # NOTE(review): the original annotation was ``int or str``, which
    # evaluates to just ``int``; the string annotation documents the intent.
    self._execute('-s', self.device_sn, 'tcpip', str(port))
Restart adb server listening on TCP on PORT.
Below is the instruction that describes the task: ### Input: Restart adb server listening on TCP on PORT. ### Response: def tcpip(self, port: int or str = 5555) -> None: '''Restart adb server listening on TCP on PORT.''' self._execute('-s', self.device_sn, 'tcpip', str(port))
def _rt_parse_types(self, statement, element, mode, lineparser):
    """As part of parse_line(), checks for new type declarations in the statement.

    :param statement: the source line being (re)parsed.
    :param element: the code element that currently owns the line.
    :param mode: edit mode; only ``"insert"`` is acted on here.
    :param lineparser: parser providing absolute character offsets and the
        ``additions`` list that records newly created elements.
    """
    if mode == "insert":
        #Since we got to this point, there is *no* code element that owns the current
        #line which is being replaced; we are merely checking to see if the new line
        #being entered matches a type definition, do the same thing as "insert"
        tnew, start, end = self.tparser.parse_signature(statement, element, element)
        #We need to set sensible boundaries for 'start' and 'end' so that if the lines
        #immediately after this one are member definitions (for example) they get
        #associated correctly with this type.
        if tnew is not None:
            # Convert statement-relative offsets into file-absolute ones.
            tnew.start, tnew.end = lineparser.absolute_charindex(statement, start, end)
            # Only the signature has been seen so far; body comes later.
            tnew.incomplete = True
            element.types[tnew.name.lower()] = tnew
            lineparser.additions.append((tnew, element))
As part of parse_line(), checks for new type declarations in the statement.
Below is the the instruction that describes the task: ### Input: As part of parse_line(), checks for new type declarations in the statement. ### Response: def _rt_parse_types(self, statement, element, mode, lineparser): """As part of parse_line(), checks for new type declarations in the statement.""" if mode == "insert": #Since we got to this point, there is *no* code element that owns the current #line which is being replaced; we are merely checking to see if the new line #being entered matches a type definition, do the same thing as "insert" tnew, start, end = self.tparser.parse_signature(statement, element, element) #We need to set sensible boundaries for 'start' and 'end' so that if the lines #immediately after this one are member definitions (for example) they get #associated correctly with this type. if tnew is not None: tnew.start, tnew.end = lineparser.absolute_charindex(statement, start, end) tnew.incomplete = True element.types[tnew.name.lower()] = tnew lineparser.additions.append((tnew, element))
def unixtime2str(timestamp, fmt='%Y-%m-%d %H:%M:%S'):
    """
    Convert a Unix timestamp to a formatted local-time string.

    .. warning:: the timestamp is interpreted in **seconds**, not
        milliseconds.

    :param timestamp: unix timestamp
    :type timestamp: int
    :param fmt: output format passed to ``time.strftime``
    :type fmt: str
    :return: the formatted string, or None when conversion fails
    :rtype: str or None
    """
    dt = None
    try:
        # Bind the struct_time to its own name instead of shadowing the
        # ``timestamp`` argument as the original code did.
        local_time = time.localtime(timestamp)
        dt = time.strftime(fmt, local_time)
    except (TypeError, ValueError, OverflowError, OSError) as err:
        # Best-effort API: report the problem and fall through to return None.
        # (Narrowed from a blanket ``except Exception`` to the errors
        # localtime/strftime actually raise for bad input.)
        print(err)
    return dt
将 ``秒级别的时间戳`` 转换成字符串 .. warning: 时间戳是以 ``秒`` 为单位的 :param timestamp: unix timestamp :type timestamp: int :param fmt: show format :type fmt: str :return: :rtype: str
Below is the the instruction that describes the task: ### Input: 将 ``秒级别的时间戳`` 转换成字符串 .. warning: 时间戳是以 ``秒`` 为单位的 :param timestamp: unix timestamp :type timestamp: int :param fmt: show format :type fmt: str :return: :rtype: str ### Response: def unixtime2str(timestamp, fmt='%Y-%m-%d %H:%M:%S'): """ 将 ``秒级别的时间戳`` 转换成字符串 .. warning: 时间戳是以 ``秒`` 为单位的 :param timestamp: unix timestamp :type timestamp: int :param fmt: show format :type fmt: str :return: :rtype: str """ dt = None try: timestamp = time.localtime(timestamp) dt = time.strftime(fmt, timestamp) except Exception as err: print(err) return dt
def load_config(self):
    """Load shell settings from the ``awsshellrc`` config file (or template)."""
    loaded = Config().load('awsshellrc')
    self.config_obj = loaded
    section = loaded['aws-shell']
    self.config_section = section
    # Boolean options are stored as strings in the config file.
    self.model_completer.match_fuzzy = section.as_bool('match_fuzzy')
    self.enable_vi_bindings = section.as_bool('enable_vi_bindings')
    self.show_completion_columns = section.as_bool('show_completion_columns')
    self.show_help = section.as_bool('show_help')
    self.theme = section['theme']
Load the config from the config file or template.
Below is the the instruction that describes the task: ### Input: Load the config from the config file or template. ### Response: def load_config(self): """Load the config from the config file or template.""" config = Config() self.config_obj = config.load('awsshellrc') self.config_section = self.config_obj['aws-shell'] self.model_completer.match_fuzzy = self.config_section.as_bool( 'match_fuzzy') self.enable_vi_bindings = self.config_section.as_bool( 'enable_vi_bindings') self.show_completion_columns = self.config_section.as_bool( 'show_completion_columns') self.show_help = self.config_section.as_bool('show_help') self.theme = self.config_section['theme']
def find_path(self, notebook_id):
    """Resolve *notebook_id* to the notebook's full filesystem path.

    Raises a 404 ``HTTPError`` when the id is unknown.
    """
    try:
        notebook_name = self.mapping[notebook_id]
    except KeyError:
        raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id)
    else:
        return self.get_path_by_name(notebook_name)
Return a full path to a notebook given its notebook_id.
Below is the the instruction that describes the task: ### Input: Return a full path to a notebook given its notebook_id. ### Response: def find_path(self, notebook_id): """Return a full path to a notebook given its notebook_id.""" try: name = self.mapping[notebook_id] except KeyError: raise web.HTTPError(404, u'Notebook does not exist: %s' % notebook_id) return self.get_path_by_name(name)
def seq_length_dist_plot (self):
    """ Create the HTML for the Sequence Length Distribution plot

    Emits either a line plot (when samples have varying read lengths) or a
    short informational note (when every sample has a single read length).
    Returns None when no sample carries a sequence_length_distribution module.
    """
    data = dict()
    seq_lengths = set()
    multiple_lenths = False
    for s_name in self.fastqc_data:
        try:
            # Map each (possibly ranged) read length to its count, collapsing
            # ranged lengths to a single representative bp value via
            # avg_bp_from_range().
            data[s_name] = {self.avg_bp_from_range(d['length']): d['count'] for d in self.fastqc_data[s_name]['sequence_length_distribution']}
            seq_lengths.update(data[s_name].keys())
            if len(set(data[s_name].keys())) > 1:
                multiple_lenths = True
        except KeyError:
            # This sample's report lacks the module; skip it silently.
            pass
    if len(data) == 0:
        log.debug('sequence_length_distribution not found in FastQC reports')
        return None
    if not multiple_lenths:
        # Every sample has reads of one single length: a plot would be a
        # single point per sample, so show an informational note instead.
        lengths = 'bp , '.join([str(l) for l in list(seq_lengths)])
        desc = 'All samples have sequences of a single length ({}bp).'.format(lengths)
        if len(seq_lengths) > 1:
            desc += ' See the <a href="#general_stats">General Statistics Table</a>.'
        self.add_section (
            name = 'Sequence Length Distribution',
            anchor = 'fastqc_sequence_length_distribution',
            description = '<div class="alert alert-info">{}</div>'.format(desc)
        )
    else:
        pconfig = {
            'id': 'fastqc_sequence_length_distribution_plot',
            'title': 'FastQC: Sequence Length Distribution',
            'ylab': 'Read Count',
            'xlab': 'Sequence Length (bp)',
            'ymin': 0,
            'yMinTickInterval': 0.1,
            'xDecimals': False,
            'colors': self.get_status_cols('sequence_length_distribution'),
            'tt_label': '<b>{point.x} bp</b>: {point.y}',
        }
        self.add_section (
            name = 'Sequence Length Distribution',
            anchor = 'fastqc_sequence_length_distribution',
            description = '''The distribution of fragment sizes (read lengths) found. See the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/7%20Sequence%20Length%20Distribution.html)''',
            plot = linegraph.plot(data, pconfig)
        )
Create the HTML for the Sequence Length Distribution plot
Below is the the instruction that describes the task: ### Input: Create the HTML for the Sequence Length Distribution plot ### Response: def seq_length_dist_plot (self): """ Create the HTML for the Sequence Length Distribution plot """ data = dict() seq_lengths = set() multiple_lenths = False for s_name in self.fastqc_data: try: data[s_name] = {self.avg_bp_from_range(d['length']): d['count'] for d in self.fastqc_data[s_name]['sequence_length_distribution']} seq_lengths.update(data[s_name].keys()) if len(set(data[s_name].keys())) > 1: multiple_lenths = True except KeyError: pass if len(data) == 0: log.debug('sequence_length_distribution not found in FastQC reports') return None if not multiple_lenths: lengths = 'bp , '.join([str(l) for l in list(seq_lengths)]) desc = 'All samples have sequences of a single length ({}bp).'.format(lengths) if len(seq_lengths) > 1: desc += ' See the <a href="#general_stats">General Statistics Table</a>.' self.add_section ( name = 'Sequence Length Distribution', anchor = 'fastqc_sequence_length_distribution', description = '<div class="alert alert-info">{}</div>'.format(desc) ) else: pconfig = { 'id': 'fastqc_sequence_length_distribution_plot', 'title': 'FastQC: Sequence Length Distribution', 'ylab': 'Read Count', 'xlab': 'Sequence Length (bp)', 'ymin': 0, 'yMinTickInterval': 0.1, 'xDecimals': False, 'colors': self.get_status_cols('sequence_length_distribution'), 'tt_label': '<b>{point.x} bp</b>: {point.y}', } self.add_section ( name = 'Sequence Length Distribution', anchor = 'fastqc_sequence_length_distribution', description = '''The distribution of fragment sizes (read lengths) found. See the [FastQC help](http://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/3%20Analysis%20Modules/7%20Sequence%20Length%20Distribution.html)''', plot = linegraph.plot(data, pconfig) )
def d3logpdf_dlink3(self, link_f, y, Y_metadata=None):
    """
    Third order derivative of the log-likelihood w.r.t. link(f) for the
    gamma likelihood.

    .. math::
        \\frac{d^{3} \\ln p(y_{i}|\\lambda(f_{i}))}{d^{3}\\lambda(f)} = -\\beta^{3}\\frac{d^{2}\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\qquad \\alpha_{i} = \\beta y_{i}

    :param link_f: latent variables link(f)
    :type link_f: Nx1 array
    :param y: data (not used by this derivative)
    :type y: Nx1 array
    :param Y_metadata: not used in the gamma distribution
    :returns: third derivative of the log-likelihood evaluated at link(f)
    :rtype: Nx1 array
    """
    beta = self.beta
    # polygamma(2, .) is the second derivative of the digamma function Psi.
    return -(beta ** 3) * special.polygamma(2, beta * link_f)
Third order derivative log-likelihood function at y given link(f) w.r.t link(f) .. math:: \\frac{d^{3} \\ln p(y_{i}|\lambda(f_{i}))}{d^{3}\\lambda(f)} = -\\beta^{3}\\frac{d^{2}\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\ \\alpha_{i} = \\beta y_{i} :param link_f: latent variables link(f) :type link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in gamma distribution :returns: third derivative of likelihood evaluated at points f :rtype: Nx1 array
Below is the the instruction that describes the task: ### Input: Third order derivative log-likelihood function at y given link(f) w.r.t link(f) .. math:: \\frac{d^{3} \\ln p(y_{i}|\lambda(f_{i}))}{d^{3}\\lambda(f)} = -\\beta^{3}\\frac{d^{2}\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\ \\alpha_{i} = \\beta y_{i} :param link_f: latent variables link(f) :type link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in gamma distribution :returns: third derivative of likelihood evaluated at points f :rtype: Nx1 array ### Response: def d3logpdf_dlink3(self, link_f, y, Y_metadata=None): """ Third order derivative log-likelihood function at y given link(f) w.r.t link(f) .. math:: \\frac{d^{3} \\ln p(y_{i}|\lambda(f_{i}))}{d^{3}\\lambda(f)} = -\\beta^{3}\\frac{d^{2}\\Psi(\\alpha_{i})}{d\\alpha_{i}}\\\\ \\alpha_{i} = \\beta y_{i} :param link_f: latent variables link(f) :type link_f: Nx1 array :param y: data :type y: Nx1 array :param Y_metadata: Y_metadata which is not used in gamma distribution :returns: third derivative of likelihood evaluated at points f :rtype: Nx1 array """ d3lik_dlink3 = -special.polygamma(2, self.beta*link_f)*(self.beta**3) return d3lik_dlink3
async def handle_frame(self, frame):
    """Handle incoming API frame, return True if this was the expected frame."""
    if isinstance(frame, FrameGetAllNodesInformationConfirmation):
        # Remember how many per-node notifications to expect.
        self.number_of_nodes = frame.number_of_nodes
        # We are still waiting for FrameGetAllNodesInformationNotification
        return False
    if isinstance(frame, FrameGetAllNodesInformationNotification):
        # Collect each per-node notification until the finished frame arrives.
        self.notification_frames.append(frame)
    if isinstance(frame, FrameGetAllNodesInformationFinishedNotification):
        if self.number_of_nodes != len(self.notification_frames):
            # A count mismatch is only warned about; the exchange is still
            # marked successful below.
            PYVLXLOG.warning("Number of received scenes does not match expected number")
        self.success = True
        return True
    return False
Handle incoming API frame, return True if this was the expected frame.
Below is the the instruction that describes the task: ### Input: Handle incoming API frame, return True if this was the expected frame. ### Response: async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetAllNodesInformationConfirmation): self.number_of_nodes = frame.number_of_nodes # We are still waiting for FrameGetAllNodesInformationNotification return False if isinstance(frame, FrameGetAllNodesInformationNotification): self.notification_frames.append(frame) if isinstance(frame, FrameGetAllNodesInformationFinishedNotification): if self.number_of_nodes != len(self.notification_frames): PYVLXLOG.warning("Number of received scenes does not match expected number") self.success = True return True return False
def _get_vrf_name(self, ri):
    """Build the VRF name for router *ri*, appending the region id when
    multi-region support is enabled.
    """
    base_name = ri.router_name()[:self.DEV_NAME_LEN]
    if cfg.CONF.multi_region.enable_multi_region:
        return "%s-%s" % (base_name, cfg.CONF.multi_region.region_id)
    return base_name
overloaded method for generating a vrf_name that supports region_id
Below is the the instruction that describes the task: ### Input: overloaded method for generating a vrf_name that supports region_id ### Response: def _get_vrf_name(self, ri): """ overloaded method for generating a vrf_name that supports region_id """ router_id = ri.router_name()[:self.DEV_NAME_LEN] is_multi_region_enabled = cfg.CONF.multi_region.enable_multi_region if is_multi_region_enabled: region_id = cfg.CONF.multi_region.region_id vrf_name = "%s-%s" % (router_id, region_id) else: vrf_name = router_id return vrf_name
def write_log(self, message):
    """Proxy *message* through to the Git stream writer.

    Returns True when the message was written; returns None when no stream
    is attached or logging has ended.
    """
    stream = self.stream_log
    if not stream or self.ended:
        return None
    stream.write(message)
    return True
Proxy method for GeneralLogger.
Below is the the instruction that describes the task: ### Input: Proxy method for GeneralLogger. ### Response: def write_log(self, message): """ Proxy method for GeneralLogger. """ if self.stream_log and not self.ended: # points to the Git stream write self.stream_log.write(message) return True
def send_nest_params_to_sli(p):
    '''
    Read parameters and send them to SLI

    Parameters
    ----------
    p : dict
        sli parameter name and value as dictionary key and value pairs

    Returns
    -------
    None
    '''
    for name in p.keys():
        value = p[name]
        # SLI cannot take numpy containers directly; convert them to plain
        # Python lists (recursively for dict values) first.
        if type(value) == np.ndarray:
            value = value.tolist()
        if type(value) == dict:
            value = dict_of_numpyarray_to_dict_of_list(value)
        if name == 'neuron_model': # special case as neuron_model is a
            # NEST model and not a string
            try:
                # Push /name value, resolve the literal with 'eval' to the
                # actual NEST model object, then bind it with 'def'.
                nest.sli_run('/'+name)
                nest.sli_push(value)
                nest.sli_run('eval')
                nest.sli_run('def')
            # NOTE(review): bare except silently swallows every error here;
            # it only prints a diagnostic and continues with the next key.
            except:
                print 'Could not put variable %s on SLI stack' % (name)
                print type(value)
        else:
            try:
                # Bind /name value with 'def' on the SLI stack.
                nest.sli_run('/'+name)
                nest.sli_push(value)
                nest.sli_run('def')
            except:
                print 'Could not put variable %s on SLI stack' % (name)
                print type(value)
    return
Read parameters and send them to SLI Parameters ---------- p : dict sli parameter name and value as dictionary key and value pairs Returns ------- None
Below is the the instruction that describes the task: ### Input: Read parameters and send them to SLI Parameters ---------- p : dict sli parameter name and value as dictionary key and value pairs Returns ------- None ### Response: def send_nest_params_to_sli(p): ''' Read parameters and send them to SLI Parameters ---------- p : dict sli parameter name and value as dictionary key and value pairs Returns ------- None ''' for name in p.keys(): value = p[name] if type(value) == np.ndarray: value = value.tolist() if type(value) == dict: value = dict_of_numpyarray_to_dict_of_list(value) if name == 'neuron_model': # special case as neuron_model is a # NEST model and not a string try: nest.sli_run('/'+name) nest.sli_push(value) nest.sli_run('eval') nest.sli_run('def') except: print 'Could not put variable %s on SLI stack' % (name) print type(value) else: try: nest.sli_run('/'+name) nest.sli_push(value) nest.sli_run('def') except: print 'Could not put variable %s on SLI stack' % (name) print type(value) return
def _calcsize(fmt):
    '''struct.calcsize() handling 'z' for Py_ssize_t.'''
    # On LLP64 platforms sizeof(long) < sizeof(void *), so 'z' must map to
    # the pointer-sized 'P'; elsewhere plain 'L' suffices.
    ssize_code = 'P' if _sizeof_Clong < _sizeof_Cvoidp else 'L'  # pragma: no coverage
    return calcsize(fmt.replace('z', ssize_code))
struct.calcsize() handling 'z' for Py_ssize_t.
Below is the the instruction that describes the task: ### Input: struct.calcsize() handling 'z' for Py_ssize_t. ### Response: def _calcsize(fmt): '''struct.calcsize() handling 'z' for Py_ssize_t. ''' # sizeof(long) != sizeof(ssize_t) on LLP64 if _sizeof_Clong < _sizeof_Cvoidp: # pragma: no coverage z = 'P' else: z = 'L' return calcsize(fmt.replace('z', z))
def Guo_Sun(dp, voidage, vs, rho, mu, Dt, L=1):
    r'''Calculates pressure drop across a packed bed of spheres using a
    correlation developed in [1]_. This is valid for highly-packed particles
    at particle/tube diameter ratios between 2 and 3, where a ring packing
    structure occurs. If a packing ratio is so low, it is important to use
    this model because in some cases its predictions are as low as half those
    of other models!

    .. math::
        f_v = 180 + \left(9.5374\frac{d_p}{D_t} - 2.8054\right)Re_{Erg}^{0.97}

    .. math::
        f_v = \frac{\Delta P d_p^2}{\mu v_s L}\frac{\epsilon^3}{(1-\epsilon)^2}

    .. math::
        Re_{Erg} = \frac{\rho v_s d_p}{\mu(1-\epsilon)}

    Parameters
    ----------
    dp : float
        Particle diameter of spheres [m]
    voidage : float
        Void fraction of bed packing [-]
    vs : float
        Superficial velocity of the fluid (volumetric flow rate/cross-sectional
        area) [m/s]
    rho : float
        Density of the fluid [kg/m^3]
    mu : float
        Viscosity of the fluid, [Pa*s]
    Dt : float
        Diameter of the tube, [m]
    L : float, optional
        Length the fluid flows in the packed bed [m]

    Returns
    -------
    dP : float
        Pressure drop across the bed [Pa]

    Notes
    -----
    Developed with data in the range of:

    .. math::
        100 < Re_{m} <33000\\
        2 < d_t/d_p < 3 1\\
        0.476 < \epsilon <0.492

    Examples
    --------
    >>> Guo_Sun(dp=14.2E-3, voidage=0.492, vs=0.6, rho=1E3, mu=1E-3, Dt=40.9E-3)
    42019.529911473706

    References
    ----------
    .. [1] Guo, Zehua, Zhongning Sun, Nan Zhang, Ming Ding, and Jiaqing Liu.
       "Pressure Drop in Slender Packed Beds with Novel Packing Arrangement."
       Powder Technology 321 (November 2017): 286-92.
       doi:10.1016/j.powtec.2017.08.024.
    '''
    # At 2 < Dt/dp < 3, particles in contact with the wall tend to form a
    # highly ordered ring structure, which this correlation accounts for.
    modified_re = dp*rho*vs/mu/(1 - voidage)
    friction = 180 + (9.5374*dp/Dt - 2.8054)*modified_re**0.97
    # Ergun-style scaling converts the friction factor into a pressure drop.
    ergun_scale = (mu*vs*L/dp**2)*(1 - voidage)**2/voidage**3
    return friction*ergun_scale
r'''Calculates pressure drop across a packed bed of spheres using a correlation developed in [1]_. This is valid for highly-packed particles at particle/tube diameter ratios between 2 and 3, where a ring packing structure occurs. If a packing ratio is so low, it is important to use this model because in some cases its predictions are as low as half those of other models! .. math:: f_v = 180 + \left(9.5374\frac{d_p}{D_t} - 2.8054\right)Re_{Erg}^{0.97} .. math:: f_v = \frac{\Delta P d_p^2}{\mu v_s L}\frac{\epsilon^3}{(1-\epsilon)^2} .. math:: Re_{Erg} = \frac{\rho v_s d_p}{\mu(1-\epsilon)} Parameters ---------- dp : float Particle diameter of spheres [m] voidage : float Void fraction of bed packing [-] vs : float Superficial velocity of the fluid (volumetric flow rate/cross-sectional area)[m/s] rho : float Density of the fluid [kg/m^3] mu : float Viscosity of the fluid, [Pa*s] Dt : float Diameter of the tube, [m] L : float, optional Length the fluid flows in the packed bed [m] Returns ------- dP : float Pressure drop across the bed [Pa] Notes ----- Developed with data in the range of: .. math:: 100 < Re_{m} <33000\\ 2 < d_t/d_p < 3 1\\ 0.476 < \epsilon <0.492 Examples -------- >>> Guo_Sun(dp=14.2E-3, voidage=0.492, vs=0.6, rho=1E3, mu=1E-3, Dt=40.9E-3) 42019.529911473706 References ---------- .. [1] Guo, Zehua, Zhongning Sun, Nan Zhang, Ming Ding, and Jiaqing Liu. "Pressure Drop in Slender Packed Beds with Novel Packing Arrangement." Powder Technology 321 (November 2017): 286-92. doi:10.1016/j.powtec.2017.08.024.
Below is the the instruction that describes the task: ### Input: r'''Calculates pressure drop across a packed bed of spheres using a correlation developed in [1]_. This is valid for highly-packed particles at particle/tube diameter ratios between 2 and 3, where a ring packing structure occurs. If a packing ratio is so low, it is important to use this model because in some cases its predictions are as low as half those of other models! .. math:: f_v = 180 + \left(9.5374\frac{d_p}{D_t} - 2.8054\right)Re_{Erg}^{0.97} .. math:: f_v = \frac{\Delta P d_p^2}{\mu v_s L}\frac{\epsilon^3}{(1-\epsilon)^2} .. math:: Re_{Erg} = \frac{\rho v_s d_p}{\mu(1-\epsilon)} Parameters ---------- dp : float Particle diameter of spheres [m] voidage : float Void fraction of bed packing [-] vs : float Superficial velocity of the fluid (volumetric flow rate/cross-sectional area)[m/s] rho : float Density of the fluid [kg/m^3] mu : float Viscosity of the fluid, [Pa*s] Dt : float Diameter of the tube, [m] L : float, optional Length the fluid flows in the packed bed [m] Returns ------- dP : float Pressure drop across the bed [Pa] Notes ----- Developed with data in the range of: .. math:: 100 < Re_{m} <33000\\ 2 < d_t/d_p < 3 1\\ 0.476 < \epsilon <0.492 Examples -------- >>> Guo_Sun(dp=14.2E-3, voidage=0.492, vs=0.6, rho=1E3, mu=1E-3, Dt=40.9E-3) 42019.529911473706 References ---------- .. [1] Guo, Zehua, Zhongning Sun, Nan Zhang, Ming Ding, and Jiaqing Liu. "Pressure Drop in Slender Packed Beds with Novel Packing Arrangement." Powder Technology 321 (November 2017): 286-92. doi:10.1016/j.powtec.2017.08.024. ### Response: def Guo_Sun(dp, voidage, vs, rho, mu, Dt, L=1): r'''Calculates pressure drop across a packed bed of spheres using a correlation developed in [1]_. This is valid for highly-packed particles at particle/tube diameter ratios between 2 and 3, where a ring packing structure occurs. 
If a packing ratio is so low, it is important to use this model because in some cases its predictions are as low as half those of other models! .. math:: f_v = 180 + \left(9.5374\frac{d_p}{D_t} - 2.8054\right)Re_{Erg}^{0.97} .. math:: f_v = \frac{\Delta P d_p^2}{\mu v_s L}\frac{\epsilon^3}{(1-\epsilon)^2} .. math:: Re_{Erg} = \frac{\rho v_s d_p}{\mu(1-\epsilon)} Parameters ---------- dp : float Particle diameter of spheres [m] voidage : float Void fraction of bed packing [-] vs : float Superficial velocity of the fluid (volumetric flow rate/cross-sectional area)[m/s] rho : float Density of the fluid [kg/m^3] mu : float Viscosity of the fluid, [Pa*s] Dt : float Diameter of the tube, [m] L : float, optional Length the fluid flows in the packed bed [m] Returns ------- dP : float Pressure drop across the bed [Pa] Notes ----- Developed with data in the range of: .. math:: 100 < Re_{m} <33000\\ 2 < d_t/d_p < 3 1\\ 0.476 < \epsilon <0.492 Examples -------- >>> Guo_Sun(dp=14.2E-3, voidage=0.492, vs=0.6, rho=1E3, mu=1E-3, Dt=40.9E-3) 42019.529911473706 References ---------- .. [1] Guo, Zehua, Zhongning Sun, Nan Zhang, Ming Ding, and Jiaqing Liu. "Pressure Drop in Slender Packed Beds with Novel Packing Arrangement." Powder Technology 321 (November 2017): 286-92. doi:10.1016/j.powtec.2017.08.024. ''' # 2 < D/d < 3, particles in contact with the wall tend to form a highly ordered ring structure. Rem = dp*rho*vs/mu/(1-voidage) fv = 180 + (9.5374*dp/Dt - 2.8054)*Rem**0.97 return fv*(mu*vs*L/dp**2)*(1-voidage)**2/voidage**3
def notice(txt, color=False):
    "print notice"
    # Wrap in warning colour codes only when requested.
    print(config.Col.WARNING + txt + config.Col.ENDC if color else txt)
print notice
Below is the the instruction that describes the task: ### Input: print notice ### Response: def notice(txt, color=False): "print notice" if color: txt = config.Col.WARNING + txt + config.Col.ENDC print(txt)
def get_scheme(self):
    """When Splunk starts, it looks for all the modular inputs defined by
    its configuration, and tries to run them with the argument --scheme.
    Splunkd expects the modular inputs to print a description of the
    input in XML on stdout. The modular input framework takes care of all
    the details of formatting XML and printing it. The user need only
    override get_scheme and return a new Scheme object.

    :return: scheme, a Scheme object
    """
    # "Random Numbers" is the name Splunk will display to users for this input.
    scheme = Scheme("Random Numbers")
    scheme.description = "Streams events containing a random number."

    # If you set external validation to True, without overriding validate_input,
    # the script will accept anything as valid. Generally you only need external
    # validation if there are relationships you must maintain among the
    # parameters, such as requiring min to be less than max in this example,
    # or you need to check that some resource is reachable or valid.
    # Otherwise, Splunk lets you specify a validation string for each argument
    # and will run validation internally using that string.
    scheme.use_external_validation = True
    scheme.use_single_instance = True

    def _required_number_argument(name, title, description):
        # min and max share identical setup, so build both through one helper.
        argument = Argument(name)
        argument.title = title
        argument.data_type = Argument.data_type_number
        argument.description = description
        argument.required_on_create = True
        return argument

    # If you are not using external validation, you would instead add
    # something like:
    #     scheme.validation = "min > 0"
    scheme.add_argument(_required_number_argument(
        "min", "Minimum",
        "Minimum random number to be produced by this input."))
    scheme.add_argument(_required_number_argument(
        "max", "Maximum",
        "Maximum random number to be produced by this input."))
    return scheme
When Splunk starts, it looks for all the modular inputs defined by its configuration, and tries to run them with the argument --scheme. Splunkd expects the modular inputs to print a description of the input in XML on stdout. The modular input framework takes care of all the details of formatting XML and printing it. The user need only override get_scheme and return a new Scheme object. :return: scheme, a Scheme object
Below is the the instruction that describes the task: ### Input: When Splunk starts, it looks for all the modular inputs defined by its configuration, and tries to run them with the argument --scheme. Splunkd expects the modular inputs to print a description of the input in XML on stdout. The modular input framework takes care of all the details of formatting XML and printing it. The user need only override get_scheme and return a new Scheme object. :return: scheme, a Scheme object ### Response: def get_scheme(self): """When Splunk starts, it looks for all the modular inputs defined by its configuration, and tries to run them with the argument --scheme. Splunkd expects the modular inputs to print a description of the input in XML on stdout. The modular input framework takes care of all the details of formatting XML and printing it. The user need only override get_scheme and return a new Scheme object. :return: scheme, a Scheme object """ # "random_numbers" is the name Splunk will display to users for this input. scheme = Scheme("Random Numbers") scheme.description = "Streams events containing a random number." # If you set external validation to True, without overriding validate_input, # the script will accept anything as valid. Generally you only need external # validation if there are relationships you must maintain among the # parameters, such as requiring min to be less than max in this example, # or you need to check that some resource is reachable or valid. # Otherwise, Splunk lets you specify a validation string for each argument # and will run validation internally using that string. scheme.use_external_validation = True scheme.use_single_instance = True min_argument = Argument("min") min_argument.title = "Minimum" min_argument.data_type = Argument.data_type_number min_argument.description = "Minimum random number to be produced by this input." 
min_argument.required_on_create = True # If you are not using external validation, you would add something like: # # scheme.validation = "min > 0" scheme.add_argument(min_argument) max_argument = Argument("max") max_argument.title = "Maximum" max_argument.data_type = Argument.data_type_number max_argument.description = "Maximum random number to be produced by this input." max_argument.required_on_create = True scheme.add_argument(max_argument) return scheme
def parse_match_info(self, req: Request, name: str, field: Field) -> typing.Any: """Pull a value from the request's ``match_info``.""" return core.get_value(req.match_info, name, field)
Pull a value from the request's ``match_info``.
Below is the the instruction that describes the task: ### Input: Pull a value from the request's ``match_info``. ### Response: def parse_match_info(self, req: Request, name: str, field: Field) -> typing.Any: """Pull a value from the request's ``match_info``.""" return core.get_value(req.match_info, name, field)
def rot13(self, encrypyted): ''' Very secure encryption (ceaser used it), apply it multiple times''' def random(): # guarnteed random from fair dice roll return 4 return codecs.encode( codecs.encode( crypto(encrypyted).decode(), 'rot_{amount}'.format(amount=int(52/random()))), 'rot_{salt}'.format(salt=int(52/random())))
Very secure encryption (ceaser used it), apply it multiple times
Below is the the instruction that describes the task: ### Input: Very secure encryption (ceaser used it), apply it multiple times ### Response: def rot13(self, encrypyted): ''' Very secure encryption (ceaser used it), apply it multiple times''' def random(): # guarnteed random from fair dice roll return 4 return codecs.encode( codecs.encode( crypto(encrypyted).decode(), 'rot_{amount}'.format(amount=int(52/random()))), 'rot_{salt}'.format(salt=int(52/random())))
def install_python_module_locally(name): """ instals a python module using pip """ with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=False, capture=True): local('pip --quiet install %s' % name)
instals a python module using pip
Below is the the instruction that describes the task: ### Input: instals a python module using pip ### Response: def install_python_module_locally(name): """ instals a python module using pip """ with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=False, capture=True): local('pip --quiet install %s' % name)
def transformation_to_string(matrix, translation_vec=(0, 0, 0), components=('x', 'y', 'z'), c='', delim=','): """ Convenience method. Given matrix returns string, e.g. x+2y+1/4 :param matrix :param translation_vec :param components: either ('x', 'y', 'z') or ('a', 'b', 'c') :param c: optional additional character to print (used for magmoms) :param delim: delimiter :return: xyz string """ parts = [] for i in range(3): s = '' m = matrix[i] t = translation_vec[i] for j, dim in enumerate(components): if m[j] != 0: f = Fraction(m[j]).limit_denominator() if s != '' and f >= 0: s += '+' if abs(f.numerator) != 1: s += str(f.numerator) elif f < 0: s += '-' s += c + dim if f.denominator != 1: s += '/' + str(f.denominator) if t != 0: s += ('+' if (t > 0 and s != '') else '') + str(Fraction(t).limit_denominator()) if s == '': s += '0' parts.append(s) return delim.join(parts)
Convenience method. Given matrix returns string, e.g. x+2y+1/4 :param matrix :param translation_vec :param components: either ('x', 'y', 'z') or ('a', 'b', 'c') :param c: optional additional character to print (used for magmoms) :param delim: delimiter :return: xyz string
Below is the the instruction that describes the task: ### Input: Convenience method. Given matrix returns string, e.g. x+2y+1/4 :param matrix :param translation_vec :param components: either ('x', 'y', 'z') or ('a', 'b', 'c') :param c: optional additional character to print (used for magmoms) :param delim: delimiter :return: xyz string ### Response: def transformation_to_string(matrix, translation_vec=(0, 0, 0), components=('x', 'y', 'z'), c='', delim=','): """ Convenience method. Given matrix returns string, e.g. x+2y+1/4 :param matrix :param translation_vec :param components: either ('x', 'y', 'z') or ('a', 'b', 'c') :param c: optional additional character to print (used for magmoms) :param delim: delimiter :return: xyz string """ parts = [] for i in range(3): s = '' m = matrix[i] t = translation_vec[i] for j, dim in enumerate(components): if m[j] != 0: f = Fraction(m[j]).limit_denominator() if s != '' and f >= 0: s += '+' if abs(f.numerator) != 1: s += str(f.numerator) elif f < 0: s += '-' s += c + dim if f.denominator != 1: s += '/' + str(f.denominator) if t != 0: s += ('+' if (t > 0 and s != '') else '') + str(Fraction(t).limit_denominator()) if s == '': s += '0' parts.append(s) return delim.join(parts)
def disable_hostgroup_svc_checks(self, hostgroup): """Disable service checks for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None """ for host_id in hostgroup.get_hosts(): if host_id in self.daemon.hosts: for service_id in self.daemon.hosts[host_id].services: if service_id in self.daemon.services: self.disable_svc_check(self.daemon.services[service_id])
Disable service checks for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None
Below is the the instruction that describes the task: ### Input: Disable service checks for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None ### Response: def disable_hostgroup_svc_checks(self, hostgroup): """Disable service checks for a hostgroup Format of the line that triggers function call:: DISABLE_HOSTGROUP_SVC_CHECKS;<hostgroup_name> :param hostgroup: hostgroup to disable :type hostgroup: alignak.objects.hostgroup.Hostgroup :return: None """ for host_id in hostgroup.get_hosts(): if host_id in self.daemon.hosts: for service_id in self.daemon.hosts[host_id].services: if service_id in self.daemon.services: self.disable_svc_check(self.daemon.services[service_id])
def cbpdnmsk_class_label_lookup(label): """Get a ConvBPDNMask class from a label string.""" clsmod = {'admm': admm_cbpdn.ConvBPDNMaskDcpl, 'fista': fista_cbpdn.ConvBPDNMask} if label in clsmod: return clsmod[label] else: raise ValueError('Unknown ConvBPDNMask solver method %s' % label)
Get a ConvBPDNMask class from a label string.
Below is the the instruction that describes the task: ### Input: Get a ConvBPDNMask class from a label string. ### Response: def cbpdnmsk_class_label_lookup(label): """Get a ConvBPDNMask class from a label string.""" clsmod = {'admm': admm_cbpdn.ConvBPDNMaskDcpl, 'fista': fista_cbpdn.ConvBPDNMask} if label in clsmod: return clsmod[label] else: raise ValueError('Unknown ConvBPDNMask solver method %s' % label)
def _iter_table_records(self): """ Generate a (tag, offset, length) 3-tuple for each of the tables in this font file. """ count = self._table_count bufr = self._stream.read(offset=12, length=count*16) tmpl = '>4sLLL' for i in range(count): offset = i * 16 tag, checksum, off, len_ = unpack_from(tmpl, bufr, offset) yield tag.decode('utf-8'), off, len_
Generate a (tag, offset, length) 3-tuple for each of the tables in this font file.
Below is the the instruction that describes the task: ### Input: Generate a (tag, offset, length) 3-tuple for each of the tables in this font file. ### Response: def _iter_table_records(self): """ Generate a (tag, offset, length) 3-tuple for each of the tables in this font file. """ count = self._table_count bufr = self._stream.read(offset=12, length=count*16) tmpl = '>4sLLL' for i in range(count): offset = i * 16 tag, checksum, off, len_ = unpack_from(tmpl, bufr, offset) yield tag.decode('utf-8'), off, len_
def jinja_env(self) -> Environment: """The jinja environment used to load templates.""" if self._jinja_env is None: self._jinja_env = self.create_jinja_environment() return self._jinja_env
The jinja environment used to load templates.
Below is the the instruction that describes the task: ### Input: The jinja environment used to load templates. ### Response: def jinja_env(self) -> Environment: """The jinja environment used to load templates.""" if self._jinja_env is None: self._jinja_env = self.create_jinja_environment() return self._jinja_env
def add_projection(query_proto, *projection): """Add projection properties to the given datatstore.Query proto message.""" for p in projection: proto = query_proto.projection.add() proto.property.name = p
Add projection properties to the given datatstore.Query proto message.
Below is the the instruction that describes the task: ### Input: Add projection properties to the given datatstore.Query proto message. ### Response: def add_projection(query_proto, *projection): """Add projection properties to the given datatstore.Query proto message.""" for p in projection: proto = query_proto.projection.add() proto.property.name = p
def color(self, *args): ''' :param args: color in a supported format. :return: Color object containing the color. ''' return self.Color(mode=self.color_mode, color_range=self.color_range, *args)
:param args: color in a supported format. :return: Color object containing the color.
Below is the the instruction that describes the task: ### Input: :param args: color in a supported format. :return: Color object containing the color. ### Response: def color(self, *args): ''' :param args: color in a supported format. :return: Color object containing the color. ''' return self.Color(mode=self.color_mode, color_range=self.color_range, *args)
def show_fig_outline_in_viewer(self, state): """Draw a frame around the figure viewer if state is True.""" if state is True: self.figviewer.figcanvas.setStyleSheet( "FigureCanvas{border: 1px solid lightgrey;}") else: self.figviewer.figcanvas.setStyleSheet("FigureCanvas{}") self.option_changed('show_plot_outline', state)
Draw a frame around the figure viewer if state is True.
Below is the the instruction that describes the task: ### Input: Draw a frame around the figure viewer if state is True. ### Response: def show_fig_outline_in_viewer(self, state): """Draw a frame around the figure viewer if state is True.""" if state is True: self.figviewer.figcanvas.setStyleSheet( "FigureCanvas{border: 1px solid lightgrey;}") else: self.figviewer.figcanvas.setStyleSheet("FigureCanvas{}") self.option_changed('show_plot_outline', state)
def get_rst_excerpt(rst_doc: document, paragraphs: int = 1) -> str: """ Given rst, parse and return a portion """ texts = [] for count, p in enumerate(rst_doc.traverse(paragraph)): texts.append(p.astext()) if count + 1 == paragraphs: break return ' '.join(texts)
Given rst, parse and return a portion
Below is the the instruction that describes the task: ### Input: Given rst, parse and return a portion ### Response: def get_rst_excerpt(rst_doc: document, paragraphs: int = 1) -> str: """ Given rst, parse and return a portion """ texts = [] for count, p in enumerate(rst_doc.traverse(paragraph)): texts.append(p.astext()) if count + 1 == paragraphs: break return ' '.join(texts)
def _from_binary_ea(cls, binary_stream): """See base class.""" _ea_list = [] offset = 0 #_MOD_LOGGER.debug(f"Creating Ea object from binary stream {binary_stream.tobytes()}...") _MOD_LOGGER.debug("Creating Ea object from binary '%s'...", binary_stream.tobytes()) while True: entry = EaEntry.create_from_binary(binary_stream[offset:]) offset += entry.offset_next_ea _ea_list.append(entry) if offset >= len(binary_stream): break nw_obj = cls(_ea_list) _MOD_LOGGER.debug("Attempted to unpack EA from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj) return nw_obj
See base class.
Below is the the instruction that describes the task: ### Input: See base class. ### Response: def _from_binary_ea(cls, binary_stream): """See base class.""" _ea_list = [] offset = 0 #_MOD_LOGGER.debug(f"Creating Ea object from binary stream {binary_stream.tobytes()}...") _MOD_LOGGER.debug("Creating Ea object from binary '%s'...", binary_stream.tobytes()) while True: entry = EaEntry.create_from_binary(binary_stream[offset:]) offset += entry.offset_next_ea _ea_list.append(entry) if offset >= len(binary_stream): break nw_obj = cls(_ea_list) _MOD_LOGGER.debug("Attempted to unpack EA from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj) return nw_obj
def deepnn(x): """deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout. """ # Reshape to use within a convolutional neural net. # Last dimension is for "features" - there is only one here, since images # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. with tf.name_scope("reshape"): x_image = tf.reshape(x, [-1, 28, 28, 1]) # First convolutional layer - maps one grayscale image to 32 feature maps. with tf.name_scope("conv1"): W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # Pooling layer - downsamples by 2X. with tf.name_scope("pool1"): h_pool1 = max_pool_2x2(h_conv1) # Second convolutional layer -- maps 32 feature maps to 64. with tf.name_scope("conv2"): W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # Second pooling layer. with tf.name_scope("pool2"): h_pool2 = max_pool_2x2(h_conv2) # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image # is down to 7x7x64 feature maps -- maps this to 1024 features. with tf.name_scope("fc1"): W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout - controls the complexity of the model, prevents co-adaptation of # features. 
with tf.name_scope("dropout"): keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # Map the 1024 features to 10 classes, one for each digit with tf.name_scope("fc2"): W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 return y_conv, keep_prob
deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout.
Below is the the instruction that describes the task: ### Input: deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout. ### Response: def deepnn(x): """deepnn builds the graph for a deep net for classifying digits. Args: x: an input tensor with the dimensions (N_examples, 784), where 784 is the number of pixels in a standard MNIST image. Returns: A tuple (y, keep_prob). y is a tensor of shape (N_examples, 10), with values equal to the logits of classifying the digit into one of 10 classes (the digits 0-9). keep_prob is a scalar placeholder for the probability of dropout. """ # Reshape to use within a convolutional neural net. # Last dimension is for "features" - there is only one here, since images # are grayscale -- it would be 3 for an RGB image, 4 for RGBA, etc. with tf.name_scope("reshape"): x_image = tf.reshape(x, [-1, 28, 28, 1]) # First convolutional layer - maps one grayscale image to 32 feature maps. with tf.name_scope("conv1"): W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # Pooling layer - downsamples by 2X. with tf.name_scope("pool1"): h_pool1 = max_pool_2x2(h_conv1) # Second convolutional layer -- maps 32 feature maps to 64. with tf.name_scope("conv2"): W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # Second pooling layer. with tf.name_scope("pool2"): h_pool2 = max_pool_2x2(h_conv2) # Fully connected layer 1 -- after 2 round of downsampling, our 28x28 image # is down to 7x7x64 feature maps -- maps this to 1024 features. 
with tf.name_scope("fc1"): W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # Dropout - controls the complexity of the model, prevents co-adaptation of # features. with tf.name_scope("dropout"): keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # Map the 1024 features to 10 classes, one for each digit with tf.name_scope("fc2"): W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 return y_conv, keep_prob
def put(self, message): """Put a message into the outgoing message stack. Outgoing message will be stored indefinitely to support multi-users. """ with self._outgoing_lock: self._outgoing.append(message) self._outgoing_counter += 1 # Check to see if there are pending queues waiting for the item. if self._outgoing_counter in self._outgoing_pending_queues: for q in self._outgoing_pending_queues[self._outgoing_counter]: q.put(message) del self._outgoing_pending_queues[self._outgoing_counter]
Put a message into the outgoing message stack. Outgoing message will be stored indefinitely to support multi-users.
Below is the the instruction that describes the task: ### Input: Put a message into the outgoing message stack. Outgoing message will be stored indefinitely to support multi-users. ### Response: def put(self, message): """Put a message into the outgoing message stack. Outgoing message will be stored indefinitely to support multi-users. """ with self._outgoing_lock: self._outgoing.append(message) self._outgoing_counter += 1 # Check to see if there are pending queues waiting for the item. if self._outgoing_counter in self._outgoing_pending_queues: for q in self._outgoing_pending_queues[self._outgoing_counter]: q.put(message) del self._outgoing_pending_queues[self._outgoing_counter]
def delete(self, path, data=None, headers=None, params=None): """ Deletes resources at given paths. :rtype: dict :return: Empty dictionary to have consistent interface. Some of Atlassian REST resources don't return any content. """ self.request('DELETE', path=path, data=data, headers=headers, params=params)
Deletes resources at given paths. :rtype: dict :return: Empty dictionary to have consistent interface. Some of Atlassian REST resources don't return any content.
Below is the the instruction that describes the task: ### Input: Deletes resources at given paths. :rtype: dict :return: Empty dictionary to have consistent interface. Some of Atlassian REST resources don't return any content. ### Response: def delete(self, path, data=None, headers=None, params=None): """ Deletes resources at given paths. :rtype: dict :return: Empty dictionary to have consistent interface. Some of Atlassian REST resources don't return any content. """ self.request('DELETE', path=path, data=data, headers=headers, params=params)
def run(self): """Executes the code of the specified module.""" with utils.ChangeDir(self.dirname): sys.path.insert(0, self.dirname) sys.argv[1:] = self.args runpy.run_module(self.not_suffixed(self.filename), run_name='__main__', alter_sys=True)
Executes the code of the specified module.
Below is the the instruction that describes the task: ### Input: Executes the code of the specified module. ### Response: def run(self): """Executes the code of the specified module.""" with utils.ChangeDir(self.dirname): sys.path.insert(0, self.dirname) sys.argv[1:] = self.args runpy.run_module(self.not_suffixed(self.filename), run_name='__main__', alter_sys=True)
def spsolve(A, b): """Solve the sparse linear system Ax=b, where b may be a vector or a matrix. Parameters ---------- A : ndarray or sparse matrix The square matrix A will be converted into CSC or CSR form b : ndarray or sparse matrix The matrix or vector representing the right hand side of the equation. Returns ------- x : ndarray or sparse matrix the solution of the sparse linear equation. If b is a vector, then x is a vector of size A.shape[0] If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:] """ x = UmfpackLU(A).solve(b) if b.ndim == 2 and b.shape[1] == 1: # compatibility with scipy.sparse.spsolve quirk return x.ravel() else: return x
Solve the sparse linear system Ax=b, where b may be a vector or a matrix. Parameters ---------- A : ndarray or sparse matrix The square matrix A will be converted into CSC or CSR form b : ndarray or sparse matrix The matrix or vector representing the right hand side of the equation. Returns ------- x : ndarray or sparse matrix the solution of the sparse linear equation. If b is a vector, then x is a vector of size A.shape[0] If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:]
Below is the the instruction that describes the task: ### Input: Solve the sparse linear system Ax=b, where b may be a vector or a matrix. Parameters ---------- A : ndarray or sparse matrix The square matrix A will be converted into CSC or CSR form b : ndarray or sparse matrix The matrix or vector representing the right hand side of the equation. Returns ------- x : ndarray or sparse matrix the solution of the sparse linear equation. If b is a vector, then x is a vector of size A.shape[0] If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:] ### Response: def spsolve(A, b): """Solve the sparse linear system Ax=b, where b may be a vector or a matrix. Parameters ---------- A : ndarray or sparse matrix The square matrix A will be converted into CSC or CSR form b : ndarray or sparse matrix The matrix or vector representing the right hand side of the equation. Returns ------- x : ndarray or sparse matrix the solution of the sparse linear equation. If b is a vector, then x is a vector of size A.shape[0] If b is a matrix, then x is a matrix of size (A.shape[0],)+b.shape[1:] """ x = UmfpackLU(A).solve(b) if b.ndim == 2 and b.shape[1] == 1: # compatibility with scipy.sparse.spsolve quirk return x.ravel() else: return x
def addcomment(self, invoice_increment_id, comment=None, email=False, include_comment=False): """ Add comment to invoice or change its state :param invoice_increment_id: Invoice ID """ if comment is None: comment = "" return bool( self.call( 'sales_order_invoice.addComment', [invoice_increment_id, comment, email, include_comment] ) )
Add comment to invoice or change its state :param invoice_increment_id: Invoice ID
Below is the the instruction that describes the task: ### Input: Add comment to invoice or change its state :param invoice_increment_id: Invoice ID ### Response: def addcomment(self, invoice_increment_id, comment=None, email=False, include_comment=False): """ Add comment to invoice or change its state :param invoice_increment_id: Invoice ID """ if comment is None: comment = "" return bool( self.call( 'sales_order_invoice.addComment', [invoice_increment_id, comment, email, include_comment] ) )
def time_varying_coefficients(d, timelines, constant=False, independent=0, randgen=random.exponential): """ Time vary coefficients d: the dimension of the dataset timelines: the observational times constant: True for constant coefficients independent: the number of coffients to set to 0 (covariate is ind of survival), or a list of covariates to make indepent. randgen: how scalar coefficients (betas) are sampled. returns a matrix (t,d+1) of coefficients """ t = timelines.shape[0] try: a = np.arange(d) random.shuffle(a) independent = a[:independent] except IndexError: pass n_funcs = len(FUNCS) coefficients = np.zeros((t, d)) data_generators = [] for i in range(d): f = FUNCS[random.randint(0, n_funcs)] if not constant else constant_ if i in independent: beta = 0 else: beta = randgen((1 - constant) * 0.5 / d) coefficients[:, i] = f(timelines, alpha=randgen(2000.0 / t), beta=beta) data_generators.append(f.__doc__) df_coefficients = pd.DataFrame(coefficients, columns=data_generators, index=timelines) return df_coefficients
Time vary coefficients d: the dimension of the dataset timelines: the observational times constant: True for constant coefficients independent: the number of coffients to set to 0 (covariate is ind of survival), or a list of covariates to make indepent. randgen: how scalar coefficients (betas) are sampled. returns a matrix (t,d+1) of coefficients
Below is the the instruction that describes the task: ### Input: Time vary coefficients d: the dimension of the dataset timelines: the observational times constant: True for constant coefficients independent: the number of coffients to set to 0 (covariate is ind of survival), or a list of covariates to make indepent. randgen: how scalar coefficients (betas) are sampled. returns a matrix (t,d+1) of coefficients ### Response: def time_varying_coefficients(d, timelines, constant=False, independent=0, randgen=random.exponential): """ Time vary coefficients d: the dimension of the dataset timelines: the observational times constant: True for constant coefficients independent: the number of coffients to set to 0 (covariate is ind of survival), or a list of covariates to make indepent. randgen: how scalar coefficients (betas) are sampled. returns a matrix (t,d+1) of coefficients """ t = timelines.shape[0] try: a = np.arange(d) random.shuffle(a) independent = a[:independent] except IndexError: pass n_funcs = len(FUNCS) coefficients = np.zeros((t, d)) data_generators = [] for i in range(d): f = FUNCS[random.randint(0, n_funcs)] if not constant else constant_ if i in independent: beta = 0 else: beta = randgen((1 - constant) * 0.5 / d) coefficients[:, i] = f(timelines, alpha=randgen(2000.0 / t), beta=beta) data_generators.append(f.__doc__) df_coefficients = pd.DataFrame(coefficients, columns=data_generators, index=timelines) return df_coefficients
def _embedding_metric_mds(matrix, dimensions=3): """ Private method to calculate MMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array) """ mds = sklearn.manifold.MDS(n_components=dimensions, dissimilarity='precomputed', metric=True) mds.fit(matrix) return mds.embedding_
Private method to calculate MMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array)
Below is the the instruction that describes the task: ### Input: Private method to calculate MMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array) ### Response: def _embedding_metric_mds(matrix, dimensions=3): """ Private method to calculate MMDS embedding :param dimensions: (int) :return: coordinate matrix (np.array) """ mds = sklearn.manifold.MDS(n_components=dimensions, dissimilarity='precomputed', metric=True) mds.fit(matrix) return mds.embedding_
def to_json(cls, config, compact=False, indent=2, level=0): """Convert HOCON input into a JSON output :return: JSON string representation :type return: basestring """ lines = "" if isinstance(config, ConfigTree): if len(config) == 0: lines += '{}' else: lines += '{\n' bet_lines = [] for key, item in config.items(): bet_lines.append('{indent}"{key}": {value}'.format( indent=''.rjust((level + 1) * indent, ' '), key=key.strip('"'), # for dotted keys enclosed with "" to not be interpreted as nested key value=cls.to_json(item, compact, indent, level + 1)) ) lines += ',\n'.join(bet_lines) lines += '\n{indent}}}'.format(indent=''.rjust(level * indent, ' ')) elif isinstance(config, list): if len(config) == 0: lines += '[]' else: lines += '[\n' bet_lines = [] for item in config: bet_lines.append('{indent}{value}'.format( indent=''.rjust((level + 1) * indent, ' '), value=cls.to_json(item, compact, indent, level + 1)) ) lines += ',\n'.join(bet_lines) lines += '\n{indent}]'.format(indent=''.rjust(level * indent, ' ')) elif isinstance(config, basestring): lines = json.dumps(config) elif config is None or isinstance(config, NoneValue): lines = 'null' elif config is True: lines = 'true' elif config is False: lines = 'false' else: lines = str(config) return lines
Convert HOCON input into a JSON output :return: JSON string representation :type return: basestring
Below is the the instruction that describes the task: ### Input: Convert HOCON input into a JSON output :return: JSON string representation :type return: basestring ### Response: def to_json(cls, config, compact=False, indent=2, level=0): """Convert HOCON input into a JSON output :return: JSON string representation :type return: basestring """ lines = "" if isinstance(config, ConfigTree): if len(config) == 0: lines += '{}' else: lines += '{\n' bet_lines = [] for key, item in config.items(): bet_lines.append('{indent}"{key}": {value}'.format( indent=''.rjust((level + 1) * indent, ' '), key=key.strip('"'), # for dotted keys enclosed with "" to not be interpreted as nested key value=cls.to_json(item, compact, indent, level + 1)) ) lines += ',\n'.join(bet_lines) lines += '\n{indent}}}'.format(indent=''.rjust(level * indent, ' ')) elif isinstance(config, list): if len(config) == 0: lines += '[]' else: lines += '[\n' bet_lines = [] for item in config: bet_lines.append('{indent}{value}'.format( indent=''.rjust((level + 1) * indent, ' '), value=cls.to_json(item, compact, indent, level + 1)) ) lines += ',\n'.join(bet_lines) lines += '\n{indent}]'.format(indent=''.rjust(level * indent, ' ')) elif isinstance(config, basestring): lines = json.dumps(config) elif config is None or isinstance(config, NoneValue): lines = 'null' elif config is True: lines = 'true' elif config is False: lines = 'false' else: lines = str(config) return lines
def _is_variant(self, gemini_variant, ind_objs): """Check if the variant is a variation in any of the individuals Args: gemini_variant (GeminiQueryRow): The gemini variant ind_objs (list(puzzle.models.individual)): A list of individuals to check Returns: bool : If any of the individuals has the variant """ indexes = (ind.ind_index for ind in ind_objs) #Check if any individual have a heterozygous or homozygous variant call for index in indexes: gt_call = gemini_variant['gt_types'][index] if (gt_call == 1 or gt_call == 3): return True return False
Check if the variant is a variation in any of the individuals Args: gemini_variant (GeminiQueryRow): The gemini variant ind_objs (list(puzzle.models.individual)): A list of individuals to check Returns: bool : If any of the individuals has the variant
Below is the the instruction that describes the task: ### Input: Check if the variant is a variation in any of the individuals Args: gemini_variant (GeminiQueryRow): The gemini variant ind_objs (list(puzzle.models.individual)): A list of individuals to check Returns: bool : If any of the individuals has the variant ### Response: def _is_variant(self, gemini_variant, ind_objs): """Check if the variant is a variation in any of the individuals Args: gemini_variant (GeminiQueryRow): The gemini variant ind_objs (list(puzzle.models.individual)): A list of individuals to check Returns: bool : If any of the individuals has the variant """ indexes = (ind.ind_index for ind in ind_objs) #Check if any individual have a heterozygous or homozygous variant call for index in indexes: gt_call = gemini_variant['gt_types'][index] if (gt_call == 1 or gt_call == 3): return True return False
def b58decode(s, errors='strict'): "Decode a base58-encoding string, returning bytes." if not s: return (b'', 0) # Convert the string to an integer n = 0 for c in s: n *= 58 if c not in b58digits: raise InvalidBase58Error(u"character %r is not a valid base58 " u"character" % c) digit = b58digits.index(c) n += digit # Convert the integer to bytes res = BigInteger(n).serialize((n.bit_length()+7)//8 or 1) # Add padding back. pad = 0 for c in s[:-1]: if c == b58digits[0]: pad += 1 else: break return (b'\x00' * pad + res, len(s))
Decode a base58-encoding string, returning bytes.
Below is the instruction that describes the task: ### Input: Decode a base58-encoding string, returning bytes. ### Response: def b58decode(s, errors='strict'): "Decode a base58-encoding string, returning bytes." if not s: return (b'', 0) # Convert the string to an integer n = 0 for c in s: n *= 58 if c not in b58digits: raise InvalidBase58Error(u"character %r is not a valid base58 " u"character" % c) digit = b58digits.index(c) n += digit # Convert the integer to bytes res = BigInteger(n).serialize((n.bit_length()+7)//8 or 1) # Add padding back. pad = 0 for c in s[:-1]: if c == b58digits[0]: pad += 1 else: break return (b'\x00' * pad + res, len(s))
def fermi_fourier_trans_inverse_conjugate_4(qubits): """We will need to map the momentum states in the reversed order for spin-down states to the position picture. This transformation can be simply implemented the complex conjugate of the former one. We only need to change the S gate to S* = S ** 3. Args: qubits: list of four qubits """ yield fswap(qubits[1], qubits[2]), yield fermi_fourier_trans_2(qubits[0], qubits[1]) yield fermi_fourier_trans_2(qubits[2], qubits[3]) yield fswap(qubits[1], qubits[2]) yield fermi_fourier_trans_2(qubits[0], qubits[1]) yield cirq.S(qubits[2]) ** 3 yield fermi_fourier_trans_2(qubits[2], qubits[3]) yield fswap(qubits[1], qubits[2])
We will need to map the momentum states in the reversed order for spin-down states to the position picture. This transformation can be simply implemented the complex conjugate of the former one. We only need to change the S gate to S* = S ** 3. Args: qubits: list of four qubits
Below is the instruction that describes the task: ### Input: We will need to map the momentum states in the reversed order for spin-down states to the position picture. This transformation can be simply implemented the complex conjugate of the former one. We only need to change the S gate to S* = S ** 3. Args: qubits: list of four qubits ### Response: def fermi_fourier_trans_inverse_conjugate_4(qubits): """We will need to map the momentum states in the reversed order for spin-down states to the position picture. This transformation can be simply implemented the complex conjugate of the former one. We only need to change the S gate to S* = S ** 3. Args: qubits: list of four qubits """ yield fswap(qubits[1], qubits[2]), yield fermi_fourier_trans_2(qubits[0], qubits[1]) yield fermi_fourier_trans_2(qubits[2], qubits[3]) yield fswap(qubits[1], qubits[2]) yield fermi_fourier_trans_2(qubits[0], qubits[1]) yield cirq.S(qubits[2]) ** 3 yield fermi_fourier_trans_2(qubits[2], qubits[3]) yield fswap(qubits[1], qubits[2])
def point_normal_cloud(self, camera_intr): """Computes a PointNormalCloud from the depth image. Parameters ---------- camera_intr : :obj:`CameraIntrinsics` The camera parameters on which this depth image was taken. Returns ------- :obj:`autolab_core.PointNormalCloud` A PointNormalCloud created from the depth image. """ point_cloud_im = camera_intr.deproject_to_image(self) normal_cloud_im = point_cloud_im.normal_cloud_im() point_cloud = point_cloud_im.to_point_cloud() normal_cloud = normal_cloud_im.to_normal_cloud() return PointNormalCloud( point_cloud.data, normal_cloud.data, frame=self._frame)
Computes a PointNormalCloud from the depth image. Parameters ---------- camera_intr : :obj:`CameraIntrinsics` The camera parameters on which this depth image was taken. Returns ------- :obj:`autolab_core.PointNormalCloud` A PointNormalCloud created from the depth image.
Below is the instruction that describes the task: ### Input: Computes a PointNormalCloud from the depth image. Parameters ---------- camera_intr : :obj:`CameraIntrinsics` The camera parameters on which this depth image was taken. Returns ------- :obj:`autolab_core.PointNormalCloud` A PointNormalCloud created from the depth image. ### Response: def point_normal_cloud(self, camera_intr): """Computes a PointNormalCloud from the depth image. Parameters ---------- camera_intr : :obj:`CameraIntrinsics` The camera parameters on which this depth image was taken. Returns ------- :obj:`autolab_core.PointNormalCloud` A PointNormalCloud created from the depth image. """ point_cloud_im = camera_intr.deproject_to_image(self) normal_cloud_im = point_cloud_im.normal_cloud_im() point_cloud = point_cloud_im.to_point_cloud() normal_cloud = normal_cloud_im.to_normal_cloud() return PointNormalCloud( point_cloud.data, normal_cloud.data, frame=self._frame)
def p_while_sentence(p): """ statement : while_start co_statements_co label_end_while | while_start program_co label_end_while """ gl.LOOPS.pop() q = make_block(p[2], p[3]) if is_number(p[1]) and p[1].value: if q is None: warning(p[1].lineno, "Condition is always true and leads to an infinite loop.") else: warning(p[1].lineno, "Condition is always true and might lead to an infinite loop.") p[0] = make_sentence('WHILE', p[1], q)
statement : while_start co_statements_co label_end_while | while_start program_co label_end_while
Below is the instruction that describes the task: ### Input: statement : while_start co_statements_co label_end_while | while_start program_co label_end_while ### Response: def p_while_sentence(p): """ statement : while_start co_statements_co label_end_while | while_start program_co label_end_while """ gl.LOOPS.pop() q = make_block(p[2], p[3]) if is_number(p[1]) and p[1].value: if q is None: warning(p[1].lineno, "Condition is always true and leads to an infinite loop.") else: warning(p[1].lineno, "Condition is always true and might lead to an infinite loop.") p[0] = make_sentence('WHILE', p[1], q)
def tcase_comment(tcase): """ Extract testcase comment section / testcase description @returns the testcase-comment from the tcase["fpath"] as a list of strings """ src = open(tcase["fpath"]).read() if len(src) < 3: cij.err("rprtr::tcase_comment: invalid src, tcase: %r" % tcase["name"]) return None ext = os.path.splitext(tcase["fpath"])[-1] if ext not in [".sh", ".py"]: cij.err("rprtr::tcase_comment: invalid ext: %r, tcase: %r" % ( ext, tcase["name"] )) return None comment = [] for line in src.splitlines()[2:]: if ext == ".sh" and not line.startswith("#"): break elif ext == ".py" and not '"""' in line: break comment.append(line) return comment
Extract testcase comment section / testcase description @returns the testcase-comment from the tcase["fpath"] as a list of strings
Below is the instruction that describes the task: ### Input: Extract testcase comment section / testcase description @returns the testcase-comment from the tcase["fpath"] as a list of strings ### Response: def tcase_comment(tcase): """ Extract testcase comment section / testcase description @returns the testcase-comment from the tcase["fpath"] as a list of strings """ src = open(tcase["fpath"]).read() if len(src) < 3: cij.err("rprtr::tcase_comment: invalid src, tcase: %r" % tcase["name"]) return None ext = os.path.splitext(tcase["fpath"])[-1] if ext not in [".sh", ".py"]: cij.err("rprtr::tcase_comment: invalid ext: %r, tcase: %r" % ( ext, tcase["name"] )) return None comment = [] for line in src.splitlines()[2:]: if ext == ".sh" and not line.startswith("#"): break elif ext == ".py" and not '"""' in line: break comment.append(line) return comment
def evaluate_barycentric(self, lambda1, lambda2, lambda3, _verify=True): r"""Compute a point on the surface. Evaluates :math:`B\left(\lambda_1, \lambda_2, \lambda_3\right)`. .. image:: ../../images/surface_evaluate_barycentric.png :align: center .. testsetup:: surface-barycentric, surface-barycentric-fail1, surface-barycentric-fail2, surface-barycentric-no-verify import numpy as np import bezier nodes = np.asfortranarray([ [0.0, 0.5, 1.0 , 0.125, 0.375, 0.25], [0.0, 0.0, 0.25, 0.5 , 0.375, 1.0 ], ]) surface = bezier.Surface(nodes, degree=2) .. doctest:: surface-barycentric :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5, 1.0 , 0.125, 0.375, 0.25], ... [0.0, 0.0, 0.25, 0.5 , 0.375, 1.0 ], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> point = surface.evaluate_barycentric(0.125, 0.125, 0.75) >>> point array([[0.265625 ], [0.73046875]]) .. testcleanup:: surface-barycentric import make_images make_images.surface_evaluate_barycentric(surface, point) However, this can't be used for points **outside** the reference triangle: .. doctest:: surface-barycentric-fail1 >>> surface.evaluate_barycentric(-0.25, 0.75, 0.5) Traceback (most recent call last): ... ValueError: ('Weights must be positive', -0.25, 0.75, 0.5) or for non-barycentric coordinates; .. doctest:: surface-barycentric-fail2 >>> surface.evaluate_barycentric(0.25, 0.25, 0.25) Traceback (most recent call last): ... ValueError: ('Weights do not sum to 1', 0.25, 0.25, 0.25) However, these "invalid" inputs can be used if ``_verify`` is :data:`False`. .. doctest:: surface-barycentric-no-verify :options: +NORMALIZE_WHITESPACE >>> surface.evaluate_barycentric(-0.25, 0.75, 0.5, _verify=False) array([[0.6875 ], [0.546875]]) >>> surface.evaluate_barycentric(0.25, 0.25, 0.25, _verify=False) array([[0.203125], [0.1875 ]]) Args: lambda1 (float): Parameter along the reference triangle. lambda2 (float): Parameter along the reference triangle. 
lambda3 (float): Parameter along the reference triangle. _verify (Optional[bool]): Indicates if the barycentric coordinates should be verified as summing to one and all non-negative (i.e. verified as barycentric). Can either be used to evaluate at points outside the domain, or to save time when the caller already knows the input is verified. Defaults to :data:`True`. Returns: numpy.ndarray: The point on the surface (as a two dimensional NumPy array with a single column). Raises: ValueError: If the weights are not valid barycentric coordinates, i.e. they don't sum to ``1``. (Won't raise if ``_verify=False``.) ValueError: If some weights are negative. (Won't raise if ``_verify=False``.) """ if _verify: self._verify_barycentric(lambda1, lambda2, lambda3) return _surface_helpers.evaluate_barycentric( self._nodes, self._degree, lambda1, lambda2, lambda3 )
r"""Compute a point on the surface. Evaluates :math:`B\left(\lambda_1, \lambda_2, \lambda_3\right)`. .. image:: ../../images/surface_evaluate_barycentric.png :align: center .. testsetup:: surface-barycentric, surface-barycentric-fail1, surface-barycentric-fail2, surface-barycentric-no-verify import numpy as np import bezier nodes = np.asfortranarray([ [0.0, 0.5, 1.0 , 0.125, 0.375, 0.25], [0.0, 0.0, 0.25, 0.5 , 0.375, 1.0 ], ]) surface = bezier.Surface(nodes, degree=2) .. doctest:: surface-barycentric :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5, 1.0 , 0.125, 0.375, 0.25], ... [0.0, 0.0, 0.25, 0.5 , 0.375, 1.0 ], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> point = surface.evaluate_barycentric(0.125, 0.125, 0.75) >>> point array([[0.265625 ], [0.73046875]]) .. testcleanup:: surface-barycentric import make_images make_images.surface_evaluate_barycentric(surface, point) However, this can't be used for points **outside** the reference triangle: .. doctest:: surface-barycentric-fail1 >>> surface.evaluate_barycentric(-0.25, 0.75, 0.5) Traceback (most recent call last): ... ValueError: ('Weights must be positive', -0.25, 0.75, 0.5) or for non-barycentric coordinates; .. doctest:: surface-barycentric-fail2 >>> surface.evaluate_barycentric(0.25, 0.25, 0.25) Traceback (most recent call last): ... ValueError: ('Weights do not sum to 1', 0.25, 0.25, 0.25) However, these "invalid" inputs can be used if ``_verify`` is :data:`False`. .. doctest:: surface-barycentric-no-verify :options: +NORMALIZE_WHITESPACE >>> surface.evaluate_barycentric(-0.25, 0.75, 0.5, _verify=False) array([[0.6875 ], [0.546875]]) >>> surface.evaluate_barycentric(0.25, 0.25, 0.25, _verify=False) array([[0.203125], [0.1875 ]]) Args: lambda1 (float): Parameter along the reference triangle. lambda2 (float): Parameter along the reference triangle. lambda3 (float): Parameter along the reference triangle. 
_verify (Optional[bool]): Indicates if the barycentric coordinates should be verified as summing to one and all non-negative (i.e. verified as barycentric). Can either be used to evaluate at points outside the domain, or to save time when the caller already knows the input is verified. Defaults to :data:`True`. Returns: numpy.ndarray: The point on the surface (as a two dimensional NumPy array with a single column). Raises: ValueError: If the weights are not valid barycentric coordinates, i.e. they don't sum to ``1``. (Won't raise if ``_verify=False``.) ValueError: If some weights are negative. (Won't raise if ``_verify=False``.)
Below is the instruction that describes the task: ### Input: r"""Compute a point on the surface. Evaluates :math:`B\left(\lambda_1, \lambda_2, \lambda_3\right)`. .. image:: ../../images/surface_evaluate_barycentric.png :align: center .. testsetup:: surface-barycentric, surface-barycentric-fail1, surface-barycentric-fail2, surface-barycentric-no-verify import numpy as np import bezier nodes = np.asfortranarray([ [0.0, 0.5, 1.0 , 0.125, 0.375, 0.25], [0.0, 0.0, 0.25, 0.5 , 0.375, 1.0 ], ]) surface = bezier.Surface(nodes, degree=2) .. doctest:: surface-barycentric :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5, 1.0 , 0.125, 0.375, 0.25], ... [0.0, 0.0, 0.25, 0.5 , 0.375, 1.0 ], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> point = surface.evaluate_barycentric(0.125, 0.125, 0.75) >>> point array([[0.265625 ], [0.73046875]]) .. testcleanup:: surface-barycentric import make_images make_images.surface_evaluate_barycentric(surface, point) However, this can't be used for points **outside** the reference triangle: .. doctest:: surface-barycentric-fail1 >>> surface.evaluate_barycentric(-0.25, 0.75, 0.5) Traceback (most recent call last): ... ValueError: ('Weights must be positive', -0.25, 0.75, 0.5) or for non-barycentric coordinates; .. doctest:: surface-barycentric-fail2 >>> surface.evaluate_barycentric(0.25, 0.25, 0.25) Traceback (most recent call last): ... ValueError: ('Weights do not sum to 1', 0.25, 0.25, 0.25) However, these "invalid" inputs can be used if ``_verify`` is :data:`False`. .. doctest:: surface-barycentric-no-verify :options: +NORMALIZE_WHITESPACE >>> surface.evaluate_barycentric(-0.25, 0.75, 0.5, _verify=False) array([[0.6875 ], [0.546875]]) >>> surface.evaluate_barycentric(0.25, 0.25, 0.25, _verify=False) array([[0.203125], [0.1875 ]]) Args: lambda1 (float): Parameter along the reference triangle. lambda2 (float): Parameter along the reference triangle. lambda3 (float): Parameter along the reference triangle. 
_verify (Optional[bool]): Indicates if the barycentric coordinates should be verified as summing to one and all non-negative (i.e. verified as barycentric). Can either be used to evaluate at points outside the domain, or to save time when the caller already knows the input is verified. Defaults to :data:`True`. Returns: numpy.ndarray: The point on the surface (as a two dimensional NumPy array with a single column). Raises: ValueError: If the weights are not valid barycentric coordinates, i.e. they don't sum to ``1``. (Won't raise if ``_verify=False``.) ValueError: If some weights are negative. (Won't raise if ``_verify=False``.) ### Response: def evaluate_barycentric(self, lambda1, lambda2, lambda3, _verify=True): r"""Compute a point on the surface. Evaluates :math:`B\left(\lambda_1, \lambda_2, \lambda_3\right)`. .. image:: ../../images/surface_evaluate_barycentric.png :align: center .. testsetup:: surface-barycentric, surface-barycentric-fail1, surface-barycentric-fail2, surface-barycentric-no-verify import numpy as np import bezier nodes = np.asfortranarray([ [0.0, 0.5, 1.0 , 0.125, 0.375, 0.25], [0.0, 0.0, 0.25, 0.5 , 0.375, 1.0 ], ]) surface = bezier.Surface(nodes, degree=2) .. doctest:: surface-barycentric :options: +NORMALIZE_WHITESPACE >>> nodes = np.asfortranarray([ ... [0.0, 0.5, 1.0 , 0.125, 0.375, 0.25], ... [0.0, 0.0, 0.25, 0.5 , 0.375, 1.0 ], ... ]) >>> surface = bezier.Surface(nodes, degree=2) >>> point = surface.evaluate_barycentric(0.125, 0.125, 0.75) >>> point array([[0.265625 ], [0.73046875]]) .. testcleanup:: surface-barycentric import make_images make_images.surface_evaluate_barycentric(surface, point) However, this can't be used for points **outside** the reference triangle: .. doctest:: surface-barycentric-fail1 >>> surface.evaluate_barycentric(-0.25, 0.75, 0.5) Traceback (most recent call last): ... ValueError: ('Weights must be positive', -0.25, 0.75, 0.5) or for non-barycentric coordinates; .. 
doctest:: surface-barycentric-fail2 >>> surface.evaluate_barycentric(0.25, 0.25, 0.25) Traceback (most recent call last): ... ValueError: ('Weights do not sum to 1', 0.25, 0.25, 0.25) However, these "invalid" inputs can be used if ``_verify`` is :data:`False`. .. doctest:: surface-barycentric-no-verify :options: +NORMALIZE_WHITESPACE >>> surface.evaluate_barycentric(-0.25, 0.75, 0.5, _verify=False) array([[0.6875 ], [0.546875]]) >>> surface.evaluate_barycentric(0.25, 0.25, 0.25, _verify=False) array([[0.203125], [0.1875 ]]) Args: lambda1 (float): Parameter along the reference triangle. lambda2 (float): Parameter along the reference triangle. lambda3 (float): Parameter along the reference triangle. _verify (Optional[bool]): Indicates if the barycentric coordinates should be verified as summing to one and all non-negative (i.e. verified as barycentric). Can either be used to evaluate at points outside the domain, or to save time when the caller already knows the input is verified. Defaults to :data:`True`. Returns: numpy.ndarray: The point on the surface (as a two dimensional NumPy array with a single column). Raises: ValueError: If the weights are not valid barycentric coordinates, i.e. they don't sum to ``1``. (Won't raise if ``_verify=False``.) ValueError: If some weights are negative. (Won't raise if ``_verify=False``.) """ if _verify: self._verify_barycentric(lambda1, lambda2, lambda3) return _surface_helpers.evaluate_barycentric( self._nodes, self._degree, lambda1, lambda2, lambda3 )
def irafcrop(self, irafcropstring): """ This is a wrapper around crop(), similar to iraf imcopy, using iraf conventions (100:199 will be 100 pixels, not 99). """ irafcropstring = irafcropstring[1:-1] # removing the [ ] ranges = irafcropstring.split(",") xr = ranges[0].split(":") yr = ranges[1].split(":") xmin = int(xr[0]) xmax = int(xr[1])+1 ymin = int(yr[0]) ymax = int(yr[1])+1 self.crop(xmin, xmax, ymin, ymax)
This is a wrapper around crop(), similar to iraf imcopy, using iraf conventions (100:199 will be 100 pixels, not 99).
Below is the instruction that describes the task: ### Input: This is a wrapper around crop(), similar to iraf imcopy, using iraf conventions (100:199 will be 100 pixels, not 99). ### Response: def irafcrop(self, irafcropstring): """ This is a wrapper around crop(), similar to iraf imcopy, using iraf conventions (100:199 will be 100 pixels, not 99). """ irafcropstring = irafcropstring[1:-1] # removing the [ ] ranges = irafcropstring.split(",") xr = ranges[0].split(":") yr = ranges[1].split(":") xmin = int(xr[0]) xmax = int(xr[1])+1 ymin = int(yr[0]) ymax = int(yr[1])+1 self.crop(xmin, xmax, ymin, ymax)