code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def set_dimmer(self, dimmer, transition_time=None): """Set dimmer value of a group. dimmer: Integer between 0..255 transition_time: Integer representing tenth of a second (default None) """ values = { ATTR_LIGHT_DIMMER: dimmer, } if transition_time is not None: values[ATTR_TRANSITION_TIME] = transition_time return self.set_values(values)
Set dimmer value of a group. dimmer: Integer between 0..255 transition_time: Integer representing tenth of a second (default None)
Below is the the instruction that describes the task: ### Input: Set dimmer value of a group. dimmer: Integer between 0..255 transition_time: Integer representing tenth of a second (default None) ### Response: def set_dimmer(self, dimmer, transition_time=None): """Set dimmer value of a group. dimmer: Integer between 0..255 transition_time: Integer representing tenth of a second (default None) """ values = { ATTR_LIGHT_DIMMER: dimmer, } if transition_time is not None: values[ATTR_TRANSITION_TIME] = transition_time return self.set_values(values)
def next_frame_sv2p_cutoff(): """SV2P model with additional cutoff in L2 loss for environments like pong.""" hparams = next_frame_sv2p() hparams.video_modality_loss_cutoff = 0.4 hparams.video_num_input_frames = 4 hparams.video_num_target_frames = 1 return hparams
SV2P model with additional cutoff in L2 loss for environments like pong.
Below is the the instruction that describes the task: ### Input: SV2P model with additional cutoff in L2 loss for environments like pong. ### Response: def next_frame_sv2p_cutoff(): """SV2P model with additional cutoff in L2 loss for environments like pong.""" hparams = next_frame_sv2p() hparams.video_modality_loss_cutoff = 0.4 hparams.video_num_input_frames = 4 hparams.video_num_target_frames = 1 return hparams
def _process_ortholog_classes(self, limit=None): """ This method add the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return: """ LOG.info("Processing ortholog classes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (orthology_class_id, orthology_class_name) = row if self.test_mode and orthology_class_id \ not in self.test_ids['orthology_classes']: continue # The orthology class is essentially a KEGG gene ID # that is species agnostic. # Add the ID and label as a gene family class other_labels = re.split(r'[;,]', orthology_class_name) # the first one is the label we'll use orthology_label = other_labels[0] orthology_class_id = 'KEGG-'+orthology_class_id.strip() orthology_type = self.globaltt['gene_family'] model.addClassToGraph( orthology_class_id, orthology_label, orthology_type) if len(other_labels) > 1: # add the rest as synonyms # todo skip the first for s in other_labels: model.addSynonym(orthology_class_id, s.strip()) # add the last one as the description d = other_labels[len(other_labels)-1] model.addDescription(orthology_class_id, d) # add the enzyme commission number (EC:1.2.99.5)as an xref # sometimes there's two, like [EC:1.3.5.1 1.3.5.4] # can also have a dash, like EC:1.10.3.- ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d) if ec_matches is not None: for ecm in ec_matches: model.addXref(orthology_class_id, 'EC:' + ecm) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with ortholog 
classes") return
This method add the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return:
Below is the the instruction that describes the task: ### Input: This method add the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return: ### Response: def _process_ortholog_classes(self, limit=None): """ This method add the KEGG orthology classes to the graph. If there's an embedded enzyme commission number, that is added as an xref. Triples created: <orthology_class_id> is a class <orthology_class_id> has label <orthology_symbols> <orthology_class_id> has description <orthology_description> :param limit: :return: """ LOG.info("Processing ortholog classes") if self.test_mode: graph = self.testgraph else: graph = self.graph model = Model(graph) line_counter = 0 raw = '/'.join((self.rawdir, self.files['ortholog_classes']['file'])) with open(raw, 'r', encoding="iso-8859-1") as csvfile: filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"') for row in filereader: line_counter += 1 (orthology_class_id, orthology_class_name) = row if self.test_mode and orthology_class_id \ not in self.test_ids['orthology_classes']: continue # The orthology class is essentially a KEGG gene ID # that is species agnostic. 
# Add the ID and label as a gene family class other_labels = re.split(r'[;,]', orthology_class_name) # the first one is the label we'll use orthology_label = other_labels[0] orthology_class_id = 'KEGG-'+orthology_class_id.strip() orthology_type = self.globaltt['gene_family'] model.addClassToGraph( orthology_class_id, orthology_label, orthology_type) if len(other_labels) > 1: # add the rest as synonyms # todo skip the first for s in other_labels: model.addSynonym(orthology_class_id, s.strip()) # add the last one as the description d = other_labels[len(other_labels)-1] model.addDescription(orthology_class_id, d) # add the enzyme commission number (EC:1.2.99.5)as an xref # sometimes there's two, like [EC:1.3.5.1 1.3.5.4] # can also have a dash, like EC:1.10.3.- ec_matches = re.findall(r'((?:\d+|\.|-){5,7})', d) if ec_matches is not None: for ecm in ec_matches: model.addXref(orthology_class_id, 'EC:' + ecm) if not self.test_mode and limit is not None and line_counter > limit: break LOG.info("Done with ortholog classes") return
def prob_t_profiles(self, profile_pair, multiplicity, t, return_log=False, ignore_gaps=True): ''' Calculate the probability of observing a node pair at a distance t Parameters ---------- profile_pair: numpy arrays Probability distributions of the nucleotides at either end of the branch. pp[0] = parent, pp[1] = child multiplicity : numpy array The number of times an alignment pattern is observed t : float Length of the branch separating parent and child ignore_gaps: bool If True, ignore mutations to and from gaps in distance calculations return_log : bool Whether or not to exponentiate the result ''' if t<0: logP = -ttconf.BIG_NUMBER else: Qt = self.expQt(t) if len(Qt.shape)==3: res = np.einsum('ai,ija,aj->a', profile_pair[1], Qt, profile_pair[0]) else: res = np.einsum('ai,ij,aj->a', profile_pair[1], Qt, profile_pair[0]) if ignore_gaps and (self.gap_index is not None): # calculate the probability that neither outgroup/node has a gap non_gap_frac = (1-profile_pair[0][:,self.gap_index])*(1-profile_pair[1][:,self.gap_index]) # weigh log LH by the non-gap probability logP = np.sum(multiplicity*np.log(res)*non_gap_frac) else: logP = np.sum(multiplicity*np.log(res)) return logP if return_log else np.exp(logP)
Calculate the probability of observing a node pair at a distance t Parameters ---------- profile_pair: numpy arrays Probability distributions of the nucleotides at either end of the branch. pp[0] = parent, pp[1] = child multiplicity : numpy array The number of times an alignment pattern is observed t : float Length of the branch separating parent and child ignore_gaps: bool If True, ignore mutations to and from gaps in distance calculations return_log : bool Whether or not to exponentiate the result
Below is the the instruction that describes the task: ### Input: Calculate the probability of observing a node pair at a distance t Parameters ---------- profile_pair: numpy arrays Probability distributions of the nucleotides at either end of the branch. pp[0] = parent, pp[1] = child multiplicity : numpy array The number of times an alignment pattern is observed t : float Length of the branch separating parent and child ignore_gaps: bool If True, ignore mutations to and from gaps in distance calculations return_log : bool Whether or not to exponentiate the result ### Response: def prob_t_profiles(self, profile_pair, multiplicity, t, return_log=False, ignore_gaps=True): ''' Calculate the probability of observing a node pair at a distance t Parameters ---------- profile_pair: numpy arrays Probability distributions of the nucleotides at either end of the branch. pp[0] = parent, pp[1] = child multiplicity : numpy array The number of times an alignment pattern is observed t : float Length of the branch separating parent and child ignore_gaps: bool If True, ignore mutations to and from gaps in distance calculations return_log : bool Whether or not to exponentiate the result ''' if t<0: logP = -ttconf.BIG_NUMBER else: Qt = self.expQt(t) if len(Qt.shape)==3: res = np.einsum('ai,ija,aj->a', profile_pair[1], Qt, profile_pair[0]) else: res = np.einsum('ai,ij,aj->a', profile_pair[1], Qt, profile_pair[0]) if ignore_gaps and (self.gap_index is not None): # calculate the probability that neither outgroup/node has a gap non_gap_frac = (1-profile_pair[0][:,self.gap_index])*(1-profile_pair[1][:,self.gap_index]) # weigh log LH by the non-gap probability logP = np.sum(multiplicity*np.log(res)*non_gap_frac) else: logP = np.sum(multiplicity*np.log(res)) return logP if return_log else np.exp(logP)
def send(self, *args, **kwargs): """ Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. """ clean = kwargs.pop('clean', False) if not self._is_rendered: self.render() if clean: self.clean() return super(EmailMessage, self).send(*args, **kwargs)
Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``.
Below is the the instruction that describes the task: ### Input: Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. ### Response: def send(self, *args, **kwargs): """ Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. """ clean = kwargs.pop('clean', False) if not self._is_rendered: self.render() if clean: self.clean() return super(EmailMessage, self).send(*args, **kwargs)
def snapengage(parser, token): """ SnapEngage set-up template tag. Renders Javascript code to set-up SnapEngage chat. You must supply your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting. """ bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError("'%s' takes no arguments" % bits[0]) return SnapEngageNode()
SnapEngage set-up template tag. Renders Javascript code to set-up SnapEngage chat. You must supply your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting.
Below is the the instruction that describes the task: ### Input: SnapEngage set-up template tag. Renders Javascript code to set-up SnapEngage chat. You must supply your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting. ### Response: def snapengage(parser, token): """ SnapEngage set-up template tag. Renders Javascript code to set-up SnapEngage chat. You must supply your widget ID in the ``SNAPENGAGE_WIDGET_ID`` setting. """ bits = token.split_contents() if len(bits) > 1: raise TemplateSyntaxError("'%s' takes no arguments" % bits[0]) return SnapEngageNode()
def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True): """Reads student input for unlocking tests until the student answers correctly. PARAMETERS: unique_id -- str; the ID that is recorded with this unlocking attempt. case_id -- str; the ID that is recorded with this unlocking attempt. question_prompt -- str; the question prompt answer -- list; a list of locked lines in a test case answer. choices -- list or None; a list of choices. If None or an empty list, signifies the question is not multiple choice. randomize -- bool; if True, randomizes the choices on first invocation. DESCRIPTION: Continually prompt the student for an answer to an unlocking question until one of the folliwng happens: 1. The student supplies the correct answer, in which case the supplied answer is returned 2. The student aborts abnormally (either by typing 'exit()' or using Ctrl-C/D. In this case, return None Correctness is determined by the verify method. RETURNS: list; the correct solution (that the student supplied). Each element in the list is a line of the correct output. 
""" if randomize and choices: choices = random.sample(choices, len(choices)) correct = False while not correct: if choices: assert len(answer) == 1, 'Choices must have 1 line of output' choice_map = self._display_choices(choices) question_timestamp = datetime.now() input_lines = [] for line_number, line in enumerate(answer): if len(answer) == 1: prompt = self.PROMPT else: prompt = '(line {}){}'.format(line_number + 1, self.PROMPT) student_input = format.normalize(self._input(prompt)) self._add_history(student_input) if student_input in self.EXIT_INPUTS: raise EOFError if choices and student_input in choice_map: student_input = choice_map[student_input] correct_answer = self._verify_student_input(student_input, line) if correct_answer: input_lines.append(correct_answer) else: input_lines.append(student_input) break else: correct = True tg_id = -1 misU_count_dict = {} rationale = "Unknown - Default Value" if not correct: guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines, self.hash_key) misU_count_dict, tg_id, printed_msg, rationale = guidance_data else: rationale = self.guidance_util.prompt_with_prob() print("-- OK! --") printed_msg = ["-- OK! --"] self.analytics.append({ 'id': unique_id, 'case_id': case_id, 'question timestamp': self.unix_time(question_timestamp), 'answer timestamp': self.unix_time(datetime.now()), 'prompt': question_prompt, 'answer': input_lines, 'correct': correct, 'treatment group id': tg_id, 'rationale': rationale, 'misU count': misU_count_dict, 'printed msg': printed_msg }) print() return input_lines
Reads student input for unlocking tests until the student answers correctly. PARAMETERS: unique_id -- str; the ID that is recorded with this unlocking attempt. case_id -- str; the ID that is recorded with this unlocking attempt. question_prompt -- str; the question prompt answer -- list; a list of locked lines in a test case answer. choices -- list or None; a list of choices. If None or an empty list, signifies the question is not multiple choice. randomize -- bool; if True, randomizes the choices on first invocation. DESCRIPTION: Continually prompt the student for an answer to an unlocking question until one of the folliwng happens: 1. The student supplies the correct answer, in which case the supplied answer is returned 2. The student aborts abnormally (either by typing 'exit()' or using Ctrl-C/D. In this case, return None Correctness is determined by the verify method. RETURNS: list; the correct solution (that the student supplied). Each element in the list is a line of the correct output.
Below is the the instruction that describes the task: ### Input: Reads student input for unlocking tests until the student answers correctly. PARAMETERS: unique_id -- str; the ID that is recorded with this unlocking attempt. case_id -- str; the ID that is recorded with this unlocking attempt. question_prompt -- str; the question prompt answer -- list; a list of locked lines in a test case answer. choices -- list or None; a list of choices. If None or an empty list, signifies the question is not multiple choice. randomize -- bool; if True, randomizes the choices on first invocation. DESCRIPTION: Continually prompt the student for an answer to an unlocking question until one of the folliwng happens: 1. The student supplies the correct answer, in which case the supplied answer is returned 2. The student aborts abnormally (either by typing 'exit()' or using Ctrl-C/D. In this case, return None Correctness is determined by the verify method. RETURNS: list; the correct solution (that the student supplied). Each element in the list is a line of the correct output. ### Response: def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True): """Reads student input for unlocking tests until the student answers correctly. PARAMETERS: unique_id -- str; the ID that is recorded with this unlocking attempt. case_id -- str; the ID that is recorded with this unlocking attempt. question_prompt -- str; the question prompt answer -- list; a list of locked lines in a test case answer. choices -- list or None; a list of choices. If None or an empty list, signifies the question is not multiple choice. randomize -- bool; if True, randomizes the choices on first invocation. DESCRIPTION: Continually prompt the student for an answer to an unlocking question until one of the folliwng happens: 1. The student supplies the correct answer, in which case the supplied answer is returned 2. The student aborts abnormally (either by typing 'exit()' or using Ctrl-C/D. 
In this case, return None Correctness is determined by the verify method. RETURNS: list; the correct solution (that the student supplied). Each element in the list is a line of the correct output. """ if randomize and choices: choices = random.sample(choices, len(choices)) correct = False while not correct: if choices: assert len(answer) == 1, 'Choices must have 1 line of output' choice_map = self._display_choices(choices) question_timestamp = datetime.now() input_lines = [] for line_number, line in enumerate(answer): if len(answer) == 1: prompt = self.PROMPT else: prompt = '(line {}){}'.format(line_number + 1, self.PROMPT) student_input = format.normalize(self._input(prompt)) self._add_history(student_input) if student_input in self.EXIT_INPUTS: raise EOFError if choices and student_input in choice_map: student_input = choice_map[student_input] correct_answer = self._verify_student_input(student_input, line) if correct_answer: input_lines.append(correct_answer) else: input_lines.append(student_input) break else: correct = True tg_id = -1 misU_count_dict = {} rationale = "Unknown - Default Value" if not correct: guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines, self.hash_key) misU_count_dict, tg_id, printed_msg, rationale = guidance_data else: rationale = self.guidance_util.prompt_with_prob() print("-- OK! --") printed_msg = ["-- OK! --"] self.analytics.append({ 'id': unique_id, 'case_id': case_id, 'question timestamp': self.unix_time(question_timestamp), 'answer timestamp': self.unix_time(datetime.now()), 'prompt': question_prompt, 'answer': input_lines, 'correct': correct, 'treatment group id': tg_id, 'rationale': rationale, 'misU count': misU_count_dict, 'printed msg': printed_msg }) print() return input_lines
def __get_type_args(for_type=None, for_types=None): """Parse the arguments and return a tuple of types to implement for. Raises: ValueError or TypeError as appropriate. """ if for_type: if for_types: raise ValueError("Cannot pass both for_type and for_types.") for_types = (for_type,) elif for_types: if not isinstance(for_types, tuple): raise TypeError("for_types must be passed as a tuple of " "types (classes).") else: raise ValueError("Must pass either for_type or for_types.") return for_types
Parse the arguments and return a tuple of types to implement for. Raises: ValueError or TypeError as appropriate.
Below is the the instruction that describes the task: ### Input: Parse the arguments and return a tuple of types to implement for. Raises: ValueError or TypeError as appropriate. ### Response: def __get_type_args(for_type=None, for_types=None): """Parse the arguments and return a tuple of types to implement for. Raises: ValueError or TypeError as appropriate. """ if for_type: if for_types: raise ValueError("Cannot pass both for_type and for_types.") for_types = (for_type,) elif for_types: if not isinstance(for_types, tuple): raise TypeError("for_types must be passed as a tuple of " "types (classes).") else: raise ValueError("Must pass either for_type or for_types.") return for_types
def Environ(variable, default): """A wrapper for `os.environ.get` that works the same way in both Pythons. Args: variable: A name of the variable to get the value of. default: A default value to return in case no value for the given variable is set. Returns: An environment value of the given variable. """ precondition.AssertType(variable, Text) value = os.environ.get(variable, default) if value is None: return default if PY2: # TODO(hanuszczak): https://github.com/google/pytype/issues/127 value = value.decode("utf-8") # pytype: disable=attribute-error return value
A wrapper for `os.environ.get` that works the same way in both Pythons. Args: variable: A name of the variable to get the value of. default: A default value to return in case no value for the given variable is set. Returns: An environment value of the given variable.
Below is the the instruction that describes the task: ### Input: A wrapper for `os.environ.get` that works the same way in both Pythons. Args: variable: A name of the variable to get the value of. default: A default value to return in case no value for the given variable is set. Returns: An environment value of the given variable. ### Response: def Environ(variable, default): """A wrapper for `os.environ.get` that works the same way in both Pythons. Args: variable: A name of the variable to get the value of. default: A default value to return in case no value for the given variable is set. Returns: An environment value of the given variable. """ precondition.AssertType(variable, Text) value = os.environ.get(variable, default) if value is None: return default if PY2: # TODO(hanuszczak): https://github.com/google/pytype/issues/127 value = value.decode("utf-8") # pytype: disable=attribute-error return value
def get_pipeline(self, id=None, params=None): """ `<https://www.elastic.co/guide/en/elasticsearch/plugins/current/ingest.html>`_ :arg id: Comma separated list of pipeline ids. Wildcards supported :arg master_timeout: Explicit operation timeout for connection to master node """ return self.transport.perform_request('GET', _make_path('_ingest', 'pipeline', id), params=params)
`<https://www.elastic.co/guide/en/elasticsearch/plugins/current/ingest.html>`_ :arg id: Comma separated list of pipeline ids. Wildcards supported :arg master_timeout: Explicit operation timeout for connection to master node
Below is the the instruction that describes the task: ### Input: `<https://www.elastic.co/guide/en/elasticsearch/plugins/current/ingest.html>`_ :arg id: Comma separated list of pipeline ids. Wildcards supported :arg master_timeout: Explicit operation timeout for connection to master node ### Response: def get_pipeline(self, id=None, params=None): """ `<https://www.elastic.co/guide/en/elasticsearch/plugins/current/ingest.html>`_ :arg id: Comma separated list of pipeline ids. Wildcards supported :arg master_timeout: Explicit operation timeout for connection to master node """ return self.transport.perform_request('GET', _make_path('_ingest', 'pipeline', id), params=params)
def make_python_xref_nodes(py_typestr, state, hide_namespace=False): """Make docutils nodes containing a cross-reference to a Python object. Parameters ---------- py_typestr : `str` Name of the Python object. For example ``'mypackage.mymodule.MyClass'``. If you have the object itself, or its type, use the `make_python_xref_nodes_for_type` function instead. state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. hide_namespace : `bool`, optional If `True`, the namespace of the object is hidden in the rendered cross reference. Internally, this uses ``:py:obj:`~{py_obj}` (note tilde). Returns ------- instance from ``docutils.nodes`` Docutils node representing the cross reference. Examples -------- If called from within a directive: .. code-block:: python make_python_xref_nodes('numpy.sin', self.state) See also -------- `make_python_xref_nodes_for_type` """ if hide_namespace: template = ':py:obj:`~{}`\n' else: template = ':py:obj:`{}`\n' xref_text = template.format(py_typestr) return parse_rst_content(xref_text, state)
Make docutils nodes containing a cross-reference to a Python object. Parameters ---------- py_typestr : `str` Name of the Python object. For example ``'mypackage.mymodule.MyClass'``. If you have the object itself, or its type, use the `make_python_xref_nodes_for_type` function instead. state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. hide_namespace : `bool`, optional If `True`, the namespace of the object is hidden in the rendered cross reference. Internally, this uses ``:py:obj:`~{py_obj}` (note tilde). Returns ------- instance from ``docutils.nodes`` Docutils node representing the cross reference. Examples -------- If called from within a directive: .. code-block:: python make_python_xref_nodes('numpy.sin', self.state) See also -------- `make_python_xref_nodes_for_type`
Below is the the instruction that describes the task: ### Input: Make docutils nodes containing a cross-reference to a Python object. Parameters ---------- py_typestr : `str` Name of the Python object. For example ``'mypackage.mymodule.MyClass'``. If you have the object itself, or its type, use the `make_python_xref_nodes_for_type` function instead. state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. hide_namespace : `bool`, optional If `True`, the namespace of the object is hidden in the rendered cross reference. Internally, this uses ``:py:obj:`~{py_obj}` (note tilde). Returns ------- instance from ``docutils.nodes`` Docutils node representing the cross reference. Examples -------- If called from within a directive: .. code-block:: python make_python_xref_nodes('numpy.sin', self.state) See also -------- `make_python_xref_nodes_for_type` ### Response: def make_python_xref_nodes(py_typestr, state, hide_namespace=False): """Make docutils nodes containing a cross-reference to a Python object. Parameters ---------- py_typestr : `str` Name of the Python object. For example ``'mypackage.mymodule.MyClass'``. If you have the object itself, or its type, use the `make_python_xref_nodes_for_type` function instead. state : ``docutils.statemachine.State`` Usually the directive's ``state`` attribute. hide_namespace : `bool`, optional If `True`, the namespace of the object is hidden in the rendered cross reference. Internally, this uses ``:py:obj:`~{py_obj}` (note tilde). Returns ------- instance from ``docutils.nodes`` Docutils node representing the cross reference. Examples -------- If called from within a directive: .. code-block:: python make_python_xref_nodes('numpy.sin', self.state) See also -------- `make_python_xref_nodes_for_type` """ if hide_namespace: template = ':py:obj:`~{}`\n' else: template = ':py:obj:`{}`\n' xref_text = template.format(py_typestr) return parse_rst_content(xref_text, state)
def _expand_wildcard_placeholder(original_module, originals_map, placeholder): """Expand a wildcard placeholder to a sequence of named placeholders. :param original_module: The data dictionary of the module that the placeholder is imported from. :type original_module: dict :param originals_map: A map of the names of children under the module to their data dictionaries. :type originals_map: dict(str, dict) :param placeholder: The wildcard placeholder to expand. :type placeholder: dict :returns: The placeholders that the wildcard placeholder represents. :rtype: list(dict) """ originals = originals_map.values() if original_module["all"] is not None: originals = [] for name in original_module["all"]: if name == "__all__": continue if name not in originals_map: msg = "Invalid __all__ entry {0} in {1}".format( name, original_module["name"] ) LOGGER.warning(msg) continue originals.append(originals_map[name]) placeholders = [] for original in originals: new_full_name = placeholder["full_name"].replace("*", original["name"]) new_original_path = placeholder["original_path"].replace("*", original["name"]) if "original_path" in original: new_original_path = original["original_path"] new_placeholder = dict( placeholder, name=original["name"], full_name=new_full_name, original_path=new_original_path, ) placeholders.append(new_placeholder) return placeholders
Expand a wildcard placeholder to a sequence of named placeholders. :param original_module: The data dictionary of the module that the placeholder is imported from. :type original_module: dict :param originals_map: A map of the names of children under the module to their data dictionaries. :type originals_map: dict(str, dict) :param placeholder: The wildcard placeholder to expand. :type placeholder: dict :returns: The placeholders that the wildcard placeholder represents. :rtype: list(dict)
Below is the the instruction that describes the task: ### Input: Expand a wildcard placeholder to a sequence of named placeholders. :param original_module: The data dictionary of the module that the placeholder is imported from. :type original_module: dict :param originals_map: A map of the names of children under the module to their data dictionaries. :type originals_map: dict(str, dict) :param placeholder: The wildcard placeholder to expand. :type placeholder: dict :returns: The placeholders that the wildcard placeholder represents. :rtype: list(dict) ### Response: def _expand_wildcard_placeholder(original_module, originals_map, placeholder): """Expand a wildcard placeholder to a sequence of named placeholders. :param original_module: The data dictionary of the module that the placeholder is imported from. :type original_module: dict :param originals_map: A map of the names of children under the module to their data dictionaries. :type originals_map: dict(str, dict) :param placeholder: The wildcard placeholder to expand. :type placeholder: dict :returns: The placeholders that the wildcard placeholder represents. :rtype: list(dict) """ originals = originals_map.values() if original_module["all"] is not None: originals = [] for name in original_module["all"]: if name == "__all__": continue if name not in originals_map: msg = "Invalid __all__ entry {0} in {1}".format( name, original_module["name"] ) LOGGER.warning(msg) continue originals.append(originals_map[name]) placeholders = [] for original in originals: new_full_name = placeholder["full_name"].replace("*", original["name"]) new_original_path = placeholder["original_path"].replace("*", original["name"]) if "original_path" in original: new_original_path = original["original_path"] new_placeholder = dict( placeholder, name=original["name"], full_name=new_full_name, original_path=new_original_path, ) placeholders.append(new_placeholder) return placeholders
def write_bif(self, filename): """ Writes the BIF data into a file Parameters ---------- filename : Name of the file Example ------- >>> from pgmpy.readwrite import BIFReader, BIFWriter >>> model = BIFReader('dog-problem.bif').get_model() >>> writer = BIFWriter(model) >>> writer.write_bif(filname='test_file.bif') """ writer = self.__str__() with open(filename, 'w') as fout: fout.write(writer)
Writes the BIF data into a file Parameters ---------- filename : Name of the file Example ------- >>> from pgmpy.readwrite import BIFReader, BIFWriter >>> model = BIFReader('dog-problem.bif').get_model() >>> writer = BIFWriter(model) >>> writer.write_bif(filname='test_file.bif')
Below is the the instruction that describes the task: ### Input: Writes the BIF data into a file Parameters ---------- filename : Name of the file Example ------- >>> from pgmpy.readwrite import BIFReader, BIFWriter >>> model = BIFReader('dog-problem.bif').get_model() >>> writer = BIFWriter(model) >>> writer.write_bif(filname='test_file.bif') ### Response: def write_bif(self, filename): """ Writes the BIF data into a file Parameters ---------- filename : Name of the file Example ------- >>> from pgmpy.readwrite import BIFReader, BIFWriter >>> model = BIFReader('dog-problem.bif').get_model() >>> writer = BIFWriter(model) >>> writer.write_bif(filname='test_file.bif') """ writer = self.__str__() with open(filename, 'w') as fout: fout.write(writer)
def coinc(self, s0, s1, slide, step): # pylint:disable=unused-argument """Calculate the final coinc ranking statistic""" # Approximate log likelihood ratio by summing single-ifo negative # log noise likelihoods loglr = - s0 - s1 # add squares of threshold stat values via idealized Gaussian formula threshes = [self.fits_by_tid[i]['thresh'] for i in self.ifos] loglr += sum([t**2. / 2. for t in threshes]) # convert back to a coinc-SNR-like statistic # via log likelihood ratio \propto rho_c^2 / 2 return (2. * loglr) ** 0.5
Calculate the final coinc ranking statistic
Below is the the instruction that describes the task: ### Input: Calculate the final coinc ranking statistic ### Response: def coinc(self, s0, s1, slide, step): # pylint:disable=unused-argument """Calculate the final coinc ranking statistic""" # Approximate log likelihood ratio by summing single-ifo negative # log noise likelihoods loglr = - s0 - s1 # add squares of threshold stat values via idealized Gaussian formula threshes = [self.fits_by_tid[i]['thresh'] for i in self.ifos] loglr += sum([t**2. / 2. for t in threshes]) # convert back to a coinc-SNR-like statistic # via log likelihood ratio \propto rho_c^2 / 2 return (2. * loglr) ** 0.5
def orip(ip, rc=None, r=None, fl=None, fs=None, ot=None, coe=None, moc=None): # pylint: disable=too-many-arguments, redefined-outer-name, invalid-name """ This function is a wrapper for :meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths`. Open an enumeration session to retrieve the instance paths of the association instances that reference a source instance. Use the :func:`~wbemcli.pip` function to retrieve the next set of instance paths or the :func:`~wbcmeli.ce` function to close the enumeration session before it is complete. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of objects to return for this operation. `None` will cause the server to use its default of 0. 
Returns: A :func:`~py:collections.namedtuple` object containing the following named items: * **paths** (list of :class:`~pywbem.CIMInstanceName`): The retrieved instance paths. * **eos** (:class:`py:bool`): `True` if the enumeration session is exhausted after this operation. Otherwise `eos` is `False` and the `context` item is the context object for the next operation on the enumeration session. * **context** (:func:`py:tuple` of server_context, namespace): A context object identifying the open enumeration session, including its current enumeration state, and the namespace. This object must be supplied with the next pull or close operation for this enumeration session. """ return CONN.OpenReferenceInstancePaths(ip, ResultClass=rc, Role=r, FilterQueryLanguage=fl, FilterQuery=fs, OperationTimeout=ot, ContinueOnError=coe, MaxObjectCount=moc)
This function is a wrapper for :meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths`. Open an enumeration session to retrieve the instance paths of the association instances that reference a source instance. Use the :func:`~wbemcli.pip` function to retrieve the next set of instance paths or the :func:`~wbcmeli.ce` function to close the enumeration session before it is complete. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of objects to return for this operation. `None` will cause the server to use its default of 0. Returns: A :func:`~py:collections.namedtuple` object containing the following named items: * **paths** (list of :class:`~pywbem.CIMInstanceName`): The retrieved instance paths. 
* **eos** (:class:`py:bool`): `True` if the enumeration session is exhausted after this operation. Otherwise `eos` is `False` and the `context` item is the context object for the next operation on the enumeration session. * **context** (:func:`py:tuple` of server_context, namespace): A context object identifying the open enumeration session, including its current enumeration state, and the namespace. This object must be supplied with the next pull or close operation for this enumeration session.
Below is the the instruction that describes the task: ### Input: This function is a wrapper for :meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths`. Open an enumeration session to retrieve the instance paths of the association instances that reference a source instance. Use the :func:`~wbemcli.pip` function to retrieve the next set of instance paths or the :func:`~wbcmeli.ce` function to close the enumeration session before it is complete. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of objects to return for this operation. `None` will cause the server to use its default of 0. Returns: A :func:`~py:collections.namedtuple` object containing the following named items: * **paths** (list of :class:`~pywbem.CIMInstanceName`): The retrieved instance paths. 
* **eos** (:class:`py:bool`): `True` if the enumeration session is exhausted after this operation. Otherwise `eos` is `False` and the `context` item is the context object for the next operation on the enumeration session. * **context** (:func:`py:tuple` of server_context, namespace): A context object identifying the open enumeration session, including its current enumeration state, and the namespace. This object must be supplied with the next pull or close operation for this enumeration session. ### Response: def orip(ip, rc=None, r=None, fl=None, fs=None, ot=None, coe=None, moc=None): # pylint: disable=too-many-arguments, redefined-outer-name, invalid-name """ This function is a wrapper for :meth:`~pywbem.WBEMConnection.OpenReferenceInstancePaths`. Open an enumeration session to retrieve the instance paths of the association instances that reference a source instance. Use the :func:`~wbemcli.pip` function to retrieve the next set of instance paths or the :func:`~wbcmeli.ce` function to close the enumeration session before it is complete. Parameters: ip (:class:`~pywbem.CIMInstanceName`): Source instance path. rc (:term:`string`): ResultClass filter: Include only traversals across this association (result) class. `None` means this filter is not applied. r (:term:`string`): Role filter: Include only traversals from this role (= reference name) in source object. `None` means this filter is not applied. fl (:term:`string`): Filter query language to be used for the filter defined in the `fs` parameter. The DMTF-defined Filter Query Language (see :term:`DSP0212`) is specified as "DMTF:FQL". `None` means that no such filtering is peformed. fs (:term:`string`): Filter to apply to objects to be returned. Based on filter query language defined by `fl` parameter. `None` means that no such filtering is peformed. ot (:class:`~pywbem.Uint32`): Operation timeout in seconds. 
This is the minimum time the WBEM server must keep the enumeration session open between requests on that session. A value of 0 indicates that the server should never time out. The server may reject the proposed value. `None` will cause the server to use its default timeout. coe (:class:`py:bool`): Continue on error flag. `None` will cause the server to use its default of `False`. moc (:class:`~pywbem.Uint32`): Maximum number of objects to return for this operation. `None` will cause the server to use its default of 0. Returns: A :func:`~py:collections.namedtuple` object containing the following named items: * **paths** (list of :class:`~pywbem.CIMInstanceName`): The retrieved instance paths. * **eos** (:class:`py:bool`): `True` if the enumeration session is exhausted after this operation. Otherwise `eos` is `False` and the `context` item is the context object for the next operation on the enumeration session. * **context** (:func:`py:tuple` of server_context, namespace): A context object identifying the open enumeration session, including its current enumeration state, and the namespace. This object must be supplied with the next pull or close operation for this enumeration session. """ return CONN.OpenReferenceInstancePaths(ip, ResultClass=rc, Role=r, FilterQueryLanguage=fl, FilterQuery=fs, OperationTimeout=ot, ContinueOnError=coe, MaxObjectCount=moc)
def bump(self): """ Fix indicator in case of unnanounced departments. """ # read client values = self.client.mget(self.keys.indicator, self.keys.dispenser) indicator, dispenser = map(int, values) # determine active users numbers = range(indicator, dispenser + 1) keys = [self.keys.key(n) for n in numbers] pairs = zip(keys, self.client.mget(*keys)) try: # determine number of first active user number = next(self.keys.number(key) for key, value in pairs if value is not None) except: # set number to next result of incr on dispenser number = dispenser + 1 # set indicator to it if necessary if number != indicator: self.client.set(self.keys.indicator, number) # announce and return it anyway self.announce(number) return number
Fix indicator in case of unnanounced departments.
Below is the the instruction that describes the task: ### Input: Fix indicator in case of unnanounced departments. ### Response: def bump(self): """ Fix indicator in case of unnanounced departments. """ # read client values = self.client.mget(self.keys.indicator, self.keys.dispenser) indicator, dispenser = map(int, values) # determine active users numbers = range(indicator, dispenser + 1) keys = [self.keys.key(n) for n in numbers] pairs = zip(keys, self.client.mget(*keys)) try: # determine number of first active user number = next(self.keys.number(key) for key, value in pairs if value is not None) except: # set number to next result of incr on dispenser number = dispenser + 1 # set indicator to it if necessary if number != indicator: self.client.set(self.keys.indicator, number) # announce and return it anyway self.announce(number) return number
def _complete_el(self, symbol, attribute, fullsymbol): """Suggests a list of completions based on the el_* attributes of the user_context.""" if symbol != fullsymbol: #We have a sym%sym%... chain and the completion just needs to #be a member variable or method of the type being referenced. return self._complete_type_chain(symbol, fullsymbol) if self.context.el_section == "params": #They are in the process of defining a new executable and are #picking the names themselves, return normal word complete. return self._complete_word(symbol, attribute) elif self.context.el_section == "body": if self.context.el_call in ["sub", "fun"]: return self._complete_sig(symbol, attribute) else: return self._complete_word(symbol, attribute) else: return self._complete_word(symbol, attribute)
Suggests a list of completions based on the el_* attributes of the user_context.
Below is the the instruction that describes the task: ### Input: Suggests a list of completions based on the el_* attributes of the user_context. ### Response: def _complete_el(self, symbol, attribute, fullsymbol): """Suggests a list of completions based on the el_* attributes of the user_context.""" if symbol != fullsymbol: #We have a sym%sym%... chain and the completion just needs to #be a member variable or method of the type being referenced. return self._complete_type_chain(symbol, fullsymbol) if self.context.el_section == "params": #They are in the process of defining a new executable and are #picking the names themselves, return normal word complete. return self._complete_word(symbol, attribute) elif self.context.el_section == "body": if self.context.el_call in ["sub", "fun"]: return self._complete_sig(symbol, attribute) else: return self._complete_word(symbol, attribute) else: return self._complete_word(symbol, attribute)
def parse(cls, s): """ Parse a string to produce a :class:`.Date`. Accepted formats: 'YYYY-MM-DD' :param s: :return: """ try: numbers = map(int, s.split("-")) except (ValueError, AttributeError): raise ValueError("Date string must be in format YYYY-MM-DD") else: numbers = list(numbers) if len(numbers) == 3: return cls(*numbers) raise ValueError("Date string must be in format YYYY-MM-DD")
Parse a string to produce a :class:`.Date`. Accepted formats: 'YYYY-MM-DD' :param s: :return:
Below is the the instruction that describes the task: ### Input: Parse a string to produce a :class:`.Date`. Accepted formats: 'YYYY-MM-DD' :param s: :return: ### Response: def parse(cls, s): """ Parse a string to produce a :class:`.Date`. Accepted formats: 'YYYY-MM-DD' :param s: :return: """ try: numbers = map(int, s.split("-")) except (ValueError, AttributeError): raise ValueError("Date string must be in format YYYY-MM-DD") else: numbers = list(numbers) if len(numbers) == 3: return cls(*numbers) raise ValueError("Date string must be in format YYYY-MM-DD")
def p_scalar_namespace_name(p): '''scalar : namespace_name | NS_SEPARATOR namespace_name | NAMESPACE NS_SEPARATOR namespace_name''' if len(p) == 2: p[0] = ast.Constant(p[1], lineno=p.lineno(1)) elif len(p) == 3: p[0] = ast.Constant(p[1] + p[2], lineno=p.lineno(1)) else: p[0] = ast.Constant(p[1] + p[2] + p[3], lineno=p.lineno(1))
scalar : namespace_name | NS_SEPARATOR namespace_name | NAMESPACE NS_SEPARATOR namespace_name
Below is the the instruction that describes the task: ### Input: scalar : namespace_name | NS_SEPARATOR namespace_name | NAMESPACE NS_SEPARATOR namespace_name ### Response: def p_scalar_namespace_name(p): '''scalar : namespace_name | NS_SEPARATOR namespace_name | NAMESPACE NS_SEPARATOR namespace_name''' if len(p) == 2: p[0] = ast.Constant(p[1], lineno=p.lineno(1)) elif len(p) == 3: p[0] = ast.Constant(p[1] + p[2], lineno=p.lineno(1)) else: p[0] = ast.Constant(p[1] + p[2] + p[3], lineno=p.lineno(1))
def slicewise(self, fn, *inputs): """Execute a function in parallel on all slices. Args: fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors. *inputs: a list of inputs. Each input is either a LaidOutTensor or is convertible to a tf.Tensor. Returns: a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple. """ if fn == tf.add: assert len(inputs) == 2 if isinstance(inputs[0], mtf.LazyAllreduceSum): # sum of LazyAllreduceSum (keep delaying the allreduce) return inputs[0] + inputs[1] # convert all inputs to LaidOutTensor where possible inputs = mtf.convert_args_to_laid_out_tensors(inputs) ret = fn(*[ x.one_slice if isinstance(x, self.LaidOutTensor) else x for x in inputs]) if isinstance(ret, tuple): return tuple([self.LaidOutTensor([t]) for t in ret]) else: return self.LaidOutTensor([ret])
Execute a function in parallel on all slices. Args: fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors. *inputs: a list of inputs. Each input is either a LaidOutTensor or is convertible to a tf.Tensor. Returns: a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.
Below is the the instruction that describes the task: ### Input: Execute a function in parallel on all slices. Args: fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors. *inputs: a list of inputs. Each input is either a LaidOutTensor or is convertible to a tf.Tensor. Returns: a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple. ### Response: def slicewise(self, fn, *inputs): """Execute a function in parallel on all slices. Args: fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors. *inputs: a list of inputs. Each input is either a LaidOutTensor or is convertible to a tf.Tensor. Returns: a LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple. """ if fn == tf.add: assert len(inputs) == 2 if isinstance(inputs[0], mtf.LazyAllreduceSum): # sum of LazyAllreduceSum (keep delaying the allreduce) return inputs[0] + inputs[1] # convert all inputs to LaidOutTensor where possible inputs = mtf.convert_args_to_laid_out_tensors(inputs) ret = fn(*[ x.one_slice if isinstance(x, self.LaidOutTensor) else x for x in inputs]) if isinstance(ret, tuple): return tuple([self.LaidOutTensor([t]) for t in ret]) else: return self.LaidOutTensor([ret])
def _extract_docs_other(self): """Extract other specific sections""" if self.dst.style['in'] == 'numpydoc': data = '\n'.join([d.rstrip().replace(self.docs['out']['spaces'], '', 1) for d in self.docs['in']['raw'].splitlines()]) lst = self.dst.numpydoc.get_list_key(data, 'also') lst = self.dst.numpydoc.get_list_key(data, 'ref') lst = self.dst.numpydoc.get_list_key(data, 'note') lst = self.dst.numpydoc.get_list_key(data, 'other') lst = self.dst.numpydoc.get_list_key(data, 'example') lst = self.dst.numpydoc.get_list_key(data, 'attr')
Extract other specific sections
Below is the the instruction that describes the task: ### Input: Extract other specific sections ### Response: def _extract_docs_other(self): """Extract other specific sections""" if self.dst.style['in'] == 'numpydoc': data = '\n'.join([d.rstrip().replace(self.docs['out']['spaces'], '', 1) for d in self.docs['in']['raw'].splitlines()]) lst = self.dst.numpydoc.get_list_key(data, 'also') lst = self.dst.numpydoc.get_list_key(data, 'ref') lst = self.dst.numpydoc.get_list_key(data, 'note') lst = self.dst.numpydoc.get_list_key(data, 'other') lst = self.dst.numpydoc.get_list_key(data, 'example') lst = self.dst.numpydoc.get_list_key(data, 'attr')
def partial(self,start=0,end=None,run=0): '''chops the stimulus by only including time points ``start`` through ``end`` (in reps, inclusive; ``None``=until the end) if using stim_times-style simulus, will change the ``run``'th run. If a column, will just chop the column''' self.read_file() decon_stim = copy.copy(self) if start<0: start = 0 if self.type()=="column": decon_stim.column_file = None if end>=len(decon_stim.column): end = None if end==None: decon_stim.column = decon_stim.column[start:] else: decon_stim.column = decon_stim.column[start:end+1] if len(decon_stim.column)==0: return None if self.type()=="times": if self.TR==None: nl.notify('Error: cannot get partial segment of a stim_times stimulus without a TR',level=nl.level.error) return None def time_in(a): first_number = r'^(\d+(\.\d+)?)' if isinstance(a,basestring): m = re.match(first_number,a) if m: a = m.group(1) else: nl.notify('Warning: cannot intepret a number from the stim_time: "%s"' % a,level=nl.level.warning) return False a = float(a)/self.TR if a>=start and (end==None or a<=end): return True return False decon_stim.times_file = None if len(decon_stim.times)==0 or '__iter__' not in dir(decon_stim.times[0]): decon_stim.times = [decon_stim.times] decon_stim.times[run] = [x for x in decon_stim.times[run] if time_in(x)] if len(nl.flatten(decon_stim.times))==0: return None return decon_stim
chops the stimulus by only including time points ``start`` through ``end`` (in reps, inclusive; ``None``=until the end) if using stim_times-style simulus, will change the ``run``'th run. If a column, will just chop the column
Below is the the instruction that describes the task: ### Input: chops the stimulus by only including time points ``start`` through ``end`` (in reps, inclusive; ``None``=until the end) if using stim_times-style simulus, will change the ``run``'th run. If a column, will just chop the column ### Response: def partial(self,start=0,end=None,run=0): '''chops the stimulus by only including time points ``start`` through ``end`` (in reps, inclusive; ``None``=until the end) if using stim_times-style simulus, will change the ``run``'th run. If a column, will just chop the column''' self.read_file() decon_stim = copy.copy(self) if start<0: start = 0 if self.type()=="column": decon_stim.column_file = None if end>=len(decon_stim.column): end = None if end==None: decon_stim.column = decon_stim.column[start:] else: decon_stim.column = decon_stim.column[start:end+1] if len(decon_stim.column)==0: return None if self.type()=="times": if self.TR==None: nl.notify('Error: cannot get partial segment of a stim_times stimulus without a TR',level=nl.level.error) return None def time_in(a): first_number = r'^(\d+(\.\d+)?)' if isinstance(a,basestring): m = re.match(first_number,a) if m: a = m.group(1) else: nl.notify('Warning: cannot intepret a number from the stim_time: "%s"' % a,level=nl.level.warning) return False a = float(a)/self.TR if a>=start and (end==None or a<=end): return True return False decon_stim.times_file = None if len(decon_stim.times)==0 or '__iter__' not in dir(decon_stim.times[0]): decon_stim.times = [decon_stim.times] decon_stim.times[run] = [x for x in decon_stim.times[run] if time_in(x)] if len(nl.flatten(decon_stim.times))==0: return None return decon_stim
def filter_unnecessary_segments(query): """Filter segments are not needed on CVX""" segment_model = segment_models.NetworkSegment network_model = models_v2.Network query = (query .join_if_necessary(network_model) .join_if_necessary(segment_model) .filter(network_model.project_id != '') .filter_network_type()) return query
Filter segments are not needed on CVX
Below is the the instruction that describes the task: ### Input: Filter segments are not needed on CVX ### Response: def filter_unnecessary_segments(query): """Filter segments are not needed on CVX""" segment_model = segment_models.NetworkSegment network_model = models_v2.Network query = (query .join_if_necessary(network_model) .join_if_necessary(segment_model) .filter(network_model.project_id != '') .filter_network_type()) return query
def fig2x(figure, format): """Returns svg from matplotlib chart""" # Save svg to file like object svg_io io = StringIO() figure.savefig(io, format=format) # Rewind the file like object io.seek(0) data = io.getvalue() io.close() return data
Returns svg from matplotlib chart
Below is the the instruction that describes the task: ### Input: Returns svg from matplotlib chart ### Response: def fig2x(figure, format): """Returns svg from matplotlib chart""" # Save svg to file like object svg_io io = StringIO() figure.savefig(io, format=format) # Rewind the file like object io.seek(0) data = io.getvalue() io.close() return data
def twoplustwo_player(username): """Get profile information about a Two plus Two Forum member given the username.""" from .website.twoplustwo import ForumMember, AmbiguousUserNameError, UserNotFoundError try: member = ForumMember(username) except UserNotFoundError: raise click.ClickException('User "%s" not found!' % username) except AmbiguousUserNameError as e: click.echo('Got multiple users with similar names!', err=True) for ind, user in enumerate(e.users): click.echo('{}. {}'.format(ind + 1, user.name), err=True) number = click.prompt('Which would you like to see [{}-{}]'.format(1, len(e.users)), prompt_suffix='? ', type=click.IntRange(1, len(e.users)), err=True) userid = e.users[int(number) - 1].id member = ForumMember.from_userid(userid) click.echo(err=True) # empty line after input _print_header('Two plus two forum member') _print_values( ('Username', member.username), ('Forum id', member.id), ('Location', member.location), ('Total posts', member.total_posts), ('Posts per day', member.posts_per_day), ('Rank', member.rank), ('Last activity', member.last_activity), ('Join date', member.join_date), ('Usergroups', member.public_usergroups), ('Profile picture', member.profile_picture), ('Avatar', member.avatar), )
Get profile information about a Two plus Two Forum member given the username.
Below is the the instruction that describes the task: ### Input: Get profile information about a Two plus Two Forum member given the username. ### Response: def twoplustwo_player(username): """Get profile information about a Two plus Two Forum member given the username.""" from .website.twoplustwo import ForumMember, AmbiguousUserNameError, UserNotFoundError try: member = ForumMember(username) except UserNotFoundError: raise click.ClickException('User "%s" not found!' % username) except AmbiguousUserNameError as e: click.echo('Got multiple users with similar names!', err=True) for ind, user in enumerate(e.users): click.echo('{}. {}'.format(ind + 1, user.name), err=True) number = click.prompt('Which would you like to see [{}-{}]'.format(1, len(e.users)), prompt_suffix='? ', type=click.IntRange(1, len(e.users)), err=True) userid = e.users[int(number) - 1].id member = ForumMember.from_userid(userid) click.echo(err=True) # empty line after input _print_header('Two plus two forum member') _print_values( ('Username', member.username), ('Forum id', member.id), ('Location', member.location), ('Total posts', member.total_posts), ('Posts per day', member.posts_per_day), ('Rank', member.rank), ('Last activity', member.last_activity), ('Join date', member.join_date), ('Usergroups', member.public_usergroups), ('Profile picture', member.profile_picture), ('Avatar', member.avatar), )
def _crop_pad_default(x, size, padding_mode='reflection', row_pct:uniform = 0.5, col_pct:uniform = 0.5): "Crop and pad tfm - `row_pct`,`col_pct` sets focal point." padding_mode = _pad_mode_convert[padding_mode] size = tis2hw(size) if x.shape[1:] == torch.Size(size): return x rows,cols = size row_pct,col_pct = _minus_epsilon(row_pct,col_pct) if x.size(1)<rows or x.size(2)<cols: row_pad = max((rows-x.size(1)+1)//2, 0) col_pad = max((cols-x.size(2)+1)//2, 0) x = F.pad(x[None], (col_pad,col_pad,row_pad,row_pad), mode=padding_mode)[0] row = int((x.size(1)-rows+1)*row_pct) col = int((x.size(2)-cols+1)*col_pct) x = x[:, row:row+rows, col:col+cols] return x.contiguous()
Crop and pad tfm - `row_pct`,`col_pct` sets focal point.
Below is the the instruction that describes the task: ### Input: Crop and pad tfm - `row_pct`,`col_pct` sets focal point. ### Response: def _crop_pad_default(x, size, padding_mode='reflection', row_pct:uniform = 0.5, col_pct:uniform = 0.5): "Crop and pad tfm - `row_pct`,`col_pct` sets focal point." padding_mode = _pad_mode_convert[padding_mode] size = tis2hw(size) if x.shape[1:] == torch.Size(size): return x rows,cols = size row_pct,col_pct = _minus_epsilon(row_pct,col_pct) if x.size(1)<rows or x.size(2)<cols: row_pad = max((rows-x.size(1)+1)//2, 0) col_pad = max((cols-x.size(2)+1)//2, 0) x = F.pad(x[None], (col_pad,col_pad,row_pad,row_pad), mode=padding_mode)[0] row = int((x.size(1)-rows+1)*row_pct) col = int((x.size(2)-cols+1)*col_pct) x = x[:, row:row+rows, col:col+cols] return x.contiguous()
def add(self, word, count=1): """Add a word to the vocabulary and return its index. :param word: word to add to the dictionary. :param count: how many times to add the word. :return: index of the added word. WARNING: this function assumes that if the Vocab currently has N words, then there is a perfect bijection between these N words and the integers 0 through N-1. """ if word not in self: super(Vocab, self).__setitem__(word, len(self)) self._counts[word] += count return self[word]
Add a word to the vocabulary and return its index. :param word: word to add to the dictionary. :param count: how many times to add the word. :return: index of the added word. WARNING: this function assumes that if the Vocab currently has N words, then there is a perfect bijection between these N words and the integers 0 through N-1.
Below is the the instruction that describes the task: ### Input: Add a word to the vocabulary and return its index. :param word: word to add to the dictionary. :param count: how many times to add the word. :return: index of the added word. WARNING: this function assumes that if the Vocab currently has N words, then there is a perfect bijection between these N words and the integers 0 through N-1. ### Response: def add(self, word, count=1): """Add a word to the vocabulary and return its index. :param word: word to add to the dictionary. :param count: how many times to add the word. :return: index of the added word. WARNING: this function assumes that if the Vocab currently has N words, then there is a perfect bijection between these N words and the integers 0 through N-1. """ if word not in self: super(Vocab, self).__setitem__(word, len(self)) self._counts[word] += count return self[word]
def expectation_sensitivity(T, a): r"""Sensitivity of expectation value of observable A=(a_i). Parameters ---------- T : (M, M) ndarray Transition matrix a : (M,) ndarray Observable, a[i] is the value of the observable at state i. Returns ------- S : (M, M) ndarray Sensitivity matrix of the expectation value. """ M = T.shape[0] S = numpy.zeros((M, M)) for i in range(M): S += a[i] * stationary_distribution_sensitivity(T, i) return S
r"""Sensitivity of expectation value of observable A=(a_i). Parameters ---------- T : (M, M) ndarray Transition matrix a : (M,) ndarray Observable, a[i] is the value of the observable at state i. Returns ------- S : (M, M) ndarray Sensitivity matrix of the expectation value.
Below is the instruction that describes the task: ### Input: r"""Sensitivity of expectation value of observable A=(a_i). Parameters ---------- T : (M, M) ndarray Transition matrix a : (M,) ndarray Observable, a[i] is the value of the observable at state i. Returns ------- S : (M, M) ndarray Sensitivity matrix of the expectation value. ### Response: def expectation_sensitivity(T, a): r"""Sensitivity of expectation value of observable A=(a_i). Parameters ---------- T : (M, M) ndarray Transition matrix a : (M,) ndarray Observable, a[i] is the value of the observable at state i. Returns ------- S : (M, M) ndarray Sensitivity matrix of the expectation value. """ M = T.shape[0] S = numpy.zeros((M, M)) for i in range(M): S += a[i] * stationary_distribution_sensitivity(T, i) return S
def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... 
temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data
r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... 
temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15
Below is the the instruction that describes the task: ### Input: r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... 
temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 ### Response: def assign(self, **kwargs): r""" Assign new columns to a DataFrame. Returns a new object with all original columns in addition to new ones. Existing columns that are re-assigned will be overwritten. Parameters ---------- **kwargs : dict of {str: callable or Series} The column names are keywords. If the values are callable, they are computed on the DataFrame and assigned to the new columns. The callable must not change input DataFrame (though pandas doesn't check it). If the values are not callable, (e.g. a Series, scalar, or array), they are simply assigned. Returns ------- DataFrame A new DataFrame with the new columns in addition to all the existing columns. Notes ----- Assigning multiple columns within the same ``assign`` is possible. For Python 3.6 and above, later items in '\*\*kwargs' may refer to newly created or modified columns in 'df'; items are computed and assigned into 'df' in order. For Python 3.5 and below, the order of keyword arguments is not specified, you cannot refer to newly created or modified columns. All items are computed first, and then assigned in alphabetical order. .. versionchanged :: 0.23.0 Keyword argument order is maintained for Python 3.6 and later. Examples -------- >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]}, ... 
index=['Portland', 'Berkeley']) >>> df temp_c Portland 17.0 Berkeley 25.0 Where the value is a callable, evaluated on `df`: >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 Alternatively, the same behavior can be achieved by directly referencing an existing Series or sequence: >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32) temp_c temp_f Portland 17.0 62.6 Berkeley 25.0 77.0 In Python 3.6+, you can create multiple columns within the same assign where one of the columns depends on another one defined within the same assign: >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32, ... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9) temp_c temp_f temp_k Portland 17.0 62.6 290.15 Berkeley 25.0 77.0 298.15 """ data = self.copy() # >= 3.6 preserve order of kwargs if PY36: for k, v in kwargs.items(): data[k] = com.apply_if_callable(v, data) else: # <= 3.5: do all calculations first... results = OrderedDict() for k, v in kwargs.items(): results[k] = com.apply_if_callable(v, data) # <= 3.5 and earlier results = sorted(results.items()) # ... and then assign for k, v in results: data[k] = v return data
def filters_in_format(self, value): """ Setter for **self.__filters_in_format** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "filters_in_format", value) assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format( "filters_in_format", value) self.__filters_in_format = value
Setter for **self.__filters_in_format** attribute. :param value: Attribute value. :type value: unicode
Below is the instruction that describes the task: ### Input: Setter for **self.__filters_in_format** attribute. :param value: Attribute value. :type value: unicode ### Response: def filters_in_format(self, value): """ Setter for **self.__filters_in_format** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "filters_in_format", value) assert os.path.exists(value), "'{0}' attribute: '{1}' file doesn't exists!".format( "filters_in_format", value) self.__filters_in_format = value
def create_string_array(self, key, value): """Create method of CRUD operation for string array data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write. """ data = None if key is not None and value is not None: if isinstance(value, (list)): data = self.db.create(key.strip(), json.dumps(value)) else: # used to save raw value with embedded variables data = self.db.create(key.strip(), value) else: self.tcex.log.warning(u'The key or value field was None.') return data
Create method of CRUD operation for string array data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write.
Below is the instruction that describes the task: ### Input: Create method of CRUD operation for string array data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write. ### Response: def create_string_array(self, key, value): """Create method of CRUD operation for string array data. Args: key (string): The variable to write to the DB. value (any): The data to write to the DB. Returns: (string): Result of DB write. """ data = None if key is not None and value is not None: if isinstance(value, (list)): data = self.db.create(key.strip(), json.dumps(value)) else: # used to save raw value with embedded variables data = self.db.create(key.strip(), value) else: self.tcex.log.warning(u'The key or value field was None.') return data
def _el_orb(string): """Parse the element and orbital argument strings. The presence of an element without any orbitals means that we want to plot all of its orbitals. Args: string (str): The element and orbitals as a string, in the form ``"C.s.p,O"``. Returns: dict: The elements and orbitals as a :obj:`dict`. For example:: {'Bi': ['s', 'px', 'py', 'd']}. If an element symbol is included with an empty list, then all orbitals for that species are considered. """ el_orbs = {} for split in string.split(','): orbs = split.split('.') orbs = [orbs[0], 's', 'p', 'd', 'f'] if len(orbs) == 1 else orbs el_orbs[orbs.pop(0)] = orbs return el_orbs
Parse the element and orbital argument strings. The presence of an element without any orbitals means that we want to plot all of its orbitals. Args: string (str): The element and orbitals as a string, in the form ``"C.s.p,O"``. Returns: dict: The elements and orbitals as a :obj:`dict`. For example:: {'Bi': ['s', 'px', 'py', 'd']}. If an element symbol is included with an empty list, then all orbitals for that species are considered.
Below is the the instruction that describes the task: ### Input: Parse the element and orbital argument strings. The presence of an element without any orbitals means that we want to plot all of its orbitals. Args: string (str): The element and orbitals as a string, in the form ``"C.s.p,O"``. Returns: dict: The elements and orbitals as a :obj:`dict`. For example:: {'Bi': ['s', 'px', 'py', 'd']}. If an element symbol is included with an empty list, then all orbitals for that species are considered. ### Response: def _el_orb(string): """Parse the element and orbital argument strings. The presence of an element without any orbitals means that we want to plot all of its orbitals. Args: string (str): The element and orbitals as a string, in the form ``"C.s.p,O"``. Returns: dict: The elements and orbitals as a :obj:`dict`. For example:: {'Bi': ['s', 'px', 'py', 'd']}. If an element symbol is included with an empty list, then all orbitals for that species are considered. """ el_orbs = {} for split in string.split(','): orbs = split.split('.') orbs = [orbs[0], 's', 'p', 'd', 'f'] if len(orbs) == 1 else orbs el_orbs[orbs.pop(0)] = orbs return el_orbs
def set_env_from_file(env_file): '''Restore the current environment from an environment stored in a yaml file. :param env_file: Path to environment yaml file. ''' with open(env_file, 'r') as f: env_dict = yaml.load(f.read()) if 'environment' in env_dict: env_dict = env_dict['environment'] set_env(env_dict)
Restore the current environment from an environment stored in a yaml file. :param env_file: Path to environment yaml file.
Below is the instruction that describes the task: ### Input: Restore the current environment from an environment stored in a yaml file. :param env_file: Path to environment yaml file. ### Response: def set_env_from_file(env_file): '''Restore the current environment from an environment stored in a yaml file. :param env_file: Path to environment yaml file. ''' with open(env_file, 'r') as f: env_dict = yaml.load(f.read()) if 'environment' in env_dict: env_dict = env_dict['environment'] set_env(env_dict)
def pct_negative(self, threshold=0.0): """Pct. of periods in which `self` is less than `threshold.` Parameters ---------- threshold : {float, TSeries, pd.Series}, default 0. Returns ------- float """ return np.count_nonzero(self[self < threshold]) / self.count()
Pct. of periods in which `self` is less than `threshold.` Parameters ---------- threshold : {float, TSeries, pd.Series}, default 0. Returns ------- float
Below is the instruction that describes the task: ### Input: Pct. of periods in which `self` is less than `threshold.` Parameters ---------- threshold : {float, TSeries, pd.Series}, default 0. Returns ------- float ### Response: def pct_negative(self, threshold=0.0): """Pct. of periods in which `self` is less than `threshold.` Parameters ---------- threshold : {float, TSeries, pd.Series}, default 0. Returns ------- float """ return np.count_nonzero(self[self < threshold]) / self.count()
def listen_init(self): """Setup the service to listen for clients.""" self.dispatcher = ObjectDispatch(self) self.factory = MsgPackProtocolFactory(self.dispatcher) self.server = UnixServer(self.loop, self.factory, self.path) self.server.start()
Setup the service to listen for clients.
Below is the instruction that describes the task: ### Input: Setup the service to listen for clients. ### Response: def listen_init(self): """Setup the service to listen for clients.""" self.dispatcher = ObjectDispatch(self) self.factory = MsgPackProtocolFactory(self.dispatcher) self.server = UnixServer(self.loop, self.factory, self.path) self.server.start()
def set_lock(i): """ Input: { path - path to be locked (get_lock) - if 'yes', lock this entry (lock_retries) - number of retries to aquire lock (default=11) (lock_retry_delay) - delay in seconds before trying to aquire lock again (default=3) (lock_expire_time) - number of seconds before lock expires (default=30) (unlock_uid) - UID of the lock to release it } Output: { return - return code = 0, if successful = 32, couldn't acquire lock (still locked after all retries) > 0, if error (error) - error text if return > 0 (lock_uid) - lock UID, if locked successfully } """ p=i['path'] gl=i.get('get_lock','') uuid=i.get('unlock_uid','') exp=float(i.get('lock_expire_time','30')) rr={'return':0} if gl=='yes' or uuid!='': pl=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_for_lock']) luid='' if os.path.isfile(pl): import time # Read lock file try: f=open(pl) luid=f.readline().strip() exp=float(f.readline().strip()) if exp<0: exp=1 f.close() except Exception as e: return {'return':1, 'error':'problem reading lock file'} # Check if lock has expired if gl=='yes' and uuid=='': # Retry if locked retry=int(i.get('lock_retries','11')) retryd=float(i.get('lock_retry_delay','3')) dt=os.path.getmtime(pl)+exp-time.time() if dt>0: while retry>0 and os.path.isfile(pl) and dt>0: retry-=1 time.sleep(retryd) if os.path.isfile(pl): dt=os.path.getmtime(pl)+exp-time.time() if retry==0 and dt>0 and os.path.isfile(pl): return {'return':32, 'error':'entry is still locked'} luid='' if os.path.isfile(pl): os.remove(pl) # Release lock if requested (and if not locked by another UID) if luid!='' and uuid!='': if luid!=uuid: return {'return':32, 'error': 'entry is locked with another UID'} luid='' os.remove(pl) # Finish acquiring lock if gl=='yes': # (Re)acquire lock if uuid=='': r=gen_uid({}) if r['return']>0: return r luid=r['data_uid'] else: luid=uuid # Write lock file try: f=open(pl,'w') f.write(luid+'\n') f.write(str(exp)+'\n') f.close() except Exception as e: return {'return':1, 'error':'problem 
writing lock file'} rr['lock_uid']=luid return rr
Input: { path - path to be locked (get_lock) - if 'yes', lock this entry (lock_retries) - number of retries to aquire lock (default=11) (lock_retry_delay) - delay in seconds before trying to aquire lock again (default=3) (lock_expire_time) - number of seconds before lock expires (default=30) (unlock_uid) - UID of the lock to release it } Output: { return - return code = 0, if successful = 32, couldn't acquire lock (still locked after all retries) > 0, if error (error) - error text if return > 0 (lock_uid) - lock UID, if locked successfully }
Below is the the instruction that describes the task: ### Input: Input: { path - path to be locked (get_lock) - if 'yes', lock this entry (lock_retries) - number of retries to aquire lock (default=11) (lock_retry_delay) - delay in seconds before trying to aquire lock again (default=3) (lock_expire_time) - number of seconds before lock expires (default=30) (unlock_uid) - UID of the lock to release it } Output: { return - return code = 0, if successful = 32, couldn't acquire lock (still locked after all retries) > 0, if error (error) - error text if return > 0 (lock_uid) - lock UID, if locked successfully } ### Response: def set_lock(i): """ Input: { path - path to be locked (get_lock) - if 'yes', lock this entry (lock_retries) - number of retries to aquire lock (default=11) (lock_retry_delay) - delay in seconds before trying to aquire lock again (default=3) (lock_expire_time) - number of seconds before lock expires (default=30) (unlock_uid) - UID of the lock to release it } Output: { return - return code = 0, if successful = 32, couldn't acquire lock (still locked after all retries) > 0, if error (error) - error text if return > 0 (lock_uid) - lock UID, if locked successfully } """ p=i['path'] gl=i.get('get_lock','') uuid=i.get('unlock_uid','') exp=float(i.get('lock_expire_time','30')) rr={'return':0} if gl=='yes' or uuid!='': pl=os.path.join(p, cfg['subdir_ck_ext'], cfg['file_for_lock']) luid='' if os.path.isfile(pl): import time # Read lock file try: f=open(pl) luid=f.readline().strip() exp=float(f.readline().strip()) if exp<0: exp=1 f.close() except Exception as e: return {'return':1, 'error':'problem reading lock file'} # Check if lock has expired if gl=='yes' and uuid=='': # Retry if locked retry=int(i.get('lock_retries','11')) retryd=float(i.get('lock_retry_delay','3')) dt=os.path.getmtime(pl)+exp-time.time() if dt>0: while retry>0 and os.path.isfile(pl) and dt>0: retry-=1 time.sleep(retryd) if os.path.isfile(pl): dt=os.path.getmtime(pl)+exp-time.time() if 
retry==0 and dt>0 and os.path.isfile(pl): return {'return':32, 'error':'entry is still locked'} luid='' if os.path.isfile(pl): os.remove(pl) # Release lock if requested (and if not locked by another UID) if luid!='' and uuid!='': if luid!=uuid: return {'return':32, 'error': 'entry is locked with another UID'} luid='' os.remove(pl) # Finish acquiring lock if gl=='yes': # (Re)acquire lock if uuid=='': r=gen_uid({}) if r['return']>0: return r luid=r['data_uid'] else: luid=uuid # Write lock file try: f=open(pl,'w') f.write(luid+'\n') f.write(str(exp)+'\n') f.close() except Exception as e: return {'return':1, 'error':'problem writing lock file'} rr['lock_uid']=luid return rr
def sub_dirs(path, invisible=False): """ Child directories (non-recursive) """ dirs = [x for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))] if not invisible: dirs = [x for x in dirs if not x.startswith('.')] return dirs
Child directories (non-recursive)
Below is the instruction that describes the task: ### Input: Child directories (non-recursive) ### Response: def sub_dirs(path, invisible=False): """ Child directories (non-recursive) """ dirs = [x for x in os.listdir(path) if os.path.isdir(os.path.join(path, x))] if not invisible: dirs = [x for x in dirs if not x.startswith('.')] return dirs
def build_sh_cmd(cmd, cwd=None): """Build a `sh.Command` from a string. :param cmd: String with the command to convert. :param cwd: Optional path to use as working directory. :return: `sh.Command` """ args = cmd.split() return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])
Build a `sh.Command` from a string. :param cmd: String with the command to convert. :param cwd: Optional path to use as working directory. :return: `sh.Command`
Below is the instruction that describes the task: ### Input: Build a `sh.Command` from a string. :param cmd: String with the command to convert. :param cwd: Optional path to use as working directory. :return: `sh.Command` ### Response: def build_sh_cmd(cmd, cwd=None): """Build a `sh.Command` from a string. :param cmd: String with the command to convert. :param cwd: Optional path to use as working directory. :return: `sh.Command` """ args = cmd.split() return getattr(sh, args[0]).bake(_cwd=cwd, *args[1:])
def read_csv(self, file_path, use_whole_file=False, names=None, skiprows=0, *args, **kwargs): """Read a CSV file in and parse it into Pandas DataFrames. By default, the first row from the first partition of that data is parsed and used as the column names for the data from. If no 'names' param is provided we parse the first row of the first partition of data and use it for column names. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/' use_whole_file: boolean Whether of not to use the whole file. names: list of strings, optional skiprows: integer, optional indicates how many rows of input to skip. This will only be applied to the first partition of the data (so if #skiprows > #row in first partition this will not work). Generally this shouldn't be an issue for small values of skiprows. No other value of header is supported. All additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file. """ def csv_file(partition_number, files): # pylint: disable=unexpected-keyword-arg file_count = 0 for _, contents in files: # Only skip lines on the first file if partition_number == 0 and file_count == 0 and _skiprows > 0: yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs) else: file_count += 1 yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, **kwargs) def csv_rows(partition_number, rows): # pylint: disable=unexpected-keyword-arg in_str = "\n".join(rows) if partition_number == 0: return iter([ pandas.read_csv( sio(in_str), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs)]) else: # could use .iterows instead? 
return iter([pandas.read_csv(sio(in_str), *args, header=None, names=mynames, **kwargs)]) # If we need to peak at the first partition and determine the column # names mynames = None _skiprows = skiprows if names: mynames = names else: # In the future we could avoid this expensive call. first_line = self.spark_ctx.textFile(file_path).first() frame = pandas.read_csv(sio(first_line), **kwargs) # pylint sees frame as a tuple despite it being a DataFrame mynames = list(frame.columns) _skiprows += 1 # Do the actual load if use_whole_file: return self.from_pandas_rdd( self.spark_ctx.wholeTextFiles(file_path) .mapPartitionsWithIndex(csv_file)) else: return self.from_pandas_rdd( self.spark_ctx.textFile(file_path) .mapPartitionsWithIndex(csv_rows))
Read a CSV file in and parse it into Pandas DataFrames. By default, the first row from the first partition of that data is parsed and used as the column names for the data from. If no 'names' param is provided we parse the first row of the first partition of data and use it for column names. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/' use_whole_file: boolean Whether of not to use the whole file. names: list of strings, optional skiprows: integer, optional indicates how many rows of input to skip. This will only be applied to the first partition of the data (so if #skiprows > #row in first partition this will not work). Generally this shouldn't be an issue for small values of skiprows. No other value of header is supported. All additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file.
Below is the the instruction that describes the task: ### Input: Read a CSV file in and parse it into Pandas DataFrames. By default, the first row from the first partition of that data is parsed and used as the column names for the data from. If no 'names' param is provided we parse the first row of the first partition of data and use it for column names. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/' use_whole_file: boolean Whether of not to use the whole file. names: list of strings, optional skiprows: integer, optional indicates how many rows of input to skip. This will only be applied to the first partition of the data (so if #skiprows > #row in first partition this will not work). Generally this shouldn't be an issue for small values of skiprows. No other value of header is supported. All additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file. ### Response: def read_csv(self, file_path, use_whole_file=False, names=None, skiprows=0, *args, **kwargs): """Read a CSV file in and parse it into Pandas DataFrames. By default, the first row from the first partition of that data is parsed and used as the column names for the data from. If no 'names' param is provided we parse the first row of the first partition of data and use it for column names. Parameters ---------- file_path: string Path to input. Any valid file path in Spark works here, eg: 'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/' use_whole_file: boolean Whether of not to use the whole file. names: list of strings, optional skiprows: integer, optional indicates how many rows of input to skip. This will only be applied to the first partition of the data (so if #skiprows > #row in first partition this will not work). 
Generally this shouldn't be an issue for small values of skiprows. No other value of header is supported. All additional parameters available in pandas.read_csv() are usable here. Returns ------- A SparklingPandas DataFrame that contains the data from the specified file. """ def csv_file(partition_number, files): # pylint: disable=unexpected-keyword-arg file_count = 0 for _, contents in files: # Only skip lines on the first file if partition_number == 0 and file_count == 0 and _skiprows > 0: yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs) else: file_count += 1 yield pandas.read_csv( sio(contents), *args, header=None, names=mynames, **kwargs) def csv_rows(partition_number, rows): # pylint: disable=unexpected-keyword-arg in_str = "\n".join(rows) if partition_number == 0: return iter([ pandas.read_csv( sio(in_str), *args, header=None, names=mynames, skiprows=_skiprows, **kwargs)]) else: # could use .iterows instead? return iter([pandas.read_csv(sio(in_str), *args, header=None, names=mynames, **kwargs)]) # If we need to peak at the first partition and determine the column # names mynames = None _skiprows = skiprows if names: mynames = names else: # In the future we could avoid this expensive call. first_line = self.spark_ctx.textFile(file_path).first() frame = pandas.read_csv(sio(first_line), **kwargs) # pylint sees frame as a tuple despite it being a DataFrame mynames = list(frame.columns) _skiprows += 1 # Do the actual load if use_whole_file: return self.from_pandas_rdd( self.spark_ctx.wholeTextFiles(file_path) .mapPartitionsWithIndex(csv_file)) else: return self.from_pandas_rdd( self.spark_ctx.textFile(file_path) .mapPartitionsWithIndex(csv_rows))
def _prune_all_if_small(self, small_size, a_or_u): "Return True and delete children if small enough." if self._nodes is None: return True total_size = (self.app_size() if a_or_u else self.use_size()) if total_size < small_size: if a_or_u: self._set_size(total_size, self.use_size()) else: self._set_size(self.app_size(), total_size) return True return False
Return True and delete children if small enough.
Below is the the instruction that describes the task: ### Input: Return True and delete children if small enough. ### Response: def _prune_all_if_small(self, small_size, a_or_u): "Return True and delete children if small enough." if self._nodes is None: return True total_size = (self.app_size() if a_or_u else self.use_size()) if total_size < small_size: if a_or_u: self._set_size(total_size, self.use_size()) else: self._set_size(self.app_size(), total_size) return True return False
def plotInferenceStats(self, fields, plotDir="plots", experimentID=0, onePlot=True): """ Plots and saves the desired inference statistics. Parameters: ---------------------------- @param fields (list(str)) List of fields to include in the plots @param experimentID (int) ID of the experiment (usually 0 if only one was conducted) @param onePlot (bool) If true, all cortical columns will be merged in one plot. """ if not os.path.exists(plotDir): os.makedirs(plotDir) plt.figure() stats = self.statistics[experimentID] objectName = stats["object"] for i in xrange(self.numColumns): if not onePlot: plt.figure() # plot request stats for field in fields: fieldKey = field + " C" + str(i) plt.plot(stats[fieldKey], marker='+', label=fieldKey) # format plt.legend(loc="upper right") plt.xlabel("Sensation #") plt.xticks(range(stats["numSteps"])) plt.ylabel("Number of active bits") plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5) plt.title("Object inference for object {}".format(objectName)) # save if not onePlot: relPath = "{}_exp_{}_C{}.png".format(self.name, experimentID, i) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close() if onePlot: relPath = "{}_exp_{}.png".format(self.name, experimentID) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close()
Plots and saves the desired inference statistics. Parameters: ---------------------------- @param fields (list(str)) List of fields to include in the plots @param experimentID (int) ID of the experiment (usually 0 if only one was conducted) @param onePlot (bool) If true, all cortical columns will be merged in one plot.
Below is the the instruction that describes the task: ### Input: Plots and saves the desired inference statistics. Parameters: ---------------------------- @param fields (list(str)) List of fields to include in the plots @param experimentID (int) ID of the experiment (usually 0 if only one was conducted) @param onePlot (bool) If true, all cortical columns will be merged in one plot. ### Response: def plotInferenceStats(self, fields, plotDir="plots", experimentID=0, onePlot=True): """ Plots and saves the desired inference statistics. Parameters: ---------------------------- @param fields (list(str)) List of fields to include in the plots @param experimentID (int) ID of the experiment (usually 0 if only one was conducted) @param onePlot (bool) If true, all cortical columns will be merged in one plot. """ if not os.path.exists(plotDir): os.makedirs(plotDir) plt.figure() stats = self.statistics[experimentID] objectName = stats["object"] for i in xrange(self.numColumns): if not onePlot: plt.figure() # plot request stats for field in fields: fieldKey = field + " C" + str(i) plt.plot(stats[fieldKey], marker='+', label=fieldKey) # format plt.legend(loc="upper right") plt.xlabel("Sensation #") plt.xticks(range(stats["numSteps"])) plt.ylabel("Number of active bits") plt.ylim(plt.ylim()[0] - 5, plt.ylim()[1] + 5) plt.title("Object inference for object {}".format(objectName)) # save if not onePlot: relPath = "{}_exp_{}_C{}.png".format(self.name, experimentID, i) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close() if onePlot: relPath = "{}_exp_{}.png".format(self.name, experimentID) path = os.path.join(plotDir, relPath) plt.savefig(path) plt.close()
def add_deferred_effect(self, effect, pos): """ Pushes an (pos, effect) tuple onto a stack to later be executed if the state reaches the 'pos'.""" if not isinstance(pos, (unicode, str)): raise Exception("Invalid POS tag. Must be string not %d" % (type(pos))) if self['speaker_model']['is_syntax_stacked'] == True: self.__dict__['deferred_effects'].insert(0,(pos, effect,)) elif self['speaker_model']['is_syntax_stacked'] == False: self.__dict__['deferred_effects'].append((pos, effect,)) else: raise Contradiction("Speaker Model undefined")
Pushes an (pos, effect) tuple onto a stack to later be executed if the state reaches the 'pos'.
Below is the the instruction that describes the task: ### Input: Pushes an (pos, effect) tuple onto a stack to later be executed if the state reaches the 'pos'. ### Response: def add_deferred_effect(self, effect, pos): """ Pushes an (pos, effect) tuple onto a stack to later be executed if the state reaches the 'pos'.""" if not isinstance(pos, (unicode, str)): raise Exception("Invalid POS tag. Must be string not %d" % (type(pos))) if self['speaker_model']['is_syntax_stacked'] == True: self.__dict__['deferred_effects'].insert(0,(pos, effect,)) elif self['speaker_model']['is_syntax_stacked'] == False: self.__dict__['deferred_effects'].append((pos, effect,)) else: raise Contradiction("Speaker Model undefined")
async def download_media_by_id(self, media_id): """Given a message ID, finds the media this message contained and downloads it. """ try: msg = self.found_media[int(media_id)] except (ValueError, KeyError): # ValueError when parsing, KeyError when accessing dictionary print('Invalid media ID given or message not found!') return print('Downloading media to usermedia/...') os.makedirs('usermedia', exist_ok=True) output = await self.download_media( msg.media, file='usermedia/', progress_callback=self.download_progress_callback ) print('Media downloaded to {}!'.format(output))
Given a message ID, finds the media this message contained and downloads it.
Below is the the instruction that describes the task: ### Input: Given a message ID, finds the media this message contained and downloads it. ### Response: async def download_media_by_id(self, media_id): """Given a message ID, finds the media this message contained and downloads it. """ try: msg = self.found_media[int(media_id)] except (ValueError, KeyError): # ValueError when parsing, KeyError when accessing dictionary print('Invalid media ID given or message not found!') return print('Downloading media to usermedia/...') os.makedirs('usermedia', exist_ok=True) output = await self.download_media( msg.media, file='usermedia/', progress_callback=self.download_progress_callback ) print('Media downloaded to {}!'.format(output))
def check_num_tasks(chain, task_count): """Make sure there are a specific number of specific task types. Currently we only check decision tasks. Args: chain (ChainOfTrust): the chain we're operating on task_count (dict): mapping task type to the number of links. Raises: CoTError: on failure. """ errors = [] # hardcode for now. If we need a different set of constraints, either # go by cot_product settings or by task_count['docker-image'] + 1 min_decision_tasks = 1 if task_count['decision'] < min_decision_tasks: errors.append("{} decision tasks; we must have at least {}!".format( task_count['decision'], min_decision_tasks )) raise_on_errors(errors)
Make sure there are a specific number of specific task types. Currently we only check decision tasks. Args: chain (ChainOfTrust): the chain we're operating on task_count (dict): mapping task type to the number of links. Raises: CoTError: on failure.
Below is the the instruction that describes the task: ### Input: Make sure there are a specific number of specific task types. Currently we only check decision tasks. Args: chain (ChainOfTrust): the chain we're operating on task_count (dict): mapping task type to the number of links. Raises: CoTError: on failure. ### Response: def check_num_tasks(chain, task_count): """Make sure there are a specific number of specific task types. Currently we only check decision tasks. Args: chain (ChainOfTrust): the chain we're operating on task_count (dict): mapping task type to the number of links. Raises: CoTError: on failure. """ errors = [] # hardcode for now. If we need a different set of constraints, either # go by cot_product settings or by task_count['docker-image'] + 1 min_decision_tasks = 1 if task_count['decision'] < min_decision_tasks: errors.append("{} decision tasks; we must have at least {}!".format( task_count['decision'], min_decision_tasks )) raise_on_errors(errors)
def get_comments(self): """Get a list of the top-level comments.""" url = self._imgur._base_url + "/3/gallery/{0}/comments".format(self.id) resp = self._imgur._send_request(url) return [Comment(com, self._imgur) for com in resp]
Get a list of the top-level comments.
Below is the the instruction that describes the task: ### Input: Get a list of the top-level comments. ### Response: def get_comments(self): """Get a list of the top-level comments.""" url = self._imgur._base_url + "/3/gallery/{0}/comments".format(self.id) resp = self._imgur._send_request(url) return [Comment(com, self._imgur) for com in resp]
def process_pubmed_abstract(pubmed_id, offline=False, output_fname=default_output_fname, **kwargs): """Return a ReachProcessor by processing an abstract with a given Pubmed id. Uses the Pubmed client to get the abstract. If that fails, None is returned. Parameters ---------- pubmed_id : str The ID of a Pubmed article. The string may start with PMID but passing just the ID also works. Examples: 27168024, PMID27168024 https://www.ncbi.nlm.nih.gov/pubmed/ offline : Optional[bool] If set to True, the REACH system is ran offline. Otherwise (by default) the web service is called. Default: False output_fname : Optional[str] The file to output the REACH JSON output to. Defaults to reach_output.json in current working directory. **kwargs : keyword arguments All other keyword arguments are passed directly to `process_text`. Returns ------- rp : ReachProcessor A ReachProcessor containing the extracted INDRA Statements in rp.statements. """ abs_txt = pubmed_client.get_abstract(pubmed_id) if abs_txt is None: return None rp = process_text(abs_txt, citation=pubmed_id, offline=offline, output_fname=output_fname, **kwargs) if rp and rp.statements: for st in rp.statements: for ev in st.evidence: ev.epistemics['section_type'] = 'abstract' return rp
Return a ReachProcessor by processing an abstract with a given Pubmed id. Uses the Pubmed client to get the abstract. If that fails, None is returned. Parameters ---------- pubmed_id : str The ID of a Pubmed article. The string may start with PMID but passing just the ID also works. Examples: 27168024, PMID27168024 https://www.ncbi.nlm.nih.gov/pubmed/ offline : Optional[bool] If set to True, the REACH system is ran offline. Otherwise (by default) the web service is called. Default: False output_fname : Optional[str] The file to output the REACH JSON output to. Defaults to reach_output.json in current working directory. **kwargs : keyword arguments All other keyword arguments are passed directly to `process_text`. Returns ------- rp : ReachProcessor A ReachProcessor containing the extracted INDRA Statements in rp.statements.
Below is the the instruction that describes the task: ### Input: Return a ReachProcessor by processing an abstract with a given Pubmed id. Uses the Pubmed client to get the abstract. If that fails, None is returned. Parameters ---------- pubmed_id : str The ID of a Pubmed article. The string may start with PMID but passing just the ID also works. Examples: 27168024, PMID27168024 https://www.ncbi.nlm.nih.gov/pubmed/ offline : Optional[bool] If set to True, the REACH system is ran offline. Otherwise (by default) the web service is called. Default: False output_fname : Optional[str] The file to output the REACH JSON output to. Defaults to reach_output.json in current working directory. **kwargs : keyword arguments All other keyword arguments are passed directly to `process_text`. Returns ------- rp : ReachProcessor A ReachProcessor containing the extracted INDRA Statements in rp.statements. ### Response: def process_pubmed_abstract(pubmed_id, offline=False, output_fname=default_output_fname, **kwargs): """Return a ReachProcessor by processing an abstract with a given Pubmed id. Uses the Pubmed client to get the abstract. If that fails, None is returned. Parameters ---------- pubmed_id : str The ID of a Pubmed article. The string may start with PMID but passing just the ID also works. Examples: 27168024, PMID27168024 https://www.ncbi.nlm.nih.gov/pubmed/ offline : Optional[bool] If set to True, the REACH system is ran offline. Otherwise (by default) the web service is called. Default: False output_fname : Optional[str] The file to output the REACH JSON output to. Defaults to reach_output.json in current working directory. **kwargs : keyword arguments All other keyword arguments are passed directly to `process_text`. Returns ------- rp : ReachProcessor A ReachProcessor containing the extracted INDRA Statements in rp.statements. 
""" abs_txt = pubmed_client.get_abstract(pubmed_id) if abs_txt is None: return None rp = process_text(abs_txt, citation=pubmed_id, offline=offline, output_fname=output_fname, **kwargs) if rp and rp.statements: for st in rp.statements: for ev in st.evidence: ev.epistemics['section_type'] = 'abstract' return rp
def get_kde_contour(self, xax="area_um", yax="deform", xacc=None, yacc=None, kde_type="histogram", kde_kwargs={}, xscale="linear", yscale="linear"): """Evaluate the kernel density estimate for contour plots Parameters ---------- xax: str Identifier for X axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for Y axis xacc: float Contour accuracy in x direction yacc: float Contour accuracy in y direction kde_type: str The KDE method to use kde_kwargs: dict Additional keyword arguments to the KDE method xscale: str If set to "log", take the logarithm of the x-values before computing the KDE. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- X, Y, Z : coordinates The kernel density Z evaluated on a rectangular grid (X,Y). """ xax = xax.lower() yax = yax.lower() kde_type = kde_type.lower() if kde_type not in kde_methods.methods: raise ValueError("Not a valid kde type: {}!".format(kde_type)) # Get data x = self[xax][self.filter.all] y = self[yax][self.filter.all] # Apply scale (no change for linear scale) xs = self._apply_scale(x, xscale, xax) ys = self._apply_scale(y, yscale, yax) # accuracy (bin width) of KDE estimator if xacc is None: xacc = kde_methods.bin_width_doane(xs) / 5 if yacc is None: yacc = kde_methods.bin_width_doane(ys) / 5 # Ignore infs and nans bad = kde_methods.get_bad_vals(xs, ys) xc = xs[~bad] yc = ys[~bad] xnum = int(np.ceil((xc.max() - xc.min()) / xacc)) ynum = int(np.ceil((yc.max() - yc.min()) / yacc)) xlin = np.linspace(xc.min(), xc.max(), xnum, endpoint=True) ylin = np.linspace(yc.min(), yc.max(), ynum, endpoint=True) xmesh, ymesh = np.meshgrid(xlin, ylin, indexing="ij") kde_fct = kde_methods.methods[kde_type] if len(x): density = kde_fct(events_x=xs, events_y=ys, xout=xmesh, yout=ymesh, **kde_kwargs) else: density = [] # Convert mesh back to linear scale if applicable if xscale == "log": xmesh = np.exp(xmesh) if yscale == "log": ymesh = np.exp(ymesh) return xmesh, 
ymesh, density
Evaluate the kernel density estimate for contour plots Parameters ---------- xax: str Identifier for X axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for Y axis xacc: float Contour accuracy in x direction yacc: float Contour accuracy in y direction kde_type: str The KDE method to use kde_kwargs: dict Additional keyword arguments to the KDE method xscale: str If set to "log", take the logarithm of the x-values before computing the KDE. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- X, Y, Z : coordinates The kernel density Z evaluated on a rectangular grid (X,Y).
Below is the the instruction that describes the task: ### Input: Evaluate the kernel density estimate for contour plots Parameters ---------- xax: str Identifier for X axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for Y axis xacc: float Contour accuracy in x direction yacc: float Contour accuracy in y direction kde_type: str The KDE method to use kde_kwargs: dict Additional keyword arguments to the KDE method xscale: str If set to "log", take the logarithm of the x-values before computing the KDE. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- X, Y, Z : coordinates The kernel density Z evaluated on a rectangular grid (X,Y). ### Response: def get_kde_contour(self, xax="area_um", yax="deform", xacc=None, yacc=None, kde_type="histogram", kde_kwargs={}, xscale="linear", yscale="linear"): """Evaluate the kernel density estimate for contour plots Parameters ---------- xax: str Identifier for X axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for Y axis xacc: float Contour accuracy in x direction yacc: float Contour accuracy in y direction kde_type: str The KDE method to use kde_kwargs: dict Additional keyword arguments to the KDE method xscale: str If set to "log", take the logarithm of the x-values before computing the KDE. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- X, Y, Z : coordinates The kernel density Z evaluated on a rectangular grid (X,Y). 
""" xax = xax.lower() yax = yax.lower() kde_type = kde_type.lower() if kde_type not in kde_methods.methods: raise ValueError("Not a valid kde type: {}!".format(kde_type)) # Get data x = self[xax][self.filter.all] y = self[yax][self.filter.all] # Apply scale (no change for linear scale) xs = self._apply_scale(x, xscale, xax) ys = self._apply_scale(y, yscale, yax) # accuracy (bin width) of KDE estimator if xacc is None: xacc = kde_methods.bin_width_doane(xs) / 5 if yacc is None: yacc = kde_methods.bin_width_doane(ys) / 5 # Ignore infs and nans bad = kde_methods.get_bad_vals(xs, ys) xc = xs[~bad] yc = ys[~bad] xnum = int(np.ceil((xc.max() - xc.min()) / xacc)) ynum = int(np.ceil((yc.max() - yc.min()) / yacc)) xlin = np.linspace(xc.min(), xc.max(), xnum, endpoint=True) ylin = np.linspace(yc.min(), yc.max(), ynum, endpoint=True) xmesh, ymesh = np.meshgrid(xlin, ylin, indexing="ij") kde_fct = kde_methods.methods[kde_type] if len(x): density = kde_fct(events_x=xs, events_y=ys, xout=xmesh, yout=ymesh, **kde_kwargs) else: density = [] # Convert mesh back to linear scale if applicable if xscale == "log": xmesh = np.exp(xmesh) if yscale == "log": ymesh = np.exp(ymesh) return xmesh, ymesh, density
async def kick(self, count): """ Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked """ args = (count,) res = await self.conn.call(self.__funcs['kick'], args) if self.conn.version < (1, 7): return res.body[0][0] return res.body[0]
Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked
Below is the the instruction that describes the task: ### Input: Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked ### Response: async def kick(self, count): """ Kick `count` tasks from queue :param count: Tasks count to kick :return: Number of tasks actually kicked """ args = (count,) res = await self.conn.call(self.__funcs['kick'], args) if self.conn.version < (1, 7): return res.body[0][0] return res.body[0]
def run(uri, user_entry_point, args, env_vars=None, wait=True, capture_error=False, runner=_runner.ProcessRunnerType, extra_opts=None): # type: (str, str, List[str], Dict[str, str], bool, bool, _runner.RunnerType, Dict[str, str]) -> None """Download, prepare and executes a compressed tar file from S3 or provided directory as an user entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command arguments. If the entry point is: - A Python package: executes the packages as >>> env_vars python -m module_name + args - A Python script: executes the script as >>> env_vars python module_name + args - Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args Example: >>>import sagemaker_containers >>>from sagemaker_containers.beta.framework import entry_point >>>env = sagemaker_containers.training_env() {'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...} >>>hyperparameters = env.hyperparameters {'batch-size': 128, 'model_dir': '/opt/ml/model'} >>>args = mapping.to_cmd_args(hyperparameters) ['--batch-size', '128', '--model_dir', '/opt/ml/model'] >>>env_vars = mapping.to_env_vars() ['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training', 'MODEL_DIR':'/opt/ml/model', ...} >>>entry_point.run('user_script', args, env_vars) SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \ SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model Args: uri (str): the location of the module. user_entry_point (str): name of the user provided entry point args (list): A list of program arguments. env_vars (dict): A map containing the environment variables to be written (default: None). wait (bool): If the user entry point should be run to completion before this method returns (default: True). capture_error (bool): Default false. 
If True, the running process captures the stderr, and appends it to the returned Exception message in case of errors. runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType). extra_opts (dict): Additional options for running the entry point (default: None). Currently, this only applies for MPI. Returns: sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for executing the entry point. """ env_vars = env_vars or {} env_vars = env_vars.copy() _files.download_and_extract(uri, user_entry_point, _env.code_dir) install(user_entry_point, _env.code_dir, capture_error) _env.write_env_vars(env_vars) return _runner.get(runner, user_entry_point, args, env_vars, extra_opts).run(wait, capture_error)
Download, prepare and executes a compressed tar file from S3 or provided directory as an user entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command arguments. If the entry point is: - A Python package: executes the packages as >>> env_vars python -m module_name + args - A Python script: executes the script as >>> env_vars python module_name + args - Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args Example: >>>import sagemaker_containers >>>from sagemaker_containers.beta.framework import entry_point >>>env = sagemaker_containers.training_env() {'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...} >>>hyperparameters = env.hyperparameters {'batch-size': 128, 'model_dir': '/opt/ml/model'} >>>args = mapping.to_cmd_args(hyperparameters) ['--batch-size', '128', '--model_dir', '/opt/ml/model'] >>>env_vars = mapping.to_env_vars() ['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training', 'MODEL_DIR':'/opt/ml/model', ...} >>>entry_point.run('user_script', args, env_vars) SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \ SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model Args: uri (str): the location of the module. user_entry_point (str): name of the user provided entry point args (list): A list of program arguments. env_vars (dict): A map containing the environment variables to be written (default: None). wait (bool): If the user entry point should be run to completion before this method returns (default: True). capture_error (bool): Default false. If True, the running process captures the stderr, and appends it to the returned Exception message in case of errors. runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType). 
extra_opts (dict): Additional options for running the entry point (default: None). Currently, this only applies for MPI. Returns: sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for executing the entry point.
Below is the the instruction that describes the task: ### Input: Download, prepare and executes a compressed tar file from S3 or provided directory as an user entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command arguments. If the entry point is: - A Python package: executes the packages as >>> env_vars python -m module_name + args - A Python script: executes the script as >>> env_vars python module_name + args - Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args Example: >>>import sagemaker_containers >>>from sagemaker_containers.beta.framework import entry_point >>>env = sagemaker_containers.training_env() {'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...} >>>hyperparameters = env.hyperparameters {'batch-size': 128, 'model_dir': '/opt/ml/model'} >>>args = mapping.to_cmd_args(hyperparameters) ['--batch-size', '128', '--model_dir', '/opt/ml/model'] >>>env_vars = mapping.to_env_vars() ['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training', 'MODEL_DIR':'/opt/ml/model', ...} >>>entry_point.run('user_script', args, env_vars) SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \ SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model Args: uri (str): the location of the module. user_entry_point (str): name of the user provided entry point args (list): A list of program arguments. env_vars (dict): A map containing the environment variables to be written (default: None). wait (bool): If the user entry point should be run to completion before this method returns (default: True). capture_error (bool): Default false. If True, the running process captures the stderr, and appends it to the returned Exception message in case of errors. 
runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType). extra_opts (dict): Additional options for running the entry point (default: None). Currently, this only applies for MPI. Returns: sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for executing the entry point. ### Response: def run(uri, user_entry_point, args, env_vars=None, wait=True, capture_error=False, runner=_runner.ProcessRunnerType, extra_opts=None): # type: (str, str, List[str], Dict[str, str], bool, bool, _runner.RunnerType, Dict[str, str]) -> None """Download, prepare and executes a compressed tar file from S3 or provided directory as an user entrypoint. Runs the user entry point, passing env_vars as environment variables and args as command arguments. If the entry point is: - A Python package: executes the packages as >>> env_vars python -m module_name + args - A Python script: executes the script as >>> env_vars python module_name + args - Any other: executes the command as >>> env_vars /bin/sh -c ./module_name + args Example: >>>import sagemaker_containers >>>from sagemaker_containers.beta.framework import entry_point >>>env = sagemaker_containers.training_env() {'channel-input-dirs': {'training': '/opt/ml/input/training'}, 'model_dir': '/opt/ml/model', ...} >>>hyperparameters = env.hyperparameters {'batch-size': 128, 'model_dir': '/opt/ml/model'} >>>args = mapping.to_cmd_args(hyperparameters) ['--batch-size', '128', '--model_dir', '/opt/ml/model'] >>>env_vars = mapping.to_env_vars() ['SAGEMAKER_CHANNELS':'training', 'SAGEMAKER_CHANNEL_TRAINING':'/opt/ml/input/training', 'MODEL_DIR':'/opt/ml/model', ...} >>>entry_point.run('user_script', args, env_vars) SAGEMAKER_CHANNELS=training SAGEMAKER_CHANNEL_TRAINING=/opt/ml/input/training \ SAGEMAKER_MODEL_DIR=/opt/ml/model python -m user_script --batch-size 128 --model_dir /opt/ml/model Args: uri 
(str): the location of the module. user_entry_point (str): name of the user provided entry point args (list): A list of program arguments. env_vars (dict): A map containing the environment variables to be written (default: None). wait (bool): If the user entry point should be run to completion before this method returns (default: True). capture_error (bool): Default false. If True, the running process captures the stderr, and appends it to the returned Exception message in case of errors. runner (sagemaker_containers.beta.framework.runner.RunnerType): the type of runner object to be created (default: sagemaker_containers.beta.framework.runner.ProcessRunnerType). extra_opts (dict): Additional options for running the entry point (default: None). Currently, this only applies for MPI. Returns: sagemaker_containers.beta.framework.process.ProcessRunner: the runner object responsible for executing the entry point. """ env_vars = env_vars or {} env_vars = env_vars.copy() _files.download_and_extract(uri, user_entry_point, _env.code_dir) install(user_entry_point, _env.code_dir, capture_error) _env.write_env_vars(env_vars) return _runner.get(runner, user_entry_point, args, env_vars, extra_opts).run(wait, capture_error)
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3): """ Read the data encoding the ValidationInformation structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the validation authority type, validation version major, validation type, and/or validation level are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ValidationInformation structure. """ if kmip_version < enums.KMIPVersion.KMIP_1_3: raise exceptions.VersionNotSupported( "KMIP {} does not support the ValidationInformation " "object.".format( kmip_version.value ) ) super(ValidationInformation, self).read( input_buffer, kmip_version=kmip_version ) local_buffer = utils.BytearrayStream(input_buffer.read(self.length)) if self.is_tag_next( enums.Tags.VALIDATION_AUTHORITY_TYPE, local_buffer ): validation_authority_type = primitives.Enumeration( enums.ValidationAuthorityType, tag=enums.Tags.VALIDATION_AUTHORITY_TYPE ) validation_authority_type.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_type = validation_authority_type else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation authority type." 
) if self.is_tag_next( enums.Tags.VALIDATION_AUTHORITY_COUNTRY, local_buffer ): validation_authority_country = primitives.TextString( tag=enums.Tags.VALIDATION_AUTHORITY_COUNTRY ) validation_authority_country.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_country = validation_authority_country if self.is_tag_next(enums.Tags.VALIDATION_AUTHORITY_URI, local_buffer): validation_authority_uri = primitives.TextString( tag=enums.Tags.VALIDATION_AUTHORITY_URI ) validation_authority_uri.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_uri = validation_authority_uri if self.is_tag_next( enums.Tags.VALIDATION_VERSION_MAJOR, local_buffer ): validation_version_major = primitives.Integer( tag=enums.Tags.VALIDATION_VERSION_MAJOR ) validation_version_major.read( local_buffer, kmip_version=kmip_version ) self._validation_version_major = validation_version_major else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation version major." ) if self.is_tag_next( enums.Tags.VALIDATION_VERSION_MINOR, local_buffer ): validation_version_minor = primitives.Integer( tag=enums.Tags.VALIDATION_VERSION_MINOR ) validation_version_minor.read( local_buffer, kmip_version=kmip_version ) self._validation_version_minor = validation_version_minor if self.is_tag_next(enums.Tags.VALIDATION_TYPE, local_buffer): validation_type = primitives.Enumeration( enums.ValidationType, tag=enums.Tags.VALIDATION_TYPE ) validation_type.read( local_buffer, kmip_version=kmip_version ) self._validation_type = validation_type else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation type." 
) if self.is_tag_next(enums.Tags.VALIDATION_LEVEL, local_buffer): validation_level = primitives.Integer( tag=enums.Tags.VALIDATION_LEVEL ) validation_level.read(local_buffer, kmip_version=kmip_version) self._validation_level = validation_level else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation level." ) if self.is_tag_next( enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER, local_buffer ): validation_certificate_identifier = primitives.TextString( tag=enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER ) validation_certificate_identifier.read( local_buffer, kmip_version=kmip_version ) self._validation_certificate_identifier = \ validation_certificate_identifier if self.is_tag_next( enums.Tags.VALIDATION_CERTIFICATE_URI, local_buffer ): validation_certificate_uri = primitives.TextString( tag=enums.Tags.VALIDATION_CERTIFICATE_URI ) validation_certificate_uri.read( local_buffer, kmip_version=kmip_version ) self._validation_certificate_uri = validation_certificate_uri if self.is_tag_next(enums.Tags.VALIDATION_VENDOR_URI, local_buffer): validation_vendor_uri = primitives.TextString( tag=enums.Tags.VALIDATION_VENDOR_URI ) validation_vendor_uri.read(local_buffer, kmip_version=kmip_version) self._validation_vendor_uri = validation_vendor_uri validation_profiles = [] while self.is_tag_next(enums.Tags.VALIDATION_PROFILE, local_buffer): validation_profile = primitives.TextString( tag=enums.Tags.VALIDATION_PROFILE ) validation_profile.read(local_buffer, kmip_version=kmip_version) validation_profiles.append(validation_profile) self._validation_profiles = validation_profiles self.is_oversized(local_buffer)
Read the data encoding the ValidationInformation structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the validation authority type, validation version major, validation type, and/or validation level are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ValidationInformation structure.
Below is the the instruction that describes the task: ### Input: Read the data encoding the ValidationInformation structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the validation authority type, validation version major, validation type, and/or validation level are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ValidationInformation structure. ### Response: def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_1_3): """ Read the data encoding the ValidationInformation structure and decode it into its constituent parts. Args: input_buffer (stream): A data stream containing encoded object data, supporting a read method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be decoded. Optional, defaults to KMIP 2.0. Raises: InvalidKmipEncoding: Raised if the validation authority type, validation version major, validation type, and/or validation level are missing from the encoding. VersionNotSupported: Raised when a KMIP version is provided that does not support the ValidationInformation structure. 
""" if kmip_version < enums.KMIPVersion.KMIP_1_3: raise exceptions.VersionNotSupported( "KMIP {} does not support the ValidationInformation " "object.".format( kmip_version.value ) ) super(ValidationInformation, self).read( input_buffer, kmip_version=kmip_version ) local_buffer = utils.BytearrayStream(input_buffer.read(self.length)) if self.is_tag_next( enums.Tags.VALIDATION_AUTHORITY_TYPE, local_buffer ): validation_authority_type = primitives.Enumeration( enums.ValidationAuthorityType, tag=enums.Tags.VALIDATION_AUTHORITY_TYPE ) validation_authority_type.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_type = validation_authority_type else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation authority type." ) if self.is_tag_next( enums.Tags.VALIDATION_AUTHORITY_COUNTRY, local_buffer ): validation_authority_country = primitives.TextString( tag=enums.Tags.VALIDATION_AUTHORITY_COUNTRY ) validation_authority_country.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_country = validation_authority_country if self.is_tag_next(enums.Tags.VALIDATION_AUTHORITY_URI, local_buffer): validation_authority_uri = primitives.TextString( tag=enums.Tags.VALIDATION_AUTHORITY_URI ) validation_authority_uri.read( local_buffer, kmip_version=kmip_version ) self._validation_authority_uri = validation_authority_uri if self.is_tag_next( enums.Tags.VALIDATION_VERSION_MAJOR, local_buffer ): validation_version_major = primitives.Integer( tag=enums.Tags.VALIDATION_VERSION_MAJOR ) validation_version_major.read( local_buffer, kmip_version=kmip_version ) self._validation_version_major = validation_version_major else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation version major." 
) if self.is_tag_next( enums.Tags.VALIDATION_VERSION_MINOR, local_buffer ): validation_version_minor = primitives.Integer( tag=enums.Tags.VALIDATION_VERSION_MINOR ) validation_version_minor.read( local_buffer, kmip_version=kmip_version ) self._validation_version_minor = validation_version_minor if self.is_tag_next(enums.Tags.VALIDATION_TYPE, local_buffer): validation_type = primitives.Enumeration( enums.ValidationType, tag=enums.Tags.VALIDATION_TYPE ) validation_type.read( local_buffer, kmip_version=kmip_version ) self._validation_type = validation_type else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation type." ) if self.is_tag_next(enums.Tags.VALIDATION_LEVEL, local_buffer): validation_level = primitives.Integer( tag=enums.Tags.VALIDATION_LEVEL ) validation_level.read(local_buffer, kmip_version=kmip_version) self._validation_level = validation_level else: raise exceptions.InvalidKmipEncoding( "The ValidationInformation encoding is missing the " "validation level." 
) if self.is_tag_next( enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER, local_buffer ): validation_certificate_identifier = primitives.TextString( tag=enums.Tags.VALIDATION_CERTIFICATE_IDENTIFIER ) validation_certificate_identifier.read( local_buffer, kmip_version=kmip_version ) self._validation_certificate_identifier = \ validation_certificate_identifier if self.is_tag_next( enums.Tags.VALIDATION_CERTIFICATE_URI, local_buffer ): validation_certificate_uri = primitives.TextString( tag=enums.Tags.VALIDATION_CERTIFICATE_URI ) validation_certificate_uri.read( local_buffer, kmip_version=kmip_version ) self._validation_certificate_uri = validation_certificate_uri if self.is_tag_next(enums.Tags.VALIDATION_VENDOR_URI, local_buffer): validation_vendor_uri = primitives.TextString( tag=enums.Tags.VALIDATION_VENDOR_URI ) validation_vendor_uri.read(local_buffer, kmip_version=kmip_version) self._validation_vendor_uri = validation_vendor_uri validation_profiles = [] while self.is_tag_next(enums.Tags.VALIDATION_PROFILE, local_buffer): validation_profile = primitives.TextString( tag=enums.Tags.VALIDATION_PROFILE ) validation_profile.read(local_buffer, kmip_version=kmip_version) validation_profiles.append(validation_profile) self._validation_profiles = validation_profiles self.is_oversized(local_buffer)
def newPosition(self, globalBestPosition, rng): """See comments in base class.""" # First, update the velocity. The new velocity is given as: # v = (inertia * v) + (cogRate * r1 * (localBest-pos)) # + (socRate * r2 * (globalBest-pos)) # # where r1 and r2 are random numbers between 0 and 1.0 lb=float(Configuration.get("nupic.hypersearch.randomLowerBound")) ub=float(Configuration.get("nupic.hypersearch.randomUpperBound")) self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) * self._cogRate * (self._bestPosition - self.getPosition())) if globalBestPosition is not None: self._velocity += rng.uniform(lb, ub) * self._socRate * ( globalBestPosition - self.getPosition()) # update position based on velocity self._position += self._velocity # Clip it self._position = max(self.min, self._position) self._position = min(self.max, self._position) # Return it return self.getPosition()
See comments in base class.
Below is the the instruction that describes the task: ### Input: See comments in base class. ### Response: def newPosition(self, globalBestPosition, rng): """See comments in base class.""" # First, update the velocity. The new velocity is given as: # v = (inertia * v) + (cogRate * r1 * (localBest-pos)) # + (socRate * r2 * (globalBest-pos)) # # where r1 and r2 are random numbers between 0 and 1.0 lb=float(Configuration.get("nupic.hypersearch.randomLowerBound")) ub=float(Configuration.get("nupic.hypersearch.randomUpperBound")) self._velocity = (self._velocity * self._inertia + rng.uniform(lb, ub) * self._cogRate * (self._bestPosition - self.getPosition())) if globalBestPosition is not None: self._velocity += rng.uniform(lb, ub) * self._socRate * ( globalBestPosition - self.getPosition()) # update position based on velocity self._position += self._velocity # Clip it self._position = max(self.min, self._position) self._position = min(self.max, self._position) # Return it return self.getPosition()
def get_images(self, obj): """Object of images serialized by tag name.""" return {str(i.tag): i.image.url for i in obj.images.all()}
Object of images serialized by tag name.
Below is the instruction that describes the task:
### Input:
Object of images serialized by tag name.
### Response:
def get_images(self, obj):
        """Object of images serialized by tag name."""
        return {str(i.tag): i.image.url for i in obj.images.all()}
def delete_channel_cb(self, viewer, channel): """Called when a channel is deleted from the main interface. Parameter is channel (a bunch).""" chname_del = channel.name # TODO: delete thumbs for this channel! self.logger.debug("deleting thumbs for channel '%s'" % (chname_del)) with self.thmblock: new_thumb_list = [] un_hilite_set = set([]) for thumbkey in self.thumb_list: chname = thumbkey[0] if chname != chname_del: new_thumb_list.append(thumbkey) else: if thumbkey in self.thumb_dict: del self.thumb_dict[thumbkey] un_hilite_set.add(thumbkey) self.thumb_list = new_thumb_list self._tkf_highlight -= un_hilite_set # Unhighlight self.fv.gui_do_oneshot('thumbs-reorder', self.reorder_thumbs)
Called when a channel is deleted from the main interface. Parameter is channel (a bunch).
Below is the the instruction that describes the task: ### Input: Called when a channel is deleted from the main interface. Parameter is channel (a bunch). ### Response: def delete_channel_cb(self, viewer, channel): """Called when a channel is deleted from the main interface. Parameter is channel (a bunch).""" chname_del = channel.name # TODO: delete thumbs for this channel! self.logger.debug("deleting thumbs for channel '%s'" % (chname_del)) with self.thmblock: new_thumb_list = [] un_hilite_set = set([]) for thumbkey in self.thumb_list: chname = thumbkey[0] if chname != chname_del: new_thumb_list.append(thumbkey) else: if thumbkey in self.thumb_dict: del self.thumb_dict[thumbkey] un_hilite_set.add(thumbkey) self.thumb_list = new_thumb_list self._tkf_highlight -= un_hilite_set # Unhighlight self.fv.gui_do_oneshot('thumbs-reorder', self.reorder_thumbs)
def set_pubsubhubbub(self): """Parses pubsubhubbub and email then sets value""" self.pubsubhubbub = None atom_links = self.soup.findAll('atom:link') for atom_link in atom_links: rel = atom_link.get('rel') if rel == "hub": self.pubsubhubbub = atom_link.get('href')
Parses pubsubhubbub and email then sets value
Below is the instruction that describes the task:
### Input:
Parses pubsubhubbub and email then sets value
### Response:
def set_pubsubhubbub(self):
        """Parses pubsubhubbub and email then sets value"""
        self.pubsubhubbub = None
        atom_links = self.soup.findAll('atom:link')
        for atom_link in atom_links:
            rel = atom_link.get('rel')
            if rel == "hub":
                self.pubsubhubbub = atom_link.get('href')
def delete_service(self, service_id): """Delete a service.""" content = self._fetch("/service/%s" % service_id, method="DELETE") return self._status(content)
Delete a service.
Below is the instruction that describes the task:
### Input:
Delete a service.
### Response:
def delete_service(self, service_id):
        """Delete a service."""
        content = self._fetch("/service/%s" % service_id, method="DELETE")
        return self._status(content)
def _ast_optree_node_to_code(self, node, **kwargs): """Convert an abstract syntax operator tree to python source code.""" opnode = node.opnode if opnode is None: return self._ast_to_code(node.operands[0]) else: operator = opnode.operator if operator is OP_ALTERNATE: return self._ast_op_alternate_to_code(node, **kwargs) elif operator is OP_WS_CONCAT: kwargs["ignore_whitespace"] = False return self._ast_op_concat_to_code(node, **kwargs) elif operator is OP_CONCAT: kwargs["ignore_whitespace"] = True return self._ast_op_concat_to_code(node, **kwargs) elif operator is OP_EXCLUDE: return self._ast_op_exclude_to_code(node, **kwargs) elif operator is OP_MULTIPLY: return self._ast_op_multiply_to_code(node, **kwargs) elif operator is OP_REPEAT: return self._ast_op_repeat_to_code(node, **kwargs) else: raise Exception("Unhandled optree node: {0}".format(node))
Convert an abstract syntax operator tree to python source code.
Below is the the instruction that describes the task: ### Input: Convert an abstract syntax operator tree to python source code. ### Response: def _ast_optree_node_to_code(self, node, **kwargs): """Convert an abstract syntax operator tree to python source code.""" opnode = node.opnode if opnode is None: return self._ast_to_code(node.operands[0]) else: operator = opnode.operator if operator is OP_ALTERNATE: return self._ast_op_alternate_to_code(node, **kwargs) elif operator is OP_WS_CONCAT: kwargs["ignore_whitespace"] = False return self._ast_op_concat_to_code(node, **kwargs) elif operator is OP_CONCAT: kwargs["ignore_whitespace"] = True return self._ast_op_concat_to_code(node, **kwargs) elif operator is OP_EXCLUDE: return self._ast_op_exclude_to_code(node, **kwargs) elif operator is OP_MULTIPLY: return self._ast_op_multiply_to_code(node, **kwargs) elif operator is OP_REPEAT: return self._ast_op_repeat_to_code(node, **kwargs) else: raise Exception("Unhandled optree node: {0}".format(node))
def _execute_lua(self, keys, args, client): """ Sets KEYS and ARGV alongwith redis.call() function in lua globals and executes the lua redis script """ lua, lua_globals = Script._import_lua(self.load_dependencies) lua_globals.KEYS = self._python_to_lua(keys) lua_globals.ARGV = self._python_to_lua(args) def _call(*call_args): # redis-py and native redis commands are mostly compatible argument # wise, but some exceptions need to be handled here: if str(call_args[0]).lower() == 'lrem': response = client.call( call_args[0], call_args[1], call_args[3], # "count", default is 0 call_args[2]) else: response = client.call(*call_args) return self._python_to_lua(response) lua_globals.redis = {"call": _call} return self._lua_to_python(lua.execute(self.script), return_status=True)
Sets KEYS and ARGV alongwith redis.call() function in lua globals and executes the lua redis script
Below is the the instruction that describes the task: ### Input: Sets KEYS and ARGV alongwith redis.call() function in lua globals and executes the lua redis script ### Response: def _execute_lua(self, keys, args, client): """ Sets KEYS and ARGV alongwith redis.call() function in lua globals and executes the lua redis script """ lua, lua_globals = Script._import_lua(self.load_dependencies) lua_globals.KEYS = self._python_to_lua(keys) lua_globals.ARGV = self._python_to_lua(args) def _call(*call_args): # redis-py and native redis commands are mostly compatible argument # wise, but some exceptions need to be handled here: if str(call_args[0]).lower() == 'lrem': response = client.call( call_args[0], call_args[1], call_args[3], # "count", default is 0 call_args[2]) else: response = client.call(*call_args) return self._python_to_lua(response) lua_globals.redis = {"call": _call} return self._lua_to_python(lua.execute(self.script), return_status=True)
def add_event(self, key, event): """Add an event and its corresponding key to the store.""" if self.key_exists(key): # This check might actually also be done further up in the chain # (read: SQLiteEventStore). Could potentially be removed if it # requires a lot of processor cycles. msg = "The key already existed: {0}".format(key) raise EventStore.EventKeyAlreadyExistError(msg) self._rotate_files_if_needed() # Since I guess LogEventStore is less mature codewise than # SQLiteEventStore I am writing to that log file first. If something # fails we are not writing to SQLiteEventStore. for store in self.stores: store.add_event(key, event) self.count += 1
Add an event and its corresponding key to the store.
Below is the the instruction that describes the task: ### Input: Add an event and its corresponding key to the store. ### Response: def add_event(self, key, event): """Add an event and its corresponding key to the store.""" if self.key_exists(key): # This check might actually also be done further up in the chain # (read: SQLiteEventStore). Could potentially be removed if it # requires a lot of processor cycles. msg = "The key already existed: {0}".format(key) raise EventStore.EventKeyAlreadyExistError(msg) self._rotate_files_if_needed() # Since I guess LogEventStore is less mature codewise than # SQLiteEventStore I am writing to that log file first. If something # fails we are not writing to SQLiteEventStore. for store in self.stores: store.add_event(key, event) self.count += 1
def get_syslog_facility(): """Get syslog facility from ENV var""" facil = os.getenv('WALE_SYSLOG_FACILITY', 'user') valid_facility = True try: facility = handlers.SysLogHandler.facility_names[facil.lower()] except KeyError: valid_facility = False facility = handlers.SysLogHandler.LOG_USER return facility, valid_facility
Get syslog facility from ENV var
Below is the instruction that describes the task:
### Input:
Get syslog facility from ENV var
### Response:
def get_syslog_facility():
    """Get syslog facility from ENV var"""
    facil = os.getenv('WALE_SYSLOG_FACILITY', 'user')
    valid_facility = True
    try:
        facility = handlers.SysLogHandler.facility_names[facil.lower()]
    except KeyError:
        valid_facility = False
        facility = handlers.SysLogHandler.LOG_USER
    return facility, valid_facility
def _write_packed_data(self, data_out, table): """This is kind of legacy function - this functionality may be useful for some people, so even though now the default of writing CSV is writing unpacked data (divided by independent variable) this method is still available and accessible if ```pack``` flag is specified in Writer's options :param output: output file like object to which data will be written :param table: input table :type table: hepdata_converter.parsers.Table """ headers = [] data = [] qualifiers_marks = [] qualifiers = {} self._extract_independent_variables(table, headers, data, qualifiers_marks) for dependent_variable in table.dependent_variables: self._parse_dependent_variable(dependent_variable, headers, qualifiers, qualifiers_marks, data) self._write_metadata(data_out, table) self._write_csv_data(data_out, qualifiers, qualifiers_marks, headers, data)
This is kind of legacy function - this functionality may be useful for some people, so even though now the default of writing CSV is writing unpacked data (divided by independent variable) this method is still available and accessible if ```pack``` flag is specified in Writer's options :param output: output file like object to which data will be written :param table: input table :type table: hepdata_converter.parsers.Table
Below is the the instruction that describes the task: ### Input: This is kind of legacy function - this functionality may be useful for some people, so even though now the default of writing CSV is writing unpacked data (divided by independent variable) this method is still available and accessible if ```pack``` flag is specified in Writer's options :param output: output file like object to which data will be written :param table: input table :type table: hepdata_converter.parsers.Table ### Response: def _write_packed_data(self, data_out, table): """This is kind of legacy function - this functionality may be useful for some people, so even though now the default of writing CSV is writing unpacked data (divided by independent variable) this method is still available and accessible if ```pack``` flag is specified in Writer's options :param output: output file like object to which data will be written :param table: input table :type table: hepdata_converter.parsers.Table """ headers = [] data = [] qualifiers_marks = [] qualifiers = {} self._extract_independent_variables(table, headers, data, qualifiers_marks) for dependent_variable in table.dependent_variables: self._parse_dependent_variable(dependent_variable, headers, qualifiers, qualifiers_marks, data) self._write_metadata(data_out, table) self._write_csv_data(data_out, qualifiers, qualifiers_marks, headers, data)
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False): """ Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return: """ # ensure that edge exists before attempting to remove it existing_edges = self.graph.get_edge_data(from_index, to_index) existing_reverse = None if to_jimage is None: raise ValueError("Image must be supplied, to avoid ambiguity.") if existing_edges: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(from_index, to_index, edge_index) else: if allow_reverse: existing_reverse = self.graph.get_edge_data(to_index, from_index) if existing_reverse: for i, properties in existing_reverse.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(to_index, from_index, edge_index) else: raise ValueError("Edge cannot be broken between {} and {};\ no edge exists between those sites.".format( from_index, to_index ))
Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return:
Below is the the instruction that describes the task: ### Input: Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return: ### Response: def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False): """ Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return: """ # ensure that edge exists before attempting to remove it existing_edges = self.graph.get_edge_data(from_index, to_index) existing_reverse = None if to_jimage is None: raise ValueError("Image must be supplied, to avoid ambiguity.") if existing_edges: for i, properties in existing_edges.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(from_index, to_index, edge_index) else: if allow_reverse: existing_reverse = self.graph.get_edge_data(to_index, from_index) if existing_reverse: for i, properties in existing_reverse.items(): if properties["to_jimage"] == to_jimage: edge_index = i self.graph.remove_edge(to_index, from_index, edge_index) else: raise ValueError("Edge cannot be broken between {} and {};\ no edge exists between those sites.".format( from_index, to_index ))
def split_matrix(M, contigs): """Split multiple chromosome matrix Split a labeled matrix with multiple chromosomes into unlabeled single-chromosome matrices. Inter chromosomal contacts are discarded. Parameters ---------- M : array_like The multiple chromosome matrix to be split contigs : list or array_like The list of contig labels """ index = 0 for _, chunk in itertools.groubpy(contigs): l = len(chunk) yield M[index : index + l, index : index + l] index += l
Split multiple chromosome matrix Split a labeled matrix with multiple chromosomes into unlabeled single-chromosome matrices. Inter chromosomal contacts are discarded. Parameters ---------- M : array_like The multiple chromosome matrix to be split contigs : list or array_like The list of contig labels
Below is the the instruction that describes the task: ### Input: Split multiple chromosome matrix Split a labeled matrix with multiple chromosomes into unlabeled single-chromosome matrices. Inter chromosomal contacts are discarded. Parameters ---------- M : array_like The multiple chromosome matrix to be split contigs : list or array_like The list of contig labels ### Response: def split_matrix(M, contigs): """Split multiple chromosome matrix Split a labeled matrix with multiple chromosomes into unlabeled single-chromosome matrices. Inter chromosomal contacts are discarded. Parameters ---------- M : array_like The multiple chromosome matrix to be split contigs : list or array_like The list of contig labels """ index = 0 for _, chunk in itertools.groubpy(contigs): l = len(chunk) yield M[index : index + l, index : index + l] index += l
def _is_dataset(uri, config_path): """Helper function for determining if a URI is a dataset.""" uri = dtoolcore.utils.sanitise_uri(uri) storage_broker = _get_storage_broker(uri, config_path) return storage_broker.has_admin_metadata()
Helper function for determining if a URI is a dataset.
Below is the instruction that describes the task:
### Input:
Helper function for determining if a URI is a dataset.
### Response:
def _is_dataset(uri, config_path):
    """Helper function for determining if a URI is a dataset."""
    uri = dtoolcore.utils.sanitise_uri(uri)
    storage_broker = _get_storage_broker(uri, config_path)
    return storage_broker.has_admin_metadata()
def validate_intervals(intervals): """Checks that an (n, 2) interval ndarray is well-formed, and raises errors if not. Parameters ---------- intervals : np.ndarray, shape=(n, 2) Array of interval start/end locations. """ # Validate interval shape if intervals.ndim != 2 or intervals.shape[1] != 2: raise ValueError('Intervals should be n-by-2 numpy ndarray, ' 'but shape={}'.format(intervals.shape)) # Make sure no times are negative if (intervals < 0).any(): raise ValueError('Negative interval times found') # Make sure all intervals have strictly positive duration if (intervals[:, 1] <= intervals[:, 0]).any(): raise ValueError('All interval durations must be strictly positive')
Checks that an (n, 2) interval ndarray is well-formed, and raises errors if not. Parameters ---------- intervals : np.ndarray, shape=(n, 2) Array of interval start/end locations.
Below is the the instruction that describes the task: ### Input: Checks that an (n, 2) interval ndarray is well-formed, and raises errors if not. Parameters ---------- intervals : np.ndarray, shape=(n, 2) Array of interval start/end locations. ### Response: def validate_intervals(intervals): """Checks that an (n, 2) interval ndarray is well-formed, and raises errors if not. Parameters ---------- intervals : np.ndarray, shape=(n, 2) Array of interval start/end locations. """ # Validate interval shape if intervals.ndim != 2 or intervals.shape[1] != 2: raise ValueError('Intervals should be n-by-2 numpy ndarray, ' 'but shape={}'.format(intervals.shape)) # Make sure no times are negative if (intervals < 0).any(): raise ValueError('Negative interval times found') # Make sure all intervals have strictly positive duration if (intervals[:, 1] <= intervals[:, 0]).any(): raise ValueError('All interval durations must be strictly positive')
def pre_encrypt_assertion(response): """ Move the assertion to within a encrypted_assertion :param response: The response with one assertion :return: The response but now with the assertion within an encrypted_assertion. """ assertion = response.assertion response.assertion = None response.encrypted_assertion = EncryptedAssertion() if assertion is not None: if isinstance(assertion, list): response.encrypted_assertion.add_extension_elements(assertion) else: response.encrypted_assertion.add_extension_element(assertion) return response
Move the assertion to within a encrypted_assertion :param response: The response with one assertion :return: The response but now with the assertion within an encrypted_assertion.
Below is the the instruction that describes the task: ### Input: Move the assertion to within a encrypted_assertion :param response: The response with one assertion :return: The response but now with the assertion within an encrypted_assertion. ### Response: def pre_encrypt_assertion(response): """ Move the assertion to within a encrypted_assertion :param response: The response with one assertion :return: The response but now with the assertion within an encrypted_assertion. """ assertion = response.assertion response.assertion = None response.encrypted_assertion = EncryptedAssertion() if assertion is not None: if isinstance(assertion, list): response.encrypted_assertion.add_extension_elements(assertion) else: response.encrypted_assertion.add_extension_element(assertion) return response
def get_comment_book_session(self): """Gets the session for retrieving comment to book mappings. return: (osid.commenting.CommentBookSession) - a ``CommentBookSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book()`` is ``true``.* """ if not self.supports_comment_book(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CommentBookSession(runtime=self._runtime)
Gets the session for retrieving comment to book mappings. return: (osid.commenting.CommentBookSession) - a ``CommentBookSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book()`` is ``true``.*
Below is the the instruction that describes the task: ### Input: Gets the session for retrieving comment to book mappings. return: (osid.commenting.CommentBookSession) - a ``CommentBookSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book()`` is ``true``.* ### Response: def get_comment_book_session(self): """Gets the session for retrieving comment to book mappings. return: (osid.commenting.CommentBookSession) - a ``CommentBookSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_comment_book()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_comment_book()`` is ``true``.* """ if not self.supports_comment_book(): raise errors.Unimplemented() # pylint: disable=no-member return sessions.CommentBookSession(runtime=self._runtime)
def _timedelta_from_elements(elements): """ Return a timedelta from a dict of date elements. Accepts a dict containing any of the following: - years - months - days - hours - minutes - seconds If years and/or months are provided, it will use a naive calcuation of 365 days in a year and 30 days in a month. """ days = sum(( elements['days'], _months_to_days(elements.get('months', 0)), _years_to_days(elements.get('years', 0)) )) return datetime.timedelta(days=days, hours=elements.get('hours', 0), minutes=elements.get('minutes', 0), seconds=elements.get('seconds', 0))
Return a timedelta from a dict of date elements. Accepts a dict containing any of the following: - years - months - days - hours - minutes - seconds If years and/or months are provided, it will use a naive calcuation of 365 days in a year and 30 days in a month.
Below is the the instruction that describes the task: ### Input: Return a timedelta from a dict of date elements. Accepts a dict containing any of the following: - years - months - days - hours - minutes - seconds If years and/or months are provided, it will use a naive calcuation of 365 days in a year and 30 days in a month. ### Response: def _timedelta_from_elements(elements): """ Return a timedelta from a dict of date elements. Accepts a dict containing any of the following: - years - months - days - hours - minutes - seconds If years and/or months are provided, it will use a naive calcuation of 365 days in a year and 30 days in a month. """ days = sum(( elements['days'], _months_to_days(elements.get('months', 0)), _years_to_days(elements.get('years', 0)) )) return datetime.timedelta(days=days, hours=elements.get('hours', 0), minutes=elements.get('minutes', 0), seconds=elements.get('seconds', 0))
def get_events(self, user_id, start_date=None): """Gets the event log for a specific user, default start_date is 30 days ago :param int id: User id to view :param string start_date: "%Y-%m-%dT%H:%M:%s.0000-06:00" is the full formatted string. The Timezone part has to be HH:MM, notice the : there. :returns: https://softlayer.github.io/reference/datatypes/SoftLayer_Event_Log/ """ if start_date is None: date_object = datetime.datetime.today() - datetime.timedelta(days=30) start_date = date_object.strftime("%Y-%m-%dT00:00:00") object_filter = { 'userId': { 'operation': user_id }, 'eventCreateDate': { 'operation': 'greaterThanDate', 'options': [{'name': 'date', 'value': [start_date]}] } } events = self.client.call('Event_Log', 'getAllObjects', filter=object_filter) if events is None: events = [{'eventName': 'No Events Found'}] return events
Gets the event log for a specific user, default start_date is 30 days ago :param int id: User id to view :param string start_date: "%Y-%m-%dT%H:%M:%s.0000-06:00" is the full formatted string. The Timezone part has to be HH:MM, notice the : there. :returns: https://softlayer.github.io/reference/datatypes/SoftLayer_Event_Log/
Below is the the instruction that describes the task: ### Input: Gets the event log for a specific user, default start_date is 30 days ago :param int id: User id to view :param string start_date: "%Y-%m-%dT%H:%M:%s.0000-06:00" is the full formatted string. The Timezone part has to be HH:MM, notice the : there. :returns: https://softlayer.github.io/reference/datatypes/SoftLayer_Event_Log/ ### Response: def get_events(self, user_id, start_date=None): """Gets the event log for a specific user, default start_date is 30 days ago :param int id: User id to view :param string start_date: "%Y-%m-%dT%H:%M:%s.0000-06:00" is the full formatted string. The Timezone part has to be HH:MM, notice the : there. :returns: https://softlayer.github.io/reference/datatypes/SoftLayer_Event_Log/ """ if start_date is None: date_object = datetime.datetime.today() - datetime.timedelta(days=30) start_date = date_object.strftime("%Y-%m-%dT00:00:00") object_filter = { 'userId': { 'operation': user_id }, 'eventCreateDate': { 'operation': 'greaterThanDate', 'options': [{'name': 'date', 'value': [start_date]}] } } events = self.client.call('Event_Log', 'getAllObjects', filter=object_filter) if events is None: events = [{'eventName': 'No Events Found'}] return events
def create_shell_stream(self, kernel_id): """Create a new shell stream.""" self._check_kernel_id(kernel_id) return super(MappingKernelManager, self).create_shell_stream(kernel_id)
Create a new shell stream.
Below is the the instruction that describes the task: ### Input: Create a new shell stream. ### Response: def create_shell_stream(self, kernel_id): """Create a new shell stream.""" self._check_kernel_id(kernel_id) return super(MappingKernelManager, self).create_shell_stream(kernel_id)
def read_bits(self, num): """Read ``num`` number of bits from the stream :num: number of bits to read :returns: a list of ``num`` bits, or an empty list if EOF has been reached """ if num > len(self._bits): needed = num - len(self._bits) num_bytes = int(math.ceil(needed / 8.0)) read_bytes = self._stream.read(num_bytes) for bit in bytes_to_bits(read_bytes): self._bits.append(bit) res = [] while len(res) < num and len(self._bits) > 0: res.append(self._bits.popleft()) return res
Read ``num`` number of bits from the stream :num: number of bits to read :returns: a list of ``num`` bits, or an empty list if EOF has been reached
Below is the the instruction that describes the task: ### Input: Read ``num`` number of bits from the stream :num: number of bits to read :returns: a list of ``num`` bits, or an empty list if EOF has been reached ### Response: def read_bits(self, num): """Read ``num`` number of bits from the stream :num: number of bits to read :returns: a list of ``num`` bits, or an empty list if EOF has been reached """ if num > len(self._bits): needed = num - len(self._bits) num_bytes = int(math.ceil(needed / 8.0)) read_bytes = self._stream.read(num_bytes) for bit in bytes_to_bits(read_bytes): self._bits.append(bit) res = [] while len(res) < num and len(self._bits) > 0: res.append(self._bits.popleft()) return res
def generateOneTimePad(self, userStore): """ Generate a pad which can be used to authenticate via AMP. This pad will expire in L{ONE_TIME_PAD_DURATION} seconds. """ pad = secureRandom(16).encode('hex') self._oneTimePads[pad] = userStore.idInParent def expirePad(): self._oneTimePads.pop(pad, None) self.callLater(self.ONE_TIME_PAD_DURATION, expirePad) return pad
Generate a pad which can be used to authenticate via AMP. This pad will expire in L{ONE_TIME_PAD_DURATION} seconds.
Below is the the instruction that describes the task: ### Input: Generate a pad which can be used to authenticate via AMP. This pad will expire in L{ONE_TIME_PAD_DURATION} seconds. ### Response: def generateOneTimePad(self, userStore): """ Generate a pad which can be used to authenticate via AMP. This pad will expire in L{ONE_TIME_PAD_DURATION} seconds. """ pad = secureRandom(16).encode('hex') self._oneTimePads[pad] = userStore.idInParent def expirePad(): self._oneTimePads.pop(pad, None) self.callLater(self.ONE_TIME_PAD_DURATION, expirePad) return pad
def get_compositions_by_asset(self, asset_id): """Gets a list of compositions including the given asset. arg: asset_id (osid.id.Id): ``Id`` of the ``Asset`` return: (osid.repository.CompositionList) - the returned ``Composition list`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.repository.AssetCompositionSession.get_compositions_by_asset collection = JSONClientValidated('repository', collection='Composition', runtime=self._runtime) result = collection.find( dict({'assetIds': {'$in': [str(asset_id)]}}, **self._view_filter())).sort('_id', DESCENDING) return objects.CompositionList(result, runtime=self._runtime)
Gets a list of compositions including the given asset. arg: asset_id (osid.id.Id): ``Id`` of the ``Asset`` return: (osid.repository.CompositionList) - the returned ``Composition list`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets a list of compositions including the given asset. arg: asset_id (osid.id.Id): ``Id`` of the ``Asset`` return: (osid.repository.CompositionList) - the returned ``Composition list`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ### Response: def get_compositions_by_asset(self, asset_id): """Gets a list of compositions including the given asset. arg: asset_id (osid.id.Id): ``Id`` of the ``Asset`` return: (osid.repository.CompositionList) - the returned ``Composition list`` raise: NotFound - ``asset_id`` is not found raise: NullArgument - ``asset_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.repository.AssetCompositionSession.get_compositions_by_asset collection = JSONClientValidated('repository', collection='Composition', runtime=self._runtime) result = collection.find( dict({'assetIds': {'$in': [str(asset_id)]}}, **self._view_filter())).sort('_id', DESCENDING) return objects.CompositionList(result, runtime=self._runtime)
def get_time(self, **params): """https://developers.coinbase.com/api/v2#time""" response = self._get('v2', 'time', params=params) return self._make_api_object(response, APIObject)
https://developers.coinbase.com/api/v2#time
Below is the the instruction that describes the task: ### Input: https://developers.coinbase.com/api/v2#time ### Response: def get_time(self, **params): """https://developers.coinbase.com/api/v2#time""" response = self._get('v2', 'time', params=params) return self._make_api_object(response, APIObject)
def _set_active_policy(self, v, load=False): """ Setter method for active_policy, mapped from YANG variable /rbridge_id/secpolicy/active_policy (container) If this variable is read-only (config: false) in the source YANG file, then _set_active_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active_policy() directly. YANG Description: Set the Active policy """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=active_policy.active_policy, is_container='container', presence=False, yang_name="active-policy", rest_name="active-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Active policy set', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """active_policy must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=active_policy.active_policy, is_container='container', presence=False, yang_name="active-policy", rest_name="active-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Active policy set', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)""", }) self.__active_policy = t if hasattr(self, '_set'): self._set()
Setter method for active_policy, mapped from YANG variable /rbridge_id/secpolicy/active_policy (container) If this variable is read-only (config: false) in the source YANG file, then _set_active_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active_policy() directly. YANG Description: Set the Active policy
Below is the the instruction that describes the task: ### Input: Setter method for active_policy, mapped from YANG variable /rbridge_id/secpolicy/active_policy (container) If this variable is read-only (config: false) in the source YANG file, then _set_active_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active_policy() directly. YANG Description: Set the Active policy ### Response: def _set_active_policy(self, v, load=False): """ Setter method for active_policy, mapped from YANG variable /rbridge_id/secpolicy/active_policy (container) If this variable is read-only (config: false) in the source YANG file, then _set_active_policy is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active_policy() directly. YANG Description: Set the Active policy """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=active_policy.active_policy, is_container='container', presence=False, yang_name="active-policy", rest_name="active-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Active policy set', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """active_policy must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=active_policy.active_policy, is_container='container', presence=False, yang_name="active-policy", rest_name="active-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Active policy set', u'cli-incomplete-no': None, u'cli-suppress-no': None}}, 
namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='container', is_config=True)""", }) self.__active_policy = t if hasattr(self, '_set'): self._set()
def quaternion_rotate(X, Y): """ Calculate the rotation Parameters ---------- X : array (N,D) matrix, where N is points and D is dimension. Y: array (N,D) matrix, where N is points and D is dimension. Returns ------- rot : matrix Rotation matrix (D,D) """ N = X.shape[0] W = np.asarray([makeW(*Y[k]) for k in range(N)]) Q = np.asarray([makeQ(*X[k]) for k in range(N)]) Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)]) W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)]) A = np.sum(Qt_dot_W, axis=0) eigen = np.linalg.eigh(A) r = eigen[1][:, eigen[0].argmax()] rot = quaternion_transform(r) return rot
Calculate the rotation Parameters ---------- X : array (N,D) matrix, where N is points and D is dimension. Y: array (N,D) matrix, where N is points and D is dimension. Returns ------- rot : matrix Rotation matrix (D,D)
Below is the the instruction that describes the task: ### Input: Calculate the rotation Parameters ---------- X : array (N,D) matrix, where N is points and D is dimension. Y: array (N,D) matrix, where N is points and D is dimension. Returns ------- rot : matrix Rotation matrix (D,D) ### Response: def quaternion_rotate(X, Y): """ Calculate the rotation Parameters ---------- X : array (N,D) matrix, where N is points and D is dimension. Y: array (N,D) matrix, where N is points and D is dimension. Returns ------- rot : matrix Rotation matrix (D,D) """ N = X.shape[0] W = np.asarray([makeW(*Y[k]) for k in range(N)]) Q = np.asarray([makeQ(*X[k]) for k in range(N)]) Qt_dot_W = np.asarray([np.dot(Q[k].T, W[k]) for k in range(N)]) W_minus_Q = np.asarray([W[k] - Q[k] for k in range(N)]) A = np.sum(Qt_dot_W, axis=0) eigen = np.linalg.eigh(A) r = eigen[1][:, eigen[0].argmax()] rot = quaternion_transform(r) return rot
def _build_headers(self, method, auth_session): """Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid. """ token_type = auth_session.token_type token = auth_session.oauth2credential.access_token if not self._authorization_headers_valid(token_type, token): message = 'Invalid token_type or token.' raise LyftIllegalState(message) headers = { 'Authorization': ' '.join([token_type, token]), } if method in http.BODY_METHODS: headers.update(http.DEFAULT_CONTENT_HEADERS) return headers
Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid.
Below is the the instruction that describes the task: ### Input: Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid. ### Response: def _build_headers(self, method, auth_session): """Create headers for the request. Parameters method (str) HTTP method (e.g. 'POST'). auth_session (Session) The Session object containing OAuth 2.0 credentials. Returns headers (dict) Dictionary of access headers to attach to request. Raises LyftIllegalState (ApiError) Raised if headers are invalid. """ token_type = auth_session.token_type token = auth_session.oauth2credential.access_token if not self._authorization_headers_valid(token_type, token): message = 'Invalid token_type or token.' raise LyftIllegalState(message) headers = { 'Authorization': ' '.join([token_type, token]), } if method in http.BODY_METHODS: headers.update(http.DEFAULT_CONTENT_HEADERS) return headers
def plot_word(self,position): '''Blits a rendered word on to the main display surface''' posrectangle = pygame.Rect(position,self.word_size) self.used_pos.append(posrectangle) self.cloud.blit(self.rendered_word,position)
Blits a rendered word on to the main display surface
Below is the the instruction that describes the task: ### Input: Blits a rendered word on to the main display surface ### Response: def plot_word(self,position): '''Blits a rendered word on to the main display surface''' posrectangle = pygame.Rect(position,self.word_size) self.used_pos.append(posrectangle) self.cloud.blit(self.rendered_word,position)
def rect(self): """rect(self) -> PyObject *""" CheckParent(self) val = _fitz.Link_rect(self) val = Rect(val) return val
rect(self) -> PyObject *
Below is the the instruction that describes the task: ### Input: rect(self) -> PyObject * ### Response: def rect(self): """rect(self) -> PyObject *""" CheckParent(self) val = _fitz.Link_rect(self) val = Rect(val) return val
def segment_kmeans(self, rgb_weight, num_clusters, hue_weight=0.0): """ Segment a color image using KMeans based on spatial and color distances. Black pixels will automatically be assigned to their own 'background' cluster. Parameters ---------- rgb_weight : float weighting of RGB distance relative to spatial and hue distance num_clusters : int number of clusters to use hue_weight : float weighting of hue from hsv relative to spatial and RGB distance Returns ------- :obj:`SegmentationImage` image containing the segment labels """ # form features array label_offset = 1 nonzero_px = np.where(self.data != 0.0) nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]] # get hsv data if specified color_vals = rgb_weight * \ self._data[nonzero_px[:, 0], nonzero_px[:, 1], :] if hue_weight > 0.0: hsv_data = cv2.cvtColor(self.data, cv2.COLOR_BGR2HSV) color_vals = np.c_[color_vals, hue_weight * hsv_data[nonzero_px[:, 0], nonzero_px[:, 1], :1]] features = np.c_[nonzero_px, color_vals.astype(np.float32)] # perform KMeans clustering kmeans = sc.KMeans(n_clusters=num_clusters) labels = kmeans.fit_predict(features) # create output label array label_im = np.zeros([self.height, self.width]).astype(np.uint8) label_im[nonzero_px[:, 0], nonzero_px[:, 1]] = labels + label_offset return SegmentationImage(label_im, frame=self.frame)
Segment a color image using KMeans based on spatial and color distances. Black pixels will automatically be assigned to their own 'background' cluster. Parameters ---------- rgb_weight : float weighting of RGB distance relative to spatial and hue distance num_clusters : int number of clusters to use hue_weight : float weighting of hue from hsv relative to spatial and RGB distance Returns ------- :obj:`SegmentationImage` image containing the segment labels
Below is the the instruction that describes the task: ### Input: Segment a color image using KMeans based on spatial and color distances. Black pixels will automatically be assigned to their own 'background' cluster. Parameters ---------- rgb_weight : float weighting of RGB distance relative to spatial and hue distance num_clusters : int number of clusters to use hue_weight : float weighting of hue from hsv relative to spatial and RGB distance Returns ------- :obj:`SegmentationImage` image containing the segment labels ### Response: def segment_kmeans(self, rgb_weight, num_clusters, hue_weight=0.0): """ Segment a color image using KMeans based on spatial and color distances. Black pixels will automatically be assigned to their own 'background' cluster. Parameters ---------- rgb_weight : float weighting of RGB distance relative to spatial and hue distance num_clusters : int number of clusters to use hue_weight : float weighting of hue from hsv relative to spatial and RGB distance Returns ------- :obj:`SegmentationImage` image containing the segment labels """ # form features array label_offset = 1 nonzero_px = np.where(self.data != 0.0) nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]] # get hsv data if specified color_vals = rgb_weight * \ self._data[nonzero_px[:, 0], nonzero_px[:, 1], :] if hue_weight > 0.0: hsv_data = cv2.cvtColor(self.data, cv2.COLOR_BGR2HSV) color_vals = np.c_[color_vals, hue_weight * hsv_data[nonzero_px[:, 0], nonzero_px[:, 1], :1]] features = np.c_[nonzero_px, color_vals.astype(np.float32)] # perform KMeans clustering kmeans = sc.KMeans(n_clusters=num_clusters) labels = kmeans.fit_predict(features) # create output label array label_im = np.zeros([self.height, self.width]).astype(np.uint8) label_im[nonzero_px[:, 0], nonzero_px[:, 1]] = labels + label_offset return SegmentationImage(label_im, frame=self.frame)
def import_family(self, rfa_file): """Append a import family entry to the journal. This instructs Revit to import a family into the opened model. Args: rfa_file (str): full path of the family file """ self._add_entry(templates.IMPORT_FAMILY .format(family_file=rfa_file))
Append a import family entry to the journal. This instructs Revit to import a family into the opened model. Args: rfa_file (str): full path of the family file
Below is the the instruction that describes the task: ### Input: Append a import family entry to the journal. This instructs Revit to import a family into the opened model. Args: rfa_file (str): full path of the family file ### Response: def import_family(self, rfa_file): """Append a import family entry to the journal. This instructs Revit to import a family into the opened model. Args: rfa_file (str): full path of the family file """ self._add_entry(templates.IMPORT_FAMILY .format(family_file=rfa_file))
def echo_with_markers(text, marker='=', marker_color='blue', text_color=None): """Print a text to the screen with markers surrounding it. The output looks like: ======== text ======== with marker='=' right now. In the event that the terminal window is too small, the text is printed without markers. :param str text: the text to echo :param str marker: the marker to surround the text :param str marker_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') :param str text_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') """ text = ' ' + text + ' ' width, _ = click.get_terminal_size() if len(text) >= width: click.echo(text) # this is probably never the case else: leftovers = width - len(text) click.secho(marker * (leftovers / 2), fg=marker_color, nl=False) click.secho(text, nl=False, fg=text_color) click.secho(marker * (leftovers / 2 + leftovers % 2), fg=marker_color)
Print a text to the screen with markers surrounding it. The output looks like: ======== text ======== with marker='=' right now. In the event that the terminal window is too small, the text is printed without markers. :param str text: the text to echo :param str marker: the marker to surround the text :param str marker_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') :param str text_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white')
Below is the the instruction that describes the task: ### Input: Print a text to the screen with markers surrounding it. The output looks like: ======== text ======== with marker='=' right now. In the event that the terminal window is too small, the text is printed without markers. :param str text: the text to echo :param str marker: the marker to surround the text :param str marker_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') :param str text_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') ### Response: def echo_with_markers(text, marker='=', marker_color='blue', text_color=None): """Print a text to the screen with markers surrounding it. The output looks like: ======== text ======== with marker='=' right now. In the event that the terminal window is too small, the text is printed without markers. :param str text: the text to echo :param str marker: the marker to surround the text :param str marker_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') :param str text_color: one of ('black' | 'red' | 'green' | 'yellow' | 'blue' | 'magenta' | 'cyan' | 'white') """ text = ' ' + text + ' ' width, _ = click.get_terminal_size() if len(text) >= width: click.echo(text) # this is probably never the case else: leftovers = width - len(text) click.secho(marker * (leftovers / 2), fg=marker_color, nl=False) click.secho(text, nl=False, fg=text_color) click.secho(marker * (leftovers / 2 + leftovers % 2), fg=marker_color)
def main_bigg(args=None, urlopen=urlopen): """Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing. """ parser = argparse.ArgumentParser( description='Import from BiGG database') parser.add_argument('--dest', metavar='path', default='.', help='Destination directory (default is ".")') parser.add_argument('--no-exchange', action='store_true', help=('Disable importing exchange reactions as' ' exchange compound file.')) parser.add_argument('--split-subsystem', action='store_true', help='Enable splitting reaction files by subsystem') parser.add_argument('--merge-compounds', action='store_true', help=('Merge identical compounds occuring in various' ' compartments.')) parser.add_argument('--force', action='store_true', help='Enable overwriting model files') parser.add_argument('id', help='BiGG model to import ("list" to see all)') args = parser.parse_args(args) # Set up logging for the command line interface if 'PSAMM_DEBUG' in os.environ: level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None) if level is not None: logging.basicConfig(level=level) else: logging.basicConfig( level=logging.INFO, format='%(levelname)s: %(message)s') # Print list of available models if args.id == 'list': print('Available models:') f = urlopen('http://bigg.ucsd.edu/api/v2/models') doc = json.loads(f.read().decode('utf-8')) results = doc['results'] id_width = min(max(len(result['bigg_id']) for result in results), 16) for result in sorted(results, key=lambda x: x.get('organism')): print('{} {}'.format( result.get('bigg_id').ljust(id_width), result.get('organism'))) return 0 importer_entry = None try: importer_entry = next( pkg_resources.iter_entry_points('psamm.importer', 'JSON')) except StopIteration: logger.error('Failed to locate the COBRA JSON model importer!') sys.exit(-1) importer_class = importer_entry.load() importer = importer_class() try: f = 
urlopen( 'http://bigg.ucsd.edu/api/v2/models/{}/download'.format( url_quote(args.id))) model = importer.import_model(codecs.getreader('utf-8')(f)) except ModelLoadError as e: logger.error('Failed to load model!', exc_info=True) importer.help() parser.error(text_type(e)) except ParseError as e: logger.error('Failed to parse model!', exc_info=True) logger.error(text_type(e)) sys.exit(-1) if args.merge_compounds: compounds_before = len(model.compounds) sbml.merge_equivalent_compounds(model) if len(model.compounds) < compounds_before: logger.info( 'Merged {} compound entries into {} entries by' ' removing duplicates in various compartments'.format( compounds_before, len(model.compounds))) print('Model: {}'.format(model.name)) print('- Biomass reaction: {}'.format(model.biomass_reaction)) print('- Compartments: {}'.format(len(model.compartments))) print('- Compounds: {}'.format(len(model.compounds))) print('- Reactions: {}'.format(len(model.reactions))) print('- Genes: {}'.format(count_genes(model))) # Check if dest directory is empty. If we get an error assume that the # directory does not exist. dest_is_empty = False try: dest_is_empty = len(os.listdir(args.dest)) == 0 except OSError: dest_is_empty = True if not dest_is_empty: if not args.force: logger.error('Destination directory is not empty. Use --force' ' option to proceed anyway, overwriting any existing' ' files in {}'.format(args.dest)) return 1 else: logger.warning('Destination directory is not empty, overwriting' ' existing files in {}'.format(args.dest)) # Create destination directory if not exists dest = args.dest mkdir_p(dest) convert_exchange = not args.no_exchange write_yaml_model(model, dest, convert_exchange=convert_exchange, split_subsystem=args.split_subsystem)
Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing.
Below is the the instruction that describes the task: ### Input: Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing. ### Response: def main_bigg(args=None, urlopen=urlopen): """Entry point for BiGG import program. If the ``args`` are provided, these should be a list of strings that will be used instead of ``sys.argv[1:]``. This is mostly useful for testing. """ parser = argparse.ArgumentParser( description='Import from BiGG database') parser.add_argument('--dest', metavar='path', default='.', help='Destination directory (default is ".")') parser.add_argument('--no-exchange', action='store_true', help=('Disable importing exchange reactions as' ' exchange compound file.')) parser.add_argument('--split-subsystem', action='store_true', help='Enable splitting reaction files by subsystem') parser.add_argument('--merge-compounds', action='store_true', help=('Merge identical compounds occuring in various' ' compartments.')) parser.add_argument('--force', action='store_true', help='Enable overwriting model files') parser.add_argument('id', help='BiGG model to import ("list" to see all)') args = parser.parse_args(args) # Set up logging for the command line interface if 'PSAMM_DEBUG' in os.environ: level = getattr(logging, os.environ['PSAMM_DEBUG'].upper(), None) if level is not None: logging.basicConfig(level=level) else: logging.basicConfig( level=logging.INFO, format='%(levelname)s: %(message)s') # Print list of available models if args.id == 'list': print('Available models:') f = urlopen('http://bigg.ucsd.edu/api/v2/models') doc = json.loads(f.read().decode('utf-8')) results = doc['results'] id_width = min(max(len(result['bigg_id']) for result in results), 16) for result in sorted(results, key=lambda x: x.get('organism')): print('{} {}'.format( result.get('bigg_id').ljust(id_width), result.get('organism'))) return 0 importer_entry = None try: 
importer_entry = next( pkg_resources.iter_entry_points('psamm.importer', 'JSON')) except StopIteration: logger.error('Failed to locate the COBRA JSON model importer!') sys.exit(-1) importer_class = importer_entry.load() importer = importer_class() try: f = urlopen( 'http://bigg.ucsd.edu/api/v2/models/{}/download'.format( url_quote(args.id))) model = importer.import_model(codecs.getreader('utf-8')(f)) except ModelLoadError as e: logger.error('Failed to load model!', exc_info=True) importer.help() parser.error(text_type(e)) except ParseError as e: logger.error('Failed to parse model!', exc_info=True) logger.error(text_type(e)) sys.exit(-1) if args.merge_compounds: compounds_before = len(model.compounds) sbml.merge_equivalent_compounds(model) if len(model.compounds) < compounds_before: logger.info( 'Merged {} compound entries into {} entries by' ' removing duplicates in various compartments'.format( compounds_before, len(model.compounds))) print('Model: {}'.format(model.name)) print('- Biomass reaction: {}'.format(model.biomass_reaction)) print('- Compartments: {}'.format(len(model.compartments))) print('- Compounds: {}'.format(len(model.compounds))) print('- Reactions: {}'.format(len(model.reactions))) print('- Genes: {}'.format(count_genes(model))) # Check if dest directory is empty. If we get an error assume that the # directory does not exist. dest_is_empty = False try: dest_is_empty = len(os.listdir(args.dest)) == 0 except OSError: dest_is_empty = True if not dest_is_empty: if not args.force: logger.error('Destination directory is not empty. 
Use --force' ' option to proceed anyway, overwriting any existing' ' files in {}'.format(args.dest)) return 1 else: logger.warning('Destination directory is not empty, overwriting' ' existing files in {}'.format(args.dest)) # Create destination directory if not exists dest = args.dest mkdir_p(dest) convert_exchange = not args.no_exchange write_yaml_model(model, dest, convert_exchange=convert_exchange, split_subsystem=args.split_subsystem)
def get_dc_owner(raises, mask_if_self): """ Convenience function to return owner of /dev/console. If raises is True, this raises an exception on any error. If not, it returns any error string as the owner name. If owner is self, and if mask_if_self, returns "<self>".""" try: from pwd import getpwuid owner_uid = os.stat('/dev/console').st_uid self_uid = os.getuid() if mask_if_self and owner_uid == self_uid: return "<self>" owner_name = getpwuid(owner_uid).pw_name return owner_name except Exception as e: if raises: raise e else: return str(e)
Convenience function to return owner of /dev/console. If raises is True, this raises an exception on any error. If not, it returns any error string as the owner name. If owner is self, and if mask_if_self, returns "<self>".
Below is the the instruction that describes the task: ### Input: Convenience function to return owner of /dev/console. If raises is True, this raises an exception on any error. If not, it returns any error string as the owner name. If owner is self, and if mask_if_self, returns "<self>". ### Response: def get_dc_owner(raises, mask_if_self): """ Convenience function to return owner of /dev/console. If raises is True, this raises an exception on any error. If not, it returns any error string as the owner name. If owner is self, and if mask_if_self, returns "<self>".""" try: from pwd import getpwuid owner_uid = os.stat('/dev/console').st_uid self_uid = os.getuid() if mask_if_self and owner_uid == self_uid: return "<self>" owner_name = getpwuid(owner_uid).pw_name return owner_name except Exception as e: if raises: raise e else: return str(e)
def spawn_batch_jobs(job, shared_ids, input_args): """ Spawns an alignment job for every sample in the input configuration file """ samples = [] config = input_args['config'] with open(config, 'r') as f_in: for line in f_in: line = line.strip().split(',') uuid = line[0] urls = line[1:] samples.append((uuid, urls)) for sample in samples: job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G')
Spawns an alignment job for every sample in the input configuration file
Below is the the instruction that describes the task: ### Input: Spawns an alignment job for every sample in the input configuration file ### Response: def spawn_batch_jobs(job, shared_ids, input_args): """ Spawns an alignment job for every sample in the input configuration file """ samples = [] config = input_args['config'] with open(config, 'r') as f_in: for line in f_in: line = line.strip().split(',') uuid = line[0] urls = line[1:] samples.append((uuid, urls)) for sample in samples: job.addChildJobFn(alignment, shared_ids, input_args, sample, cores=32, memory='20 G', disk='100 G')
def _to_service(self, thing): """Convert to Analysis Service :param thing: UID/Catalog Brain/Object/Something :returns: Analysis Service object or None """ # Convert UIDs to objects if api.is_uid(thing): thing = api.get_object_by_uid(thing, None) # Bail out if the thing is not a valid object if not api.is_object(thing): logger.warn("'{}' is not a valid object!".format(repr(thing))) return None # Ensure we have an object here and not a brain obj = api.get_object(thing) if IAnalysisService.providedBy(obj): return obj if IAnalysis.providedBy(obj): return obj.getAnalysisService() # An object, but neither an Analysis nor AnalysisService? # This should never happen. portal_type = api.get_portal_type(obj) logger.error("ARAnalysesField doesn't accept objects from {} type. " "The object will be dismissed.".format(portal_type)) return None
Convert to Analysis Service :param thing: UID/Catalog Brain/Object/Something :returns: Analysis Service object or None
Below is the the instruction that describes the task: ### Input: Convert to Analysis Service :param thing: UID/Catalog Brain/Object/Something :returns: Analysis Service object or None ### Response: def _to_service(self, thing): """Convert to Analysis Service :param thing: UID/Catalog Brain/Object/Something :returns: Analysis Service object or None """ # Convert UIDs to objects if api.is_uid(thing): thing = api.get_object_by_uid(thing, None) # Bail out if the thing is not a valid object if not api.is_object(thing): logger.warn("'{}' is not a valid object!".format(repr(thing))) return None # Ensure we have an object here and not a brain obj = api.get_object(thing) if IAnalysisService.providedBy(obj): return obj if IAnalysis.providedBy(obj): return obj.getAnalysisService() # An object, but neither an Analysis nor AnalysisService? # This should never happen. portal_type = api.get_portal_type(obj) logger.error("ARAnalysesField doesn't accept objects from {} type. " "The object will be dismissed.".format(portal_type)) return None
def add_parameterized_validator(param_validator, base_tag, tag_prefix=None): """ Add a parameterized validator for the given tag prefix. If tag_prefix is None, it is automatically constructed as u'!~%s(' % param_validator.__name__ A parametrized validator is a function that accepts a document node (in the form of a Python object), a schema node (also a Python object), and other parameters (integer or string) that directly come from its complete YAML name in the schema. It returns True if the document node is valid according to the schema node. Note that the validator function does not have to recurse in sub-nodes, because XYS already does that. """ # pylint: disable-msg=C0111,W0621 if not tag_prefix: tag_prefix = u'!~%s(' % param_validator.__name__ def multi_constructor(loader, tag_suffix, node): def temp_validator(node, schema): return param_validator(node, schema, *_split_params(tag_prefix, tag_suffix)) temp_validator.__name__ = str(tag_prefix + tag_suffix) return ContructorValidatorNode(base_tag, base_tag, temp_validator)(loader, node) yaml.add_multi_constructor(tag_prefix, multi_constructor)
Add a parameterized validator for the given tag prefix. If tag_prefix is None, it is automatically constructed as u'!~%s(' % param_validator.__name__ A parametrized validator is a function that accepts a document node (in the form of a Python object), a schema node (also a Python object), and other parameters (integer or string) that directly come from its complete YAML name in the schema. It returns True if the document node is valid according to the schema node. Note that the validator function does not have to recurse in sub-nodes, because XYS already does that.
Below is the the instruction that describes the task: ### Input: Add a parameterized validator for the given tag prefix. If tag_prefix is None, it is automatically constructed as u'!~%s(' % param_validator.__name__ A parametrized validator is a function that accepts a document node (in the form of a Python object), a schema node (also a Python object), and other parameters (integer or string) that directly come from its complete YAML name in the schema. It returns True if the document node is valid according to the schema node. Note that the validator function does not have to recurse in sub-nodes, because XYS already does that. ### Response: def add_parameterized_validator(param_validator, base_tag, tag_prefix=None): """ Add a parameterized validator for the given tag prefix. If tag_prefix is None, it is automatically constructed as u'!~%s(' % param_validator.__name__ A parametrized validator is a function that accepts a document node (in the form of a Python object), a schema node (also a Python object), and other parameters (integer or string) that directly come from its complete YAML name in the schema. It returns True if the document node is valid according to the schema node. Note that the validator function does not have to recurse in sub-nodes, because XYS already does that. """ # pylint: disable-msg=C0111,W0621 if not tag_prefix: tag_prefix = u'!~%s(' % param_validator.__name__ def multi_constructor(loader, tag_suffix, node): def temp_validator(node, schema): return param_validator(node, schema, *_split_params(tag_prefix, tag_suffix)) temp_validator.__name__ = str(tag_prefix + tag_suffix) return ContructorValidatorNode(base_tag, base_tag, temp_validator)(loader, node) yaml.add_multi_constructor(tag_prefix, multi_constructor)
def relative_date_to_date(p_date, p_offset=None): """ Transforms a relative date into a date object. The following formats are understood: * [0-9][dwmy] * 'yesterday', 'today' or 'tomorrow' * days of the week (in full or abbreviated) """ result = None p_date = p_date.lower() p_offset = p_offset or date.today() relative = re.match('(?P<length>-?[0-9]+)(?P<period>[dwmyb])$', p_date, re.I) monday = 'mo(n(day)?)?$' tuesday = 'tu(e(sday)?)?$' wednesday = 'we(d(nesday)?)?$' thursday = 'th(u(rsday)?)?$' friday = 'fr(i(day)?)?$' saturday = 'sa(t(urday)?)?$' sunday = 'su(n(day)?)?$' weekday = re.match('|'.join( [monday, tuesday, wednesday, thursday, friday, saturday, sunday]), p_date) if relative: length = relative.group('length') period = relative.group('period') result = _convert_pattern(length, period, p_offset) elif weekday: result = _convert_weekday_pattern(weekday.group(0)) elif re.match('tod(ay)?$', p_date): result = _convert_pattern('0', 'd') elif re.match('tom(orrow)?$', p_date): result = _convert_pattern('1', 'd') elif re.match('yes(terday)?$', p_date): result = _convert_pattern('-1', 'd') return result
Transforms a relative date into a date object. The following formats are understood: * [0-9][dwmy] * 'yesterday', 'today' or 'tomorrow' * days of the week (in full or abbreviated)
Below is the the instruction that describes the task: ### Input: Transforms a relative date into a date object. The following formats are understood: * [0-9][dwmy] * 'yesterday', 'today' or 'tomorrow' * days of the week (in full or abbreviated) ### Response: def relative_date_to_date(p_date, p_offset=None): """ Transforms a relative date into a date object. The following formats are understood: * [0-9][dwmy] * 'yesterday', 'today' or 'tomorrow' * days of the week (in full or abbreviated) """ result = None p_date = p_date.lower() p_offset = p_offset or date.today() relative = re.match('(?P<length>-?[0-9]+)(?P<period>[dwmyb])$', p_date, re.I) monday = 'mo(n(day)?)?$' tuesday = 'tu(e(sday)?)?$' wednesday = 'we(d(nesday)?)?$' thursday = 'th(u(rsday)?)?$' friday = 'fr(i(day)?)?$' saturday = 'sa(t(urday)?)?$' sunday = 'su(n(day)?)?$' weekday = re.match('|'.join( [monday, tuesday, wednesday, thursday, friday, saturday, sunday]), p_date) if relative: length = relative.group('length') period = relative.group('period') result = _convert_pattern(length, period, p_offset) elif weekday: result = _convert_weekday_pattern(weekday.group(0)) elif re.match('tod(ay)?$', p_date): result = _convert_pattern('0', 'd') elif re.match('tom(orrow)?$', p_date): result = _convert_pattern('1', 'd') elif re.match('yes(terday)?$', p_date): result = _convert_pattern('-1', 'd') return result
def run_from_argv(self, argv): """ Set up any environment changes requested (e.g., Python path and Django settings), then run this command. """ parser = self.create_parser(argv[0], argv[1]) self.arguments = parser.parse_args(argv[2:]) handle_default_options(self.arguments) options = vars(self.arguments) self.execute(**options)
Set up any environment changes requested (e.g., Python path and Django settings), then run this command.
Below is the the instruction that describes the task: ### Input: Set up any environment changes requested (e.g., Python path and Django settings), then run this command. ### Response: def run_from_argv(self, argv): """ Set up any environment changes requested (e.g., Python path and Django settings), then run this command. """ parser = self.create_parser(argv[0], argv[1]) self.arguments = parser.parse_args(argv[2:]) handle_default_options(self.arguments) options = vars(self.arguments) self.execute(**options)
def old_tracer_correlation( self ): """ Deprecated tracer correlation factor for this simulation. Args: None Returns: (Float): The tracer correlation factor, f. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use `self.tracer_correlation`. """ if self.has_run: return self.atoms.sum_dr_squared() / float( self.number_of_jumps ) else: return None
Deprecated tracer correlation factor for this simulation. Args: None Returns: (Float): The tracer correlation factor, f. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use `self.tracer_correlation`.
Below is the the instruction that describes the task: ### Input: Deprecated tracer correlation factor for this simulation. Args: None Returns: (Float): The tracer correlation factor, f. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use `self.tracer_correlation`. ### Response: def old_tracer_correlation( self ): """ Deprecated tracer correlation factor for this simulation. Args: None Returns: (Float): The tracer correlation factor, f. Notes: This function assumes that the jump distance between sites has been normalised to a=1. If the jump distance is not equal to 1 then the value returned by this function should be divided by a^2. Even better, use `self.tracer_correlation`. """ if self.has_run: return self.atoms.sum_dr_squared() / float( self.number_of_jumps ) else: return None
def strftimegen(start_dt, end_dt): """ Return a generator function for datetime format strings. The generator produce a day-by-day sequence starting from the first datetime to the second datetime argument. """ if start_dt > end_dt: raise ValueError("the start datetime is after the end datetime: (%r,%r)" % (start_dt, end_dt)) def iterftime(string): date_subs = [i for i in DATE_FORMATS if i[1].search(string) is not None] if not date_subs: yield string else: dt = start_dt date_path = string while end_dt >= dt: for item in date_subs: date_path = item[1].sub(dt.strftime(item[0]), date_path) yield date_path dt = dt + datetime.timedelta(days=1) return iterftime
Return a generator function for datetime format strings. The generator produce a day-by-day sequence starting from the first datetime to the second datetime argument.
Below is the the instruction that describes the task: ### Input: Return a generator function for datetime format strings. The generator produce a day-by-day sequence starting from the first datetime to the second datetime argument. ### Response: def strftimegen(start_dt, end_dt): """ Return a generator function for datetime format strings. The generator produce a day-by-day sequence starting from the first datetime to the second datetime argument. """ if start_dt > end_dt: raise ValueError("the start datetime is after the end datetime: (%r,%r)" % (start_dt, end_dt)) def iterftime(string): date_subs = [i for i in DATE_FORMATS if i[1].search(string) is not None] if not date_subs: yield string else: dt = start_dt date_path = string while end_dt >= dt: for item in date_subs: date_path = item[1].sub(dt.strftime(item[0]), date_path) yield date_path dt = dt + datetime.timedelta(days=1) return iterftime
def get_assessments_by_banks(self, bank_ids): """Gets the list of ``Assessments`` corresponding to a list of ``Banks``. arg: bank_ids (osid.id.IdList): list of bank ``Ids`` return: (osid.assessment.AssessmentList) - list of assessments raise: NullArgument - ``bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resources_by_bins assessment_list = [] for bank_id in bank_ids: assessment_list += list( self.get_assessments_by_bank(bank_id)) return objects.AssessmentList(assessment_list)
Gets the list of ``Assessments`` corresponding to a list of ``Banks``. arg: bank_ids (osid.id.IdList): list of bank ``Ids`` return: (osid.assessment.AssessmentList) - list of assessments raise: NullArgument - ``bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets the list of ``Assessments`` corresponding to a list of ``Banks``. arg: bank_ids (osid.id.IdList): list of bank ``Ids`` return: (osid.assessment.AssessmentList) - list of assessments raise: NullArgument - ``bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* ### Response: def get_assessments_by_banks(self, bank_ids): """Gets the list of ``Assessments`` corresponding to a list of ``Banks``. arg: bank_ids (osid.id.IdList): list of bank ``Ids`` return: (osid.assessment.AssessmentList) - list of assessments raise: NullArgument - ``bank_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_resources_by_bins assessment_list = [] for bank_id in bank_ids: assessment_list += list( self.get_assessments_by_bank(bank_id)) return objects.AssessmentList(assessment_list)
def go_standby(self, comment=None): """ Executes a Go-Standby operation on the specified node. To get the status of the current node/s, run :func:`status` :param str comment: optional comment to audit :raises NodeCommandFailed: engine cannot go standby :return: None """ self.make_request( NodeCommandFailed, method='update', resource='go_standby', params={'comment': comment})
Executes a Go-Standby operation on the specified node. To get the status of the current node/s, run :func:`status` :param str comment: optional comment to audit :raises NodeCommandFailed: engine cannot go standby :return: None
Below is the the instruction that describes the task: ### Input: Executes a Go-Standby operation on the specified node. To get the status of the current node/s, run :func:`status` :param str comment: optional comment to audit :raises NodeCommandFailed: engine cannot go standby :return: None ### Response: def go_standby(self, comment=None): """ Executes a Go-Standby operation on the specified node. To get the status of the current node/s, run :func:`status` :param str comment: optional comment to audit :raises NodeCommandFailed: engine cannot go standby :return: None """ self.make_request( NodeCommandFailed, method='update', resource='go_standby', params={'comment': comment})
def get_session_info(self, session=0): """get info about a session Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. Returns ------- (session_id [int], start [datetime], end [datetime], num_cmds [int], remark [unicode]) Sessions that are running or did not exit cleanly will have `end=None` and `num_cmds=None`. """ if session <= 0: session += self.session_number query = "SELECT * from sessions where session == ?" return self.db.execute(query, (session,)).fetchone()
get info about a session Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. Returns ------- (session_id [int], start [datetime], end [datetime], num_cmds [int], remark [unicode]) Sessions that are running or did not exit cleanly will have `end=None` and `num_cmds=None`.
Below is the the instruction that describes the task: ### Input: get info about a session Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. Returns ------- (session_id [int], start [datetime], end [datetime], num_cmds [int], remark [unicode]) Sessions that are running or did not exit cleanly will have `end=None` and `num_cmds=None`. ### Response: def get_session_info(self, session=0): """get info about a session Parameters ---------- session : int Session number to retrieve. The current session is 0, and negative numbers count back from current session, so -1 is previous session. Returns ------- (session_id [int], start [datetime], end [datetime], num_cmds [int], remark [unicode]) Sessions that are running or did not exit cleanly will have `end=None` and `num_cmds=None`. """ if session <= 0: session += self.session_number query = "SELECT * from sessions where session == ?" return self.db.execute(query, (session,)).fetchone()
def retrieve(self, id) : """ Retrieve a single contact Returns a single contact available to the user, according to the unique contact ID provided If the specified contact does not exist, the request will return an error :calls: ``get /contacts/{id}`` :param int id: Unique identifier of a Contact. :return: Dictionary that support attriubte-style access and represent Contact resource. :rtype: dict """ _, _, contact = self.http_client.get("/contacts/{id}".format(id=id)) return contact
Retrieve a single contact Returns a single contact available to the user, according to the unique contact ID provided If the specified contact does not exist, the request will return an error :calls: ``get /contacts/{id}`` :param int id: Unique identifier of a Contact. :return: Dictionary that support attriubte-style access and represent Contact resource. :rtype: dict
Below is the the instruction that describes the task: ### Input: Retrieve a single contact Returns a single contact available to the user, according to the unique contact ID provided If the specified contact does not exist, the request will return an error :calls: ``get /contacts/{id}`` :param int id: Unique identifier of a Contact. :return: Dictionary that support attriubte-style access and represent Contact resource. :rtype: dict ### Response: def retrieve(self, id) : """ Retrieve a single contact Returns a single contact available to the user, according to the unique contact ID provided If the specified contact does not exist, the request will return an error :calls: ``get /contacts/{id}`` :param int id: Unique identifier of a Contact. :return: Dictionary that support attriubte-style access and represent Contact resource. :rtype: dict """ _, _, contact = self.http_client.get("/contacts/{id}".format(id=id)) return contact
def _select(self, current_venvs, requirements=None, interpreter='', uuid='', options=None): """Select which venv satisfy the received requirements.""" if uuid: logger.debug("Searching a venv by uuid: %s", uuid) venv = self._match_by_uuid(current_venvs, uuid) else: logger.debug("Searching a venv for: reqs=%s interpreter=%s options=%s", requirements, interpreter, options) venv = self._match_by_requirements(current_venvs, requirements, interpreter, options) if venv is None: logger.debug("No matching venv found :(") return logger.debug("Found a matching venv! %s", venv) return venv['metadata']
Select which venv satisfy the received requirements.
Below is the the instruction that describes the task: ### Input: Select which venv satisfy the received requirements. ### Response: def _select(self, current_venvs, requirements=None, interpreter='', uuid='', options=None): """Select which venv satisfy the received requirements.""" if uuid: logger.debug("Searching a venv by uuid: %s", uuid) venv = self._match_by_uuid(current_venvs, uuid) else: logger.debug("Searching a venv for: reqs=%s interpreter=%s options=%s", requirements, interpreter, options) venv = self._match_by_requirements(current_venvs, requirements, interpreter, options) if venv is None: logger.debug("No matching venv found :(") return logger.debug("Found a matching venv! %s", venv) return venv['metadata']
def set_disk_cache(self, results, key=None):
    """Store *results* in the on-disk cache.

    :param results: Any picklable value to persist.
    :param key: Cache key; defaults to ``self.model.hash`` so cached
        results are keyed to the model state that produced them.
    """
    if not getattr(self, 'disk_cache_location', False):
        self.init_disk_cache()
    key = self.model.hash if key is None else key
    # Context manager guarantees the shelf is closed (and flushed to
    # disk) even if pickling the results raises; the original leaked
    # the open handle on any exception between open() and close().
    with shelve.open(self.disk_cache_location) as disk_cache:
        disk_cache[key] = results
Store result in disk cache with key matching model state.
Below is the instruction that describes the task:
### Input:
Store result in disk cache with key matching model state.
### Response:
def set_disk_cache(self, results, key=None):
    """Store result in disk cache with key matching model state."""
    if not getattr(self, 'disk_cache_location', False):
        self.init_disk_cache()
    disk_cache = shelve.open(self.disk_cache_location)
    key = self.model.hash if key is None else key
    disk_cache[key] = results
    disk_cache.close()
def intermediates(self):
    """
    A list of asn1crypto.x509.Certificate objects that were presented
    as intermediates by the server
    """
    # A closed/absent connection has nothing to report on.
    if self._ssl is None:
        self._raise_closed()

    # The chain is fetched lazily: parse the peer's certificates on
    # first access only.
    if self._certificate is None:
        self._read_certificates()

    return self._intermediates
A list of asn1crypto.x509.Certificate objects that were presented as intermediates by the server
Below is the instruction that describes the task:
### Input:
A list of asn1crypto.x509.Certificate objects that were presented as
intermediates by the server
### Response:
def intermediates(self):
    """
    A list of asn1crypto.x509.Certificate objects that were presented
    as intermediates by the server
    """

    if self._ssl is None:
        self._raise_closed()
    if self._certificate is None:
        self._read_certificates()
    return self._intermediates
def get_var_name(nc):
    """Guess the name of the data variable in an open NetCDF file.

    Every variable except the well-known coordinate/bounds names is a
    candidate; the guess is returned only when exactly one candidate
    remains, otherwise None.
    """
    coordinate_names = {
        'lat', 'lat_bnds', 'lon', 'lon_bnds', 'time',
        'latitude', 'longitude', 'bnds',
    }
    candidates = set(nc.variables.keys()) - coordinate_names
    if len(candidates) == 1:
        return candidates.pop()
    return None
Guesses the variable_name of an open NetCDF file
Below is the instruction that describes the task:
### Input:
Guesses the variable_name of an open NetCDF file
### Response:
def get_var_name(nc):
    """Guesses the variable_name of an open NetCDF file
    """
    non_variable_names = [
        'lat', 'lat_bnds', 'lon', 'lon_bnds', 'time',
        'latitude', 'longitude', 'bnds'
    ]
    _vars = set(nc.variables.keys())
    _vars.difference_update(set(non_variable_names))
    if len(_vars) == 1:
        return _vars.pop()
    return None
def _get_df(self):
    """Returns stellar model grid with desired bandpasses and with standard column names

    bands must be iterable, and are parsed according to :func:``get_band``
    """
    # Cache of loaded HDF grids, keyed by source name, so each source
    # is read at most once even when several bands come from it.
    grids = {}
    df = pd.DataFrame()
    for bnd in self.bands:
        # get_band maps a band label to (source, column-name-in-source).
        s,b = self.get_band(bnd, **self.kwargs)
        logging.debug('loading {} band from {}'.format(b,s))
        if s not in grids:
            grids[s] = self.get_hdf(s)
        # Copy the shared (non-band) columns into df the first time
        # through; presence of the first common column is the sentinel.
        if self.common_columns[0] not in df:
            df[list(self.common_columns)] = grids[s][list(self.common_columns)]
        col = grids[s][b]
        # NaNs in a band column are tolerated, but logged for debugging.
        n_nan = np.isnan(col).sum()
        if n_nan > 0:
            logging.debug('{} NANs in {} column'.format(n_nan, b))
        df.loc[:, bnd] = col.values #dunno why it has to be this way; something
        # funny with indexing.
    return df
Returns stellar model grid with desired bandpasses and with standard column names bands must be iterable, and are parsed according to :func:``get_band``
Below is the the instruction that describes the task: ### Input: Returns stellar model grid with desired bandpasses and with standard column names bands must be iterable, and are parsed according to :func:``get_band`` ### Response: def _get_df(self): """Returns stellar model grid with desired bandpasses and with standard column names bands must be iterable, and are parsed according to :func:``get_band`` """ grids = {} df = pd.DataFrame() for bnd in self.bands: s,b = self.get_band(bnd, **self.kwargs) logging.debug('loading {} band from {}'.format(b,s)) if s not in grids: grids[s] = self.get_hdf(s) if self.common_columns[0] not in df: df[list(self.common_columns)] = grids[s][list(self.common_columns)] col = grids[s][b] n_nan = np.isnan(col).sum() if n_nan > 0: logging.debug('{} NANs in {} column'.format(n_nan, b)) df.loc[:, bnd] = col.values #dunno why it has to be this way; something # funny with indexing. return df
def adafactor_optimizer_from_hparams(hparams, lr):
    """Build an AdafactorOptimizer configured from model hyperparameters.

    Args:
      hparams: model hyperparameters
      lr: learning rate scalar.

    Returns:
      an AdafactorOptimizer

    Raises:
      ValueError: on illegal values
    """
    decay_type = hparams.optimizer_adafactor_decay_type
    # Two supported second-moment decay schedules; anything else is an error.
    if decay_type == "Adam":
        decay_rate = adafactor_decay_rate_adam(
            hparams.optimizer_adafactor_beta2)
    elif decay_type == "pow":
        decay_rate = adafactor_decay_rate_pow(
            hparams.optimizer_adafactor_memory_exponent)
    else:
        raise ValueError("unknown optimizer_adafactor_decay_type")
    scale_by_params = hparams.optimizer_adafactor_multiply_by_parameter_scale
    return AdafactorOptimizer(
        multiply_by_parameter_scale=scale_by_params,
        learning_rate=lr,
        decay_rate=decay_rate,
        beta1=hparams.optimizer_adafactor_beta1,
        clipping_threshold=hparams.optimizer_adafactor_clipping_threshold,
        factored=hparams.optimizer_adafactor_factored)
Create an Adafactor optimizer based on model hparams. Args: hparams: model hyperparameters lr: learning rate scalar. Returns: an AdafactorOptimizer Raises: ValueError: on illegal values
Below is the the instruction that describes the task: ### Input: Create an Adafactor optimizer based on model hparams. Args: hparams: model hyperparameters lr: learning rate scalar. Returns: an AdafactorOptimizer Raises: ValueError: on illegal values ### Response: def adafactor_optimizer_from_hparams(hparams, lr): """Create an Adafactor optimizer based on model hparams. Args: hparams: model hyperparameters lr: learning rate scalar. Returns: an AdafactorOptimizer Raises: ValueError: on illegal values """ if hparams.optimizer_adafactor_decay_type == "Adam": decay_rate = adafactor_decay_rate_adam( hparams.optimizer_adafactor_beta2) elif hparams.optimizer_adafactor_decay_type == "pow": decay_rate = adafactor_decay_rate_pow( hparams.optimizer_adafactor_memory_exponent) else: raise ValueError("unknown optimizer_adafactor_decay_type") return AdafactorOptimizer( multiply_by_parameter_scale=( hparams.optimizer_adafactor_multiply_by_parameter_scale), learning_rate=lr, decay_rate=decay_rate, beta1=hparams.optimizer_adafactor_beta1, clipping_threshold=hparams.optimizer_adafactor_clipping_threshold, factored=hparams.optimizer_adafactor_factored)