code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def align_statements(stmts1, stmts2, keyfun=None): """Return alignment of two lists of statements by key. Parameters ---------- stmts1 : list[indra.statements.Statement] A list of INDRA Statements to align stmts2 : list[indra.statements.Statement] A list of INDRA Statements to align keyfun : Optional[function] A function that takes a Statement as an argument and returns a key to align by. If not given, the default key function is a tuble of the names of the Agents in the Statement. Return ------ matches : list(tuple) A list of tuples where each tuple has two elements, the first corresponding to an element of the stmts1 list and the second corresponding to an element of the stmts2 list. If a given element is not matched, its corresponding pair in the tuple is None. """ def name_keyfun(stmt): return tuple(a.name if a is not None else None for a in stmt.agent_list()) if not keyfun: keyfun = name_keyfun matches = [] keys1 = [keyfun(s) for s in stmts1] keys2 = [keyfun(s) for s in stmts2] for stmt, key in zip(stmts1, keys1): try: match_idx = keys2.index(key) match_stmt = stmts2[match_idx] matches.append((stmt, match_stmt)) except ValueError: matches.append((stmt, None)) for stmt, key in zip(stmts2, keys2): try: match_idx = keys1.index(key) except ValueError: matches.append((None, stmt)) return matches
Return alignment of two lists of statements by key. Parameters ---------- stmts1 : list[indra.statements.Statement] A list of INDRA Statements to align stmts2 : list[indra.statements.Statement] A list of INDRA Statements to align keyfun : Optional[function] A function that takes a Statement as an argument and returns a key to align by. If not given, the default key function is a tuble of the names of the Agents in the Statement. Return ------ matches : list(tuple) A list of tuples where each tuple has two elements, the first corresponding to an element of the stmts1 list and the second corresponding to an element of the stmts2 list. If a given element is not matched, its corresponding pair in the tuple is None.
Below is the the instruction that describes the task: ### Input: Return alignment of two lists of statements by key. Parameters ---------- stmts1 : list[indra.statements.Statement] A list of INDRA Statements to align stmts2 : list[indra.statements.Statement] A list of INDRA Statements to align keyfun : Optional[function] A function that takes a Statement as an argument and returns a key to align by. If not given, the default key function is a tuble of the names of the Agents in the Statement. Return ------ matches : list(tuple) A list of tuples where each tuple has two elements, the first corresponding to an element of the stmts1 list and the second corresponding to an element of the stmts2 list. If a given element is not matched, its corresponding pair in the tuple is None. ### Response: def align_statements(stmts1, stmts2, keyfun=None): """Return alignment of two lists of statements by key. Parameters ---------- stmts1 : list[indra.statements.Statement] A list of INDRA Statements to align stmts2 : list[indra.statements.Statement] A list of INDRA Statements to align keyfun : Optional[function] A function that takes a Statement as an argument and returns a key to align by. If not given, the default key function is a tuble of the names of the Agents in the Statement. Return ------ matches : list(tuple) A list of tuples where each tuple has two elements, the first corresponding to an element of the stmts1 list and the second corresponding to an element of the stmts2 list. If a given element is not matched, its corresponding pair in the tuple is None. 
""" def name_keyfun(stmt): return tuple(a.name if a is not None else None for a in stmt.agent_list()) if not keyfun: keyfun = name_keyfun matches = [] keys1 = [keyfun(s) for s in stmts1] keys2 = [keyfun(s) for s in stmts2] for stmt, key in zip(stmts1, keys1): try: match_idx = keys2.index(key) match_stmt = stmts2[match_idx] matches.append((stmt, match_stmt)) except ValueError: matches.append((stmt, None)) for stmt, key in zip(stmts2, keys2): try: match_idx = keys1.index(key) except ValueError: matches.append((None, stmt)) return matches
def from_byte_array(cls, bytes_): """Decodes a run-length encoded ByteArray and returns a Bitmap. The ByteArray decompresses to a sequence of 32-bit values, which are stored as a byte string. (The specific encoding depends on Form.depth.) """ runs = cls._length_run_coding.parse(bytes_) pixels = (run.pixels for run in runs.data) data = "".join(itertools.chain.from_iterable(pixels)) return cls(data)
Decodes a run-length encoded ByteArray and returns a Bitmap. The ByteArray decompresses to a sequence of 32-bit values, which are stored as a byte string. (The specific encoding depends on Form.depth.)
Below is the the instruction that describes the task: ### Input: Decodes a run-length encoded ByteArray and returns a Bitmap. The ByteArray decompresses to a sequence of 32-bit values, which are stored as a byte string. (The specific encoding depends on Form.depth.) ### Response: def from_byte_array(cls, bytes_): """Decodes a run-length encoded ByteArray and returns a Bitmap. The ByteArray decompresses to a sequence of 32-bit values, which are stored as a byte string. (The specific encoding depends on Form.depth.) """ runs = cls._length_run_coding.parse(bytes_) pixels = (run.pixels for run in runs.data) data = "".join(itertools.chain.from_iterable(pixels)) return cls(data)
def calc_bhhh_hessian_approximation_mixed_logit(params, design_3d, alt_IDs, rows_to_obs, rows_to_alts, rows_to_mixers, choice_vector, utility_transform, ridge=None, weights=None): """ Parameters ---------- params : 1D ndarray. All elements should by ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features + num_coefs_being_mixed). design_3d : 3D ndarray. All elements should be ints, floats, or longs. Should have one row per observation per available alternative. The second axis should have as many elements as there are draws from the mixing distributions of the coefficients. The last axis should have one element per index coefficient being estimated. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. rows_to_mixers : 2D scipy sparse array. All elements should be zeros and ones. Will map the rows of the design matrix to the particular units that the mixing is being performed over. Note that in the case of panel data, this matrix will be different from `rows_to_obs`. choice_vector : 1D ndarray. All elements should be either ones or zeros. There should be one row per observation per available alternative for the given observation. 
Elements denote the alternative which is chosen by the given observation with a 1 and a zero otherwise. utility_transform : callable. Should accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 2D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated and the given draw of the random coefficients. There should be one column for each draw of the random coefficients. There should have one row per individual per choice situation per available alternative. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Default == None. Returns ------- bhhh_matrix : 2D ndarray of shape `(design.shape[1], design.shape[1])`. The returned array is the BHHH approximation of the Fisher Information Matrix. I.e it is the negative of the sum of the outer product of each individual's gradient with itself. """ # Calculate the weights for the sample if weights is None: weights = np.ones(design_3d.shape[0]) weights_per_obs =\ np.max(rows_to_mixers.toarray() * weights[:, None], axis=0) # Calculate the regular probability array. Note the implicit assumption # that params == index coefficients. 
prob_array = general_calc_probabilities(params, design_3d, alt_IDs, rows_to_obs, rows_to_alts, utility_transform, return_long_probs=True) # Calculate the simulated probability of correctly predicting each persons # sequence of choices. Note that this function implicitly assumes that the # mixing unit is the individual prob_results = calc_choice_sequence_probs(prob_array, choice_vector, rows_to_mixers, return_type="all") # Calculate the sequence probabilities given random draws # and calculate the overal simulated probabilities sequence_prob_array = prob_results[1] simulated_probs = prob_results[0] # Convert the various probabilties to long format long_sequence_prob_array = rows_to_mixers.dot(sequence_prob_array) long_simulated_probs = rows_to_mixers.dot(simulated_probs) # Scale sequence probabilites given random draws by simulated probabilities scaled_sequence_probs = (long_sequence_prob_array / long_simulated_probs[:, None]) # Calculate the scaled error. Will have shape == (num_rows, num_draws) scaled_error = ((choice_vector[:, None] - prob_array) * scaled_sequence_probs) # Calculate the gradient. Note that the lines below assume that we are # taking the gradient of an MNL model. Should refactor to make use of the # built in gradient function for logit-type models. Should also refactor # the gradient function for logit-type models to be able to handle 2D # systematic utility arrays. `gradient` will have shape # (design_3d.shape[0], design_3d.shape[2]) gradient = (scaled_error[:, :, None] * design_3d).mean(axis=1) gradient_per_obs = rows_to_mixers.T.dot(gradient) bhhh_matrix =\ gradient_per_obs.T.dot(weights_per_obs[:, None] * gradient_per_obs) if ridge is not None: bhhh_matrix -= 2 * ridge # Note the "-1" is because we are approximating the Fisher information # matrix which has a negative one in the front of it? return -1 * bhhh_matrix
Parameters ---------- params : 1D ndarray. All elements should by ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features + num_coefs_being_mixed). design_3d : 3D ndarray. All elements should be ints, floats, or longs. Should have one row per observation per available alternative. The second axis should have as many elements as there are draws from the mixing distributions of the coefficients. The last axis should have one element per index coefficient being estimated. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. rows_to_mixers : 2D scipy sparse array. All elements should be zeros and ones. Will map the rows of the design matrix to the particular units that the mixing is being performed over. Note that in the case of panel data, this matrix will be different from `rows_to_obs`. choice_vector : 1D ndarray. All elements should be either ones or zeros. There should be one row per observation per available alternative for the given observation. Elements denote the alternative which is chosen by the given observation with a 1 and a zero otherwise. utility_transform : callable. Should accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. 
Should return a 2D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated and the given draw of the random coefficients. There should be one column for each draw of the random coefficients. There should have one row per individual per choice situation per available alternative. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Default == None. Returns ------- bhhh_matrix : 2D ndarray of shape `(design.shape[1], design.shape[1])`. The returned array is the BHHH approximation of the Fisher Information Matrix. I.e it is the negative of the sum of the outer product of each individual's gradient with itself.
Below is the the instruction that describes the task: ### Input: Parameters ---------- params : 1D ndarray. All elements should by ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features + num_coefs_being_mixed). design_3d : 3D ndarray. All elements should be ints, floats, or longs. Should have one row per observation per available alternative. The second axis should have as many elements as there are draws from the mixing distributions of the coefficients. The last axis should have one element per index coefficient being estimated. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. rows_to_mixers : 2D scipy sparse array. All elements should be zeros and ones. Will map the rows of the design matrix to the particular units that the mixing is being performed over. Note that in the case of panel data, this matrix will be different from `rows_to_obs`. choice_vector : 1D ndarray. All elements should be either ones or zeros. There should be one row per observation per available alternative for the given observation. Elements denote the alternative which is chosen by the given observation with a 1 and a zero otherwise. utility_transform : callable. 
Should accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 2D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated and the given draw of the random coefficients. There should be one column for each draw of the random coefficients. There should have one row per individual per choice situation per available alternative. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Default == None. Returns ------- bhhh_matrix : 2D ndarray of shape `(design.shape[1], design.shape[1])`. The returned array is the BHHH approximation of the Fisher Information Matrix. I.e it is the negative of the sum of the outer product of each individual's gradient with itself. ### Response: def calc_bhhh_hessian_approximation_mixed_logit(params, design_3d, alt_IDs, rows_to_obs, rows_to_alts, rows_to_mixers, choice_vector, utility_transform, ridge=None, weights=None): """ Parameters ---------- params : 1D ndarray. All elements should by ints, floats, or longs. Should have 1 element for each utility coefficient being estimated (i.e. num_features + num_coefs_being_mixed). design_3d : 3D ndarray. All elements should be ints, floats, or longs. Should have one row per observation per available alternative. 
The second axis should have as many elements as there are draws from the mixing distributions of the coefficients. The last axis should have one element per index coefficient being estimated. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_obs : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_alts : 2D scipy sparse array. All elements should be zeros and ones. Should have one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. rows_to_mixers : 2D scipy sparse array. All elements should be zeros and ones. Will map the rows of the design matrix to the particular units that the mixing is being performed over. Note that in the case of panel data, this matrix will be different from `rows_to_obs`. choice_vector : 1D ndarray. All elements should be either ones or zeros. There should be one row per observation per available alternative for the given observation. Elements denote the alternative which is chosen by the given observation with a 1 and a zero otherwise. utility_transform : callable. Should accept a 1D array of systematic utility values, a 1D array of alternative IDs, and miscellaneous args and kwargs. Should return a 2D array whose elements contain the appropriately transformed systematic utility values, based on the current model being evaluated and the given draw of the random coefficients. There should be one column for each draw of the random coefficients. There should have one row per individual per choice situation per available alternative. 
ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a float is passed, then that float determines the ridge penalty for the optimization. Default = None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Default == None. Returns ------- bhhh_matrix : 2D ndarray of shape `(design.shape[1], design.shape[1])`. The returned array is the BHHH approximation of the Fisher Information Matrix. I.e it is the negative of the sum of the outer product of each individual's gradient with itself. """ # Calculate the weights for the sample if weights is None: weights = np.ones(design_3d.shape[0]) weights_per_obs =\ np.max(rows_to_mixers.toarray() * weights[:, None], axis=0) # Calculate the regular probability array. Note the implicit assumption # that params == index coefficients. prob_array = general_calc_probabilities(params, design_3d, alt_IDs, rows_to_obs, rows_to_alts, utility_transform, return_long_probs=True) # Calculate the simulated probability of correctly predicting each persons # sequence of choices. 
Note that this function implicitly assumes that the # mixing unit is the individual prob_results = calc_choice_sequence_probs(prob_array, choice_vector, rows_to_mixers, return_type="all") # Calculate the sequence probabilities given random draws # and calculate the overal simulated probabilities sequence_prob_array = prob_results[1] simulated_probs = prob_results[0] # Convert the various probabilties to long format long_sequence_prob_array = rows_to_mixers.dot(sequence_prob_array) long_simulated_probs = rows_to_mixers.dot(simulated_probs) # Scale sequence probabilites given random draws by simulated probabilities scaled_sequence_probs = (long_sequence_prob_array / long_simulated_probs[:, None]) # Calculate the scaled error. Will have shape == (num_rows, num_draws) scaled_error = ((choice_vector[:, None] - prob_array) * scaled_sequence_probs) # Calculate the gradient. Note that the lines below assume that we are # taking the gradient of an MNL model. Should refactor to make use of the # built in gradient function for logit-type models. Should also refactor # the gradient function for logit-type models to be able to handle 2D # systematic utility arrays. `gradient` will have shape # (design_3d.shape[0], design_3d.shape[2]) gradient = (scaled_error[:, :, None] * design_3d).mean(axis=1) gradient_per_obs = rows_to_mixers.T.dot(gradient) bhhh_matrix =\ gradient_per_obs.T.dot(weights_per_obs[:, None] * gradient_per_obs) if ridge is not None: bhhh_matrix -= 2 * ridge # Note the "-1" is because we are approximating the Fisher information # matrix which has a negative one in the front of it? return -1 * bhhh_matrix
def consume_messages(self, batchsize): """ Get messages batch from the reservoir """ if not self._reservoir: self.finished = True return for msg in self._reservoir[:batchsize]: yield msg self._reservoir = self._reservoir[batchsize:]
Get messages batch from the reservoir
Below is the the instruction that describes the task: ### Input: Get messages batch from the reservoir ### Response: def consume_messages(self, batchsize): """ Get messages batch from the reservoir """ if not self._reservoir: self.finished = True return for msg in self._reservoir[:batchsize]: yield msg self._reservoir = self._reservoir[batchsize:]
def _reconnect_delay(self): """ Calculate reconnection delay. """ if self.RECONNECT_ON_ERROR and self.RECONNECT_DELAYED: if self._reconnect_attempts >= len(self.RECONNECT_DELAYS): return self.RECONNECT_DELAYS[-1] else: return self.RECONNECT_DELAYS[self._reconnect_attempts] else: return 0
Calculate reconnection delay.
Below is the the instruction that describes the task: ### Input: Calculate reconnection delay. ### Response: def _reconnect_delay(self): """ Calculate reconnection delay. """ if self.RECONNECT_ON_ERROR and self.RECONNECT_DELAYED: if self._reconnect_attempts >= len(self.RECONNECT_DELAYS): return self.RECONNECT_DELAYS[-1] else: return self.RECONNECT_DELAYS[self._reconnect_attempts] else: return 0
def delete(self, filename=''): """Deletes given file or directory. If no filename is passed, current directory is removed. """ self._raise_if_none() fn = path_join(self.path, filename) try: if isfile(fn): remove(fn) else: removedirs(fn) except OSError as why: if why.errno == errno.ENOENT: pass else: raise why
Deletes given file or directory. If no filename is passed, current directory is removed.
Below is the the instruction that describes the task: ### Input: Deletes given file or directory. If no filename is passed, current directory is removed. ### Response: def delete(self, filename=''): """Deletes given file or directory. If no filename is passed, current directory is removed. """ self._raise_if_none() fn = path_join(self.path, filename) try: if isfile(fn): remove(fn) else: removedirs(fn) except OSError as why: if why.errno == errno.ENOENT: pass else: raise why
def _decodeAddressField(byteIter, smscField=False, log=False): """ Decodes the address field at the current position of the bytearray iterator :param byteIter: Iterator over bytearray :type byteIter: iter(bytearray) :return: Tuple containing the address value and amount of bytes read (value is or None if it is empty (zero-length)) :rtype: tuple """ addressLen = next(byteIter) if addressLen > 0: toa = next(byteIter) ton = (toa & 0x70) # bits 6,5,4 of type-of-address == type-of-number if ton == 0x50: # Alphanumberic number addressLen = int(math.ceil(addressLen / 2.0)) septets = unpackSeptets(byteIter, addressLen) addressValue = decodeGsm7(septets) return (addressValue, (addressLen + 2)) else: # ton == 0x00: Unknown (might be international, local, etc) - leave as is # ton == 0x20: National number if smscField: addressValue = decodeSemiOctets(byteIter, addressLen-1) else: if addressLen % 2: addressLen = int(addressLen / 2) + 1 else: addressLen = int(addressLen / 2) addressValue = decodeSemiOctets(byteIter, addressLen) addressLen += 1 # for the return value, add the toa byte if ton == 0x10: # International number addressValue = '+' + addressValue return (addressValue, (addressLen + 1)) else: return (None, 1)
Decodes the address field at the current position of the bytearray iterator :param byteIter: Iterator over bytearray :type byteIter: iter(bytearray) :return: Tuple containing the address value and amount of bytes read (value is or None if it is empty (zero-length)) :rtype: tuple
Below is the the instruction that describes the task: ### Input: Decodes the address field at the current position of the bytearray iterator :param byteIter: Iterator over bytearray :type byteIter: iter(bytearray) :return: Tuple containing the address value and amount of bytes read (value is or None if it is empty (zero-length)) :rtype: tuple ### Response: def _decodeAddressField(byteIter, smscField=False, log=False): """ Decodes the address field at the current position of the bytearray iterator :param byteIter: Iterator over bytearray :type byteIter: iter(bytearray) :return: Tuple containing the address value and amount of bytes read (value is or None if it is empty (zero-length)) :rtype: tuple """ addressLen = next(byteIter) if addressLen > 0: toa = next(byteIter) ton = (toa & 0x70) # bits 6,5,4 of type-of-address == type-of-number if ton == 0x50: # Alphanumberic number addressLen = int(math.ceil(addressLen / 2.0)) septets = unpackSeptets(byteIter, addressLen) addressValue = decodeGsm7(septets) return (addressValue, (addressLen + 2)) else: # ton == 0x00: Unknown (might be international, local, etc) - leave as is # ton == 0x20: National number if smscField: addressValue = decodeSemiOctets(byteIter, addressLen-1) else: if addressLen % 2: addressLen = int(addressLen / 2) + 1 else: addressLen = int(addressLen / 2) addressValue = decodeSemiOctets(byteIter, addressLen) addressLen += 1 # for the return value, add the toa byte if ton == 0x10: # International number addressValue = '+' + addressValue return (addressValue, (addressLen + 1)) else: return (None, 1)
def this_year(self): """ Get AnnouncementRequests from this school year only. """ start_date, end_date = get_date_range_this_year() return Announcement.objects.filter(added__gte=start_date, added__lte=end_date)
Get AnnouncementRequests from this school year only.
Below is the the instruction that describes the task: ### Input: Get AnnouncementRequests from this school year only. ### Response: def this_year(self): """ Get AnnouncementRequests from this school year only. """ start_date, end_date = get_date_range_this_year() return Announcement.objects.filter(added__gte=start_date, added__lte=end_date)
def length(self): """ Returns the length (in days) of the task, by considering the start date and the due date. When there is no start date, its creation date is used. Returns 0 when one of these dates is missing. """ start = self.start_date() or self.creation_date() due = self.due_date() if start and due and start < due: diff = due - start return diff.days else: return 0
Returns the length (in days) of the task, by considering the start date and the due date. When there is no start date, its creation date is used. Returns 0 when one of these dates is missing.
Below is the the instruction that describes the task: ### Input: Returns the length (in days) of the task, by considering the start date and the due date. When there is no start date, its creation date is used. Returns 0 when one of these dates is missing. ### Response: def length(self): """ Returns the length (in days) of the task, by considering the start date and the due date. When there is no start date, its creation date is used. Returns 0 when one of these dates is missing. """ start = self.start_date() or self.creation_date() due = self.due_date() if start and due and start < due: diff = due - start return diff.days else: return 0
def transform(grammar, text): """Transform text by replacing matches to grammar.""" results = [] intervals = [] for result, start, stop in all_matches(grammar, text): if result is not ignore_transform: internal_assert(isinstance(result, str), "got non-string transform result", result) if start == 0 and stop == len(text): return result results.append(result) intervals.append((start, stop)) if not results: return None split_indices = [0] split_indices.extend(start for start, _ in intervals) split_indices.extend(stop for _, stop in intervals) split_indices.sort() split_indices.append(None) out = [] for i in range(len(split_indices) - 1): if i % 2 == 0: start, stop = split_indices[i], split_indices[i + 1] out.append(text[start:stop]) else: out.append(results[i // 2]) if i // 2 < len(results) - 1: raise CoconutInternalException("unused transform results", results[i // 2 + 1:]) if stop is not None: raise CoconutInternalException("failed to properly split text to be transformed") return "".join(out)
Transform text by replacing matches to grammar.
Below is the the instruction that describes the task: ### Input: Transform text by replacing matches to grammar. ### Response: def transform(grammar, text): """Transform text by replacing matches to grammar.""" results = [] intervals = [] for result, start, stop in all_matches(grammar, text): if result is not ignore_transform: internal_assert(isinstance(result, str), "got non-string transform result", result) if start == 0 and stop == len(text): return result results.append(result) intervals.append((start, stop)) if not results: return None split_indices = [0] split_indices.extend(start for start, _ in intervals) split_indices.extend(stop for _, stop in intervals) split_indices.sort() split_indices.append(None) out = [] for i in range(len(split_indices) - 1): if i % 2 == 0: start, stop = split_indices[i], split_indices[i + 1] out.append(text[start:stop]) else: out.append(results[i // 2]) if i // 2 < len(results) - 1: raise CoconutInternalException("unused transform results", results[i // 2 + 1:]) if stop is not None: raise CoconutInternalException("failed to properly split text to be transformed") return "".join(out)
def format_modified(self, modified, sep=" "):
    """Format modification date in UTC if it's not None.

    @param modified: modification date in UTC
    @type modified: datetime or None
    @param sep: separator between the date and time parts
    @type sep: unicode
    @return: formatted date or empty string
    @rtype: unicode
    """
    if modified is None:
        return u""
    # Build the strftime pattern with the requested date/time separator.
    pattern = "%Y-%m-%d{0}%H:%M:%S.%fZ".format(sep)
    return modified.strftime(pattern)
Format modification date in UTC if it's not None. @param modified: modification date in UTC @ptype modified: datetime or None @return: formatted date or empty string @rtype: unicode
Below is the the instruction that describes the task: ### Input: Format modification date in UTC if it's not None. @param modified: modification date in UTC @ptype modified: datetime or None @return: formatted date or empty string @rtype: unicode ### Response: def format_modified(self, modified, sep=" "): """Format modification date in UTC if it's not None. @param modified: modification date in UTC @ptype modified: datetime or None @return: formatted date or empty string @rtype: unicode """ if modified is not None: return modified.strftime("%Y-%m-%d{0}%H:%M:%S.%fZ".format(sep)) return u""
def distinctBy(iterable, fn):
    """ uniq operation with key selector """
    seen = set()
    for item in iterable:
        key = fn(item)
        if key in seen:
            # A previous item already produced this key; skip duplicates.
            continue
        seen.add(key)
        yield item
uniq operation with key selector
Below is the the instruction that describes the task: ### Input: uniq operation with key selector ### Response: def distinctBy(iterable, fn): """ uniq operation with key selector """ s = set() for i in iterable: r = fn(i) if r not in s: s.add(r) yield i
def merge(self, other):
    """ Merge another stats. """
    # Union of both stat collections, de-duplicated, stored back as a list.
    combined = set(self.stats)
    combined.update(other.stats)
    self.stats = list(combined)
    # An error in either side marks the merged result as errored.
    if other._error:
        self._error = True
Merge another stats.
Below is the the instruction that describes the task: ### Input: Merge another stats. ### Response: def merge(self, other): """ Merge another stats. """ self.stats = set(self.stats) self.stats.update(set(other.stats)) self.stats = list(self.stats) if other._error: self._error = True
def _prepare_lines(self, lines): """ Prepare the lines read from the text file before starting to process it. """ result = list() for line in lines: # Remove all whitespace characters (e.g. spaces, line breaks, etc.) # from the start and end of the line. line = line.strip() # Replace all tabs with spaces. line = line.replace("\t", " ") # Replace all repeating spaces with a single space. while line.find(" ") > -1: line = line.replace(" ", " ") result.append(line) return result
Prepare the lines read from the text file before starting to process it.
Below is the the instruction that describes the task: ### Input: Prepare the lines read from the text file before starting to process it. ### Response: def _prepare_lines(self, lines): """ Prepare the lines read from the text file before starting to process it. """ result = list() for line in lines: # Remove all whitespace characters (e.g. spaces, line breaks, etc.) # from the start and end of the line. line = line.strip() # Replace all tabs with spaces. line = line.replace("\t", " ") # Replace all repeating spaces with a single space. while line.find(" ") > -1: line = line.replace(" ", " ") result.append(line) return result
def drawQuad(page, quad, color=None, fill=None, dashes=None, width=1,
             roundCap=False, morph=None, overlay=True):
    """Draw a quadrilateral.
    """
    shape = page.newShape()
    result = shape.drawQuad(Quad(quad))
    shape.finish(color=color, fill=fill, dashes=dashes, width=width,
                 roundCap=roundCap, morph=morph)
    shape.commit(overlay)
    return result
Draw a quadrilateral.
Below is the the instruction that describes the task: ### Input: Draw a quadrilateral. ### Response: def drawQuad(page, quad, color=None, fill=None, dashes=None, width=1, roundCap=False, morph=None, overlay=True): """Draw a quadrilateral. """ img = page.newShape() Q = img.drawQuad(Quad(quad)) img.finish(color=color, fill=fill, dashes=dashes, width=width, roundCap=roundCap, morph=morph) img.commit(overlay) return Q
def loggable(obj):
    """Return "True" if the obj implements the minimum Logger API required
    by the 'trace' decorator.
    """
    # Real Logger instances always qualify.
    if isinstance(obj, logging.Logger):
        return True
    # Otherwise accept only classes exposing the required methods.
    required = ('debug', 'isEnabledFor', 'setLevel')
    return inspect.isclass(obj) and all(
        inspect.ismethod(getattr(obj, name, None)) for name in required
    )
Return "True" if the obj implements the minimum Logger API required by the 'trace' decorator.
Below is the the instruction that describes the task: ### Input: Return "True" if the obj implements the minimum Logger API required by the 'trace' decorator. ### Response: def loggable(obj): """Return "True" if the obj implements the minimum Logger API required by the 'trace' decorator. """ if isinstance(obj, logging.Logger): return True else: return (inspect.isclass(obj) and inspect.ismethod(getattr(obj, 'debug', None)) and inspect.ismethod(getattr(obj, 'isEnabledFor', None)) and inspect.ismethod(getattr(obj, 'setLevel', None)))
def closest_point_of_approach(
    traffic: Traffic,
    lateral_separation: float,
    vertical_separation: float,
    projection: Union[pyproj.Proj, crs.Projection, None] = None,
    round_t: str = "d",
    max_workers: int = 4,
) -> CPA:
    """
    Computes a CPA dataframe for all pairs of trajectories candidates for
    being separated by less than lateral_separation in vertical_separation.

    In order to be computed efficiently, the method needs the following
    parameters:

    - projection: a first filtering is applied on the bounding boxes of
    trajectories, expressed in meters. You need to provide a decent
    projection able to approximate distances by Euclide formula. By
    default, EuroPP() projection is considered, but a non explicit
    argument will raise a warning.

    - round_t: an additional column will be added in the DataFrame to
    group trajectories by relevant time frames. Distance computations will
    be considered only between trajectories flown in the same time frame.
    By default, the 'd' pandas freq parameter is considered, to group
    trajectories by day, but other ways of splitting ('h') may be more
    relevant and impact performance.

    - max_workers: distance computations are spread over a given number of
    processors.

    """

    if projection is None:
        # logging.warn is a deprecated alias of logging.warning
        logging.warning("Defaulting to projection EuroPP()")
        projection = crs.EuroPP()

    if isinstance(projection, crs.Projection):
        projection = pyproj.Proj(projection.proj4_init)

    def yield_pairs(t_chunk: Traffic):
        """Yield all pairs of possible candidates for a CPA calculation,
        after cheap bounding-box pre-filters in time, altitude, x and y.
        """
        # combinations types Iterator[Tuple[T, ...]]
        for first, second in cast(
            Iterator[Tuple[Flight, Flight]], combinations(t_chunk, 2)
        ):
            # cast are necessary because of the lru_cache × property bug
            if (
                cast(pd.Timestamp, first.start)
                > cast(pd.Timestamp, second.stop)
            ) or (
                cast(pd.Timestamp, second.start)
                > cast(pd.Timestamp, first.stop)
            ):
                # Flights must fly at the same time
                continue
            if (
                first.min("altitude")
                > second.max("altitude") + vertical_separation
            ):
                # Bounding boxes in altitude must cross
                continue
            if (
                second.min("altitude")
                > first.max("altitude") + vertical_separation
            ):
                # Bounding boxes in altitude must cross
                continue

            if first.min("x") > second.max("x") + lateral_separation:
                # Bounding boxes in x must cross
                continue
            if second.min("x") > first.max("x") + lateral_separation:
                # Bounding boxes in x must cross
                continue
            if first.min("y") > second.max("y") + lateral_separation:
                # Bounding boxes in y must cross
                continue
            if second.min("y") > first.max("y") + lateral_separation:
                # Bounding boxes in y must cross
                continue

            # Next step is to check the 2D footprint of the trajectories
            # intersect. Before computing the intersection we bufferize the
            # trajectories by half the requested separation.

            first_shape = first.project_shape(projection)
            second_shape = second.project_shape(projection)
            if first_shape is None or second_shape is None:
                continue

            first_shape = first_shape.simplify(1e3).buffer(
                lateral_separation / 2
            )
            # BUG FIX: this buffered shape was previously derived from
            # first_shape (copy-paste error), which made the intersection
            # test trivially true and defeated the footprint filter.
            second_shape = second_shape.simplify(1e3).buffer(
                lateral_separation / 2
            )

            if first_shape.intersects(second_shape):
                yield first, second

    t_xyt = (
        traffic.airborne()
        .compute_xy(projection)
        .assign(round_t=lambda df: df.timestamp.dt.round(round_t))
    )

    cumul = list()

    # Multiprocessing is implemented on each timerange slot only.
    # TODO: it would probably be more efficient to multiprocess over each
    # t_chunk rather than multiprocess the distance computation.

    for _, t_chunk in tqdm(
        t_xyt.groupby("round_t"), total=len(set(t_xyt.data.round_t))
    ):
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            tasks = {
                # TODO submit(Flight.distance, first, second)
                executor.submit(first.distance, second): (
                    first.flight_id,
                    second.flight_id,
                )
                for (first, second) in yield_pairs(Traffic(t_chunk))
            }

            for future in as_completed(tasks):
                cumul.append(future.result())

    return CPA(pd.concat(cumul, sort=False))
Computes a CPA dataframe for all pairs of trajectories candidates for being separated by less than lateral_separation in vertical_separation. In order to be computed efficiently, the method needs the following parameters: - projection: a first filtering is applied on the bounding boxes of trajectories, expressed in meters. You need to provide a decent projection able to approximate distances by Euclide formula. By default, EuroPP() projection is considered, but a non explicit argument will raise a warning. - round_t: an additional column will be added in the DataFrame to group trajectories by relevant time frames. Distance computations will be considered only between trajectories flown in the same time frame. By default, the 'd' pandas freq parameter is considered, to group trajectories by day, but other ways of splitting ('h') may be more relevant and impact performance. - max_workers: distance computations are spread over a given number of processors.
Below is the the instruction that describes the task: ### Input: Computes a CPA dataframe for all pairs of trajectories candidates for being separated by less than lateral_separation in vertical_separation. In order to be computed efficiently, the method needs the following parameters: - projection: a first filtering is applied on the bounding boxes of trajectories, expressed in meters. You need to provide a decent projection able to approximate distances by Euclide formula. By default, EuroPP() projection is considered, but a non explicit argument will raise a warning. - round_t: an additional column will be added in the DataFrame to group trajectories by relevant time frames. Distance computations will be considered only between trajectories flown in the same time frame. By default, the 'd' pandas freq parameter is considered, to group trajectories by day, but other ways of splitting ('h') may be more relevant and impact performance. - max_workers: distance computations are spread over a given number of processors. ### Response: def closest_point_of_approach( traffic: Traffic, lateral_separation: float, vertical_separation: float, projection: Union[pyproj.Proj, crs.Projection, None] = None, round_t: str = "d", max_workers: int = 4, ) -> CPA: """ Computes a CPA dataframe for all pairs of trajectories candidates for being separated by less than lateral_separation in vertical_separation. In order to be computed efficiently, the method needs the following parameters: - projection: a first filtering is applied on the bounding boxes of trajectories, expressed in meters. You need to provide a decent projection able to approximate distances by Euclide formula. By default, EuroPP() projection is considered, but a non explicit argument will raise a warning. - round_t: an additional column will be added in the DataFrame to group trajectories by relevant time frames. Distance computations will be considered only between trajectories flown in the same time frame. 
By default, the 'd' pandas freq parameter is considered, to group trajectories by day, but other ways of splitting ('h') may be more relevant and impact performance. - max_workers: distance computations are spread over a given number of processors. """ if projection is None: logging.warn("Defaulting to projection EuroPP()") projection = crs.EuroPP() if isinstance(projection, crs.Projection): projection = pyproj.Proj(projection.proj4_init) def yield_pairs(t_chunk: Traffic): """ This function yields all pairs of possible candidates for a CPA calculation. """ # combinations types Iterator[Tuple[T, ...]] for first, second in cast( Iterator[Tuple[Flight, Flight]], combinations(t_chunk, 2) ): # cast are necessary because of the lru_cache × property bug if ( cast(pd.Timestamp, first.start) > cast(pd.Timestamp, second.stop) ) or ( cast(pd.Timestamp, second.start) > cast(pd.Timestamp, first.stop) ): # Flights must fly at the same time continue if ( first.min("altitude") > second.max("altitude") + vertical_separation ): # Bounding boxes in altitude must cross continue if ( second.min("altitude") > first.max("altitude") + vertical_separation ): # Bounding boxes in altitude must cross continue if first.min("x") > second.max("x") + lateral_separation: # Bounding boxes in x must cross continue if second.min("x") > first.max("x") + lateral_separation: # Bounding boxes in x must cross continue if first.min("y") > second.max("y") + lateral_separation: # Bounding boxes in y must cross continue if second.min("y") > first.max("y") + lateral_separation: # Bounding boxes in y must cross continue # Next step is to check the 2D footprint of the trajectories # intersect. Before computing the intersection we bufferize the # trajectories by half the requested separation. 
first_shape = first.project_shape(projection) second_shape = second.project_shape(projection) if first_shape is None or second_shape is None: continue first_shape = first_shape.simplify(1e3).buffer( lateral_separation / 2 ) second_shape = first_shape.simplify(1e3).buffer( lateral_separation / 2 ) if first_shape.intersects(second_shape): yield first, second t_xyt = ( traffic.airborne() .compute_xy(projection) .assign(round_t=lambda df: df.timestamp.dt.round(round_t)) ) cumul = list() # Multiprocessing is implemented on each timerange slot only. # TODO: it would probably be more efficient to multiprocess over each # t_chunk rather than multiprocess the distance computation. for _, t_chunk in tqdm( t_xyt.groupby("round_t"), total=len(set(t_xyt.data.round_t)) ): with ProcessPoolExecutor(max_workers=max_workers) as executor: tasks = { # TODO submit(Flight.distance, first, second) executor.submit(first.distance, second): ( first.flight_id, second.flight_id, ) for (first, second) in yield_pairs(Traffic(t_chunk)) } for future in as_completed(tasks): cumul.append(future.result()) return CPA(pd.concat(cumul, sort=False))
def embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=1.0,
              smear_vartype=None):
    """Embed a binary quadratic model onto a target graph.

    Args:
        source_bqm (:obj:`.BinaryQuadraticModel`):
            Binary quadratic model to embed.

        embedding (dict):
            Mapping from source graph to target graph as a dict of form
            {s: {t, ...}, ...}, where s is a source-model variable and t
            is a target-model variable.

        target_adjacency (dict/:class:`networkx.Graph`):
            Adjacency of the target graph as a dict of form {t: Nt, ...},
            where t is a variable in the target graph and Nt is its set
            of neighbours.

        chain_strength (float, optional):
            Magnitude of the quadratic bias (in SPIN-space) applied
            between variables to create chains. Note that the energy
            penalty of chain breaks is 2 * `chain_strength`.

        smear_vartype (:class:`.Vartype`, optional, default=None):
            When a single variable is embedded, it's linear bias is
            'smeared' evenly over the chain. This parameter determines
            whether the variable is smeared in SPIN or BINARY space. By
            default the embedding is done according to the given
            source_bqm.

    Returns:
        :obj:`.BinaryQuadraticModel`: Target binary quadratic model.

    Examples:
        This example embeds a fully connected :math:`K_3` graph onto a
        square target graph. Embedding is accomplished by an edge
        contraction operation on the target graph: target-nodes 2 and 3
        are chained to represent source-node c.

        >>> import dimod
        >>> import networkx as nx
        >>> # Binary quadratic model for a triangular source graph
        >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1})
        >>> # Target graph is a graph
        >>> target = nx.cycle_graph(4)
        >>> # Embedding from source to target graphs
        >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
        >>> # Embed the BQM
        >>> target_bqm = dimod.embed_bqm(bqm, embedding, target)
        >>> target_bqm.quadratic[(0, 1)] == bqm.quadratic[('a', 'b')]
        True
        >>> target_bqm.quadratic  # doctest: +SKIP
        {(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0}

        This example embeds a fully connected :math:`K_3` graph onto the
        target graph of a dimod reference structured sampler,
        `StructureComposite`, using the dimod reference `ExactSolver`
        sampler with a square graph specified. Target-nodes 2 and 3 are
        chained to represent source-node c.

        >>> import dimod
        >>> # Binary quadratic model for a triangular source graph
        >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1})
        >>> # Structured dimod sampler with a structure defined by a square graph
        >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)])
        >>> # Embedding from source to target graph
        >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}}
        >>> # Embed the BQM
        >>> target_bqm = dimod.embed_bqm(bqm, embedding, sampler.adjacency)
        >>> # Sample
        >>> samples = sampler.sample(target_bqm)
        >>> samples.record.sample  # doctest: +SKIP
        array([[-1, -1, -1, -1],
               [ 1, -1, -1, -1],
               [ 1,  1, -1, -1],
               [-1,  1, -1, -1],
               [-1,  1,  1, -1],
        >>> # Snipped above samples for brevity

    """
    # Smearing in a different vartype than the source BQM: convert the BQM,
    # recurse with smear_vartype=None (avoids infinite recursion), then
    # convert the embedded result back.
    if smear_vartype is dimod.SPIN and source_bqm.vartype is dimod.BINARY:
        return embed_bqm(source_bqm.spin, embedding, target_adjacency,
                         chain_strength=chain_strength, smear_vartype=None).binary
    elif smear_vartype is dimod.BINARY and source_bqm.vartype is dimod.SPIN:
        return embed_bqm(source_bqm.binary, embedding, target_adjacency,
                         chain_strength=chain_strength, smear_vartype=None).spin

    # create a new empty binary quadratic model with the same class as source_bqm
    target_bqm = source_bqm.empty(source_bqm.vartype)

    # add the offset
    target_bqm.add_offset(source_bqm.offset)

    # start with the linear biases, spreading the source bias equally over the target variables in
    # the chain
    for v, bias in iteritems(source_bqm.linear):

        if v in embedding:
            chain = embedding[v]
        else:
            raise MissingChainError(v)

        # NOTE(review): next(u not in target_adjacency for u in chain)
        # yields a bool, not the offending node; this looks like it was
        # meant to be next(u for u in chain if u not in target_adjacency).
        # Left unchanged here — confirm against InvalidNodeError's contract.
        if any(u not in target_adjacency for u in chain):
            raise InvalidNodeError(v, next(u not in target_adjacency for u in chain))

        # Each target variable in the chain carries an equal share of the bias.
        b = bias / len(chain)

        target_bqm.add_variables_from({u: b for u in chain})

    # next up the quadratic biases, spread the quadratic biases evenly over the available
    # interactions
    for (u, v), bias in iteritems(source_bqm.quadratic):
        available_interactions = {(s, t) for s in embedding[u] for t in embedding[v] if s in target_adjacency[t]}

        if not available_interactions:
            raise MissingEdgeError(u, v)

        b = bias / len(available_interactions)

        target_bqm.add_interactions_from((u, v, b) for u, v in available_interactions)

    # Finally add the chain couplings that bind each chain together.
    for chain in itervalues(embedding):

        # in the case where the chain has length 1, there are no chain quadratic biases, but we
        # none-the-less want the chain variables to appear in the target_bqm
        if len(chain) == 1:
            v, = chain
            target_bqm.add_variable(v, 0.0)
            continue

        quadratic_chain_biases = chain_to_quadratic(chain, target_adjacency, chain_strength)
        target_bqm.add_interactions_from(quadratic_chain_biases, vartype=dimod.SPIN)  # these are spin

        # add the energy for satisfied chains to the offset
        energy_diff = -sum(itervalues(quadratic_chain_biases))
        target_bqm.add_offset(energy_diff)

    return target_bqm
Embed a binary quadratic model onto a target graph. Args: source_bqm (:obj:`.BinaryQuadraticModel`): Binary quadratic model to embed. embedding (dict): Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...}, where s is a source-model variable and t is a target-model variable. target_adjacency (dict/:class:`networkx.Graph`): Adjacency of the target graph as a dict of form {t: Nt, ...}, where t is a variable in the target graph and Nt is its set of neighbours. chain_strength (float, optional): Magnitude of the quadratic bias (in SPIN-space) applied between variables to create chains. Note that the energy penalty of chain breaks is 2 * `chain_strength`. smear_vartype (:class:`.Vartype`, optional, default=None): When a single variable is embedded, it's linear bias is 'smeared' evenly over the chain. This parameter determines whether the variable is smeared in SPIN or BINARY space. By default the embedding is done according to the given source_bqm. Returns: :obj:`.BinaryQuadraticModel`: Target binary quadratic model. Examples: This example embeds a fully connected :math:`K_3` graph onto a square target graph. Embedding is accomplished by an edge contraction operation on the target graph: target-nodes 2 and 3 are chained to represent source-node c. 
>>> import dimod >>> import networkx as nx >>> # Binary quadratic model for a triangular source graph >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}) >>> # Target graph is a graph >>> target = nx.cycle_graph(4) >>> # Embedding from source to target graphs >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the BQM >>> target_bqm = dimod.embed_bqm(bqm, embedding, target) >>> target_bqm.quadratic[(0, 1)] == bqm.quadratic[('a', 'b')] True >>> target_bqm.quadratic # doctest: +SKIP {(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0} This example embeds a fully connected :math:`K_3` graph onto the target graph of a dimod reference structured sampler, `StructureComposite`, using the dimod reference `ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to represent source-node c. >>> import dimod >>> # Binary quadratic model for a triangular source graph >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}) >>> # Structured dimod sampler with a structure defined by a square graph >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)]) >>> # Embedding from source to target graph >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the BQM >>> target_bqm = dimod.embed_bqm(bqm, embedding, sampler.adjacency) >>> # Sample >>> samples = sampler.sample(target_bqm) >>> samples.record.sample # doctest: +SKIP array([[-1, -1, -1, -1], [ 1, -1, -1, -1], [ 1, 1, -1, -1], [-1, 1, -1, -1], [-1, 1, 1, -1], >>> # Snipped above samples for brevity
Below is the the instruction that describes the task: ### Input: Embed a binary quadratic model onto a target graph. Args: source_bqm (:obj:`.BinaryQuadraticModel`): Binary quadratic model to embed. embedding (dict): Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...}, where s is a source-model variable and t is a target-model variable. target_adjacency (dict/:class:`networkx.Graph`): Adjacency of the target graph as a dict of form {t: Nt, ...}, where t is a variable in the target graph and Nt is its set of neighbours. chain_strength (float, optional): Magnitude of the quadratic bias (in SPIN-space) applied between variables to create chains. Note that the energy penalty of chain breaks is 2 * `chain_strength`. smear_vartype (:class:`.Vartype`, optional, default=None): When a single variable is embedded, it's linear bias is 'smeared' evenly over the chain. This parameter determines whether the variable is smeared in SPIN or BINARY space. By default the embedding is done according to the given source_bqm. Returns: :obj:`.BinaryQuadraticModel`: Target binary quadratic model. Examples: This example embeds a fully connected :math:`K_3` graph onto a square target graph. Embedding is accomplished by an edge contraction operation on the target graph: target-nodes 2 and 3 are chained to represent source-node c. 
>>> import dimod >>> import networkx as nx >>> # Binary quadratic model for a triangular source graph >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}) >>> # Target graph is a graph >>> target = nx.cycle_graph(4) >>> # Embedding from source to target graphs >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the BQM >>> target_bqm = dimod.embed_bqm(bqm, embedding, target) >>> target_bqm.quadratic[(0, 1)] == bqm.quadratic[('a', 'b')] True >>> target_bqm.quadratic # doctest: +SKIP {(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0} This example embeds a fully connected :math:`K_3` graph onto the target graph of a dimod reference structured sampler, `StructureComposite`, using the dimod reference `ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to represent source-node c. >>> import dimod >>> # Binary quadratic model for a triangular source graph >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}) >>> # Structured dimod sampler with a structure defined by a square graph >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)]) >>> # Embedding from source to target graph >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the BQM >>> target_bqm = dimod.embed_bqm(bqm, embedding, sampler.adjacency) >>> # Sample >>> samples = sampler.sample(target_bqm) >>> samples.record.sample # doctest: +SKIP array([[-1, -1, -1, -1], [ 1, -1, -1, -1], [ 1, 1, -1, -1], [-1, 1, -1, -1], [-1, 1, 1, -1], >>> # Snipped above samples for brevity ### Response: def embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=1.0, smear_vartype=None): """Embed a binary quadratic model onto a target graph. Args: source_bqm (:obj:`.BinaryQuadraticModel`): Binary quadratic model to embed. 
embedding (dict): Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...}, where s is a source-model variable and t is a target-model variable. target_adjacency (dict/:class:`networkx.Graph`): Adjacency of the target graph as a dict of form {t: Nt, ...}, where t is a variable in the target graph and Nt is its set of neighbours. chain_strength (float, optional): Magnitude of the quadratic bias (in SPIN-space) applied between variables to create chains. Note that the energy penalty of chain breaks is 2 * `chain_strength`. smear_vartype (:class:`.Vartype`, optional, default=None): When a single variable is embedded, it's linear bias is 'smeared' evenly over the chain. This parameter determines whether the variable is smeared in SPIN or BINARY space. By default the embedding is done according to the given source_bqm. Returns: :obj:`.BinaryQuadraticModel`: Target binary quadratic model. Examples: This example embeds a fully connected :math:`K_3` graph onto a square target graph. Embedding is accomplished by an edge contraction operation on the target graph: target-nodes 2 and 3 are chained to represent source-node c. >>> import dimod >>> import networkx as nx >>> # Binary quadratic model for a triangular source graph >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}) >>> # Target graph is a graph >>> target = nx.cycle_graph(4) >>> # Embedding from source to target graphs >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the BQM >>> target_bqm = dimod.embed_bqm(bqm, embedding, target) >>> target_bqm.quadratic[(0, 1)] == bqm.quadratic[('a', 'b')] True >>> target_bqm.quadratic # doctest: +SKIP {(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0} This example embeds a fully connected :math:`K_3` graph onto the target graph of a dimod reference structured sampler, `StructureComposite`, using the dimod reference `ExactSolver` sampler with a square graph specified. 
Target-nodes 2 and 3 are chained to represent source-node c. >>> import dimod >>> # Binary quadratic model for a triangular source graph >>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1}) >>> # Structured dimod sampler with a structure defined by a square graph >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)]) >>> # Embedding from source to target graph >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the BQM >>> target_bqm = dimod.embed_bqm(bqm, embedding, sampler.adjacency) >>> # Sample >>> samples = sampler.sample(target_bqm) >>> samples.record.sample # doctest: +SKIP array([[-1, -1, -1, -1], [ 1, -1, -1, -1], [ 1, 1, -1, -1], [-1, 1, -1, -1], [-1, 1, 1, -1], >>> # Snipped above samples for brevity """ if smear_vartype is dimod.SPIN and source_bqm.vartype is dimod.BINARY: return embed_bqm(source_bqm.spin, embedding, target_adjacency, chain_strength=chain_strength, smear_vartype=None).binary elif smear_vartype is dimod.BINARY and source_bqm.vartype is dimod.SPIN: return embed_bqm(source_bqm.binary, embedding, target_adjacency, chain_strength=chain_strength, smear_vartype=None).spin # create a new empty binary quadratic model with the same class as source_bqm target_bqm = source_bqm.empty(source_bqm.vartype) # add the offset target_bqm.add_offset(source_bqm.offset) # start with the linear biases, spreading the source bias equally over the target variables in # the chain for v, bias in iteritems(source_bqm.linear): if v in embedding: chain = embedding[v] else: raise MissingChainError(v) if any(u not in target_adjacency for u in chain): raise InvalidNodeError(v, next(u not in target_adjacency for u in chain)) b = bias / len(chain) target_bqm.add_variables_from({u: b for u in chain}) # next up the quadratic biases, spread the quadratic biases evenly over the available # interactions for (u, v), bias in iteritems(source_bqm.quadratic): 
available_interactions = {(s, t) for s in embedding[u] for t in embedding[v] if s in target_adjacency[t]} if not available_interactions: raise MissingEdgeError(u, v) b = bias / len(available_interactions) target_bqm.add_interactions_from((u, v, b) for u, v in available_interactions) for chain in itervalues(embedding): # in the case where the chain has length 1, there are no chain quadratic biases, but we # none-the-less want the chain variables to appear in the target_bqm if len(chain) == 1: v, = chain target_bqm.add_variable(v, 0.0) continue quadratic_chain_biases = chain_to_quadratic(chain, target_adjacency, chain_strength) target_bqm.add_interactions_from(quadratic_chain_biases, vartype=dimod.SPIN) # these are spin # add the energy for satisfied chains to the offset energy_diff = -sum(itervalues(quadratic_chain_biases)) target_bqm.add_offset(energy_diff) return target_bqm
def design(npos):
    """
    make a design matrix for an anisotropy experiment
    """
    if npos == 15:
        # Rotatable design of Jelinek for kappabridge (see Tauxe, 1998):
        # design matrix for 15 measurement positions.
        A = np.array([
            [.5, .5, 0, -1., 0, 0],
            [.5, .5, 0, 1., 0, 0],
            [1, .0, 0, 0, 0, 0],
            [.5, .5, 0, -1., 0, 0],
            [.5, .5, 0, 1., 0, 0],
            [0, .5, .5, 0, -1., 0],
            [0, .5, .5, 0, 1., 0],
            [0, 1., 0, 0, 0, 0],
            [0, .5, .5, 0, -1., 0],
            [0, .5, .5, 0, 1., 0],
            [.5, 0, .5, 0, 0, -1.],
            [.5, 0, .5, 0, 0, 1.],
            [0, 0, 1., 0, 0, 0],
            [.5, 0, .5, 0, 0, -1.],
            [.5, 0, .5, 0, 0, 1.],
        ])
    elif npos == 6:
        # Design matrix for 6 measurement positions.
        A = np.array([
            [1., 0, 0, 0, 0, 0],
            [0, 1., 0, 0, 0, 0],
            [0, 0, 1., 0, 0, 0],
            [.5, .5, 0, 1., 0, 0],
            [0, .5, .5, 0, 1., 0],
            [.5, 0, .5, 0, 0, 1.],
        ])
    else:
        print("measurement protocol not supported yet ")
        return
    # B = (A^T A)^-1 A^T, the least-squares pseudo-inverse of A.
    B = np.dot(linalg.inv(np.dot(A.T, A)), A.T)
    return A, B
make a design matrix for an anisotropy experiment
Below is the the instruction that describes the task: ### Input: make a design matrix for an anisotropy experiment ### Response: def design(npos): """ make a design matrix for an anisotropy experiment """ if npos == 15: # # rotatable design of Jelinek for kappabridge (see Tauxe, 1998) # A = np.array([[.5, .5, 0, -1., 0, 0], [.5, .5, 0, 1., 0, 0], [1, .0, 0, 0, 0, 0], [.5, .5, 0, -1., 0, 0], [.5, .5, 0, 1., 0, 0], [0, .5, .5, 0, -1., 0], [0, .5, .5, 0, 1., 0], [0, 1., 0, 0, 0, 0], [0, .5, .5, 0, -1., 0], [0, .5, .5, 0, 1., 0], [.5, 0, .5, 0, 0, -1.], [.5, 0, .5, 0, 0, 1.], [0, 0, 1., 0, 0, 0], [.5, 0, .5, 0, 0, -1.], [.5, 0, .5, 0, 0, 1.]]) # design matrix for 15 measurment positions elif npos == 6: A = np.array([[1., 0, 0, 0, 0, 0], [0, 1., 0, 0, 0, 0], [0, 0, 1., 0, 0, 0], [.5, .5, 0, 1., 0, 0], [ 0, .5, .5, 0, 1., 0], [.5, 0, .5, 0, 0, 1.]]) # design matrix for 6 measurment positions else: print("measurement protocol not supported yet ") return B = np.dot(np.transpose(A), A) B = linalg.inv(B) B = np.dot(B, np.transpose(A)) return A, B
def ticket_field_option_show(self, field_id, id, **kwargs):
    """Show a single custom option of a ticket field.

    https://developer.zendesk.com/rest_api/docs/core/ticket_fields#show-a-ticket-field-option
    """
    api_path = "/api/v2/ticket_fields/{field_id}/options/{id}.json".format(
        field_id=field_id, id=id)
    return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/ticket_fields#show-a-ticket-field-option
Below is the the instruction that describes the task: ### Input: https://developer.zendesk.com/rest_api/docs/core/ticket_fields#show-a-ticket-field-option ### Response: def ticket_field_option_show(self, field_id, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/ticket_fields#show-a-ticket-field-option" api_path = "/api/v2/ticket_fields/{field_id}/options/{id}.json" api_path = api_path.format(field_id=field_id, id=id) return self.call(api_path, **kwargs)
def _read_pidfile(self): """Read the PID file and check to make sure it's not stale.""" if self.pidfile is None: return None if not os.path.isfile(self.pidfile): return None # Read the PID file with open(self.pidfile, 'r') as fp: try: pid = int(fp.read()) except ValueError: self._emit_warning('Empty or broken pidfile {pidfile}; ' 'removing'.format(pidfile=self.pidfile)) pid = None if pid is not None and psutil.pid_exists(pid): return pid else: # Remove the stale PID file os.remove(self.pidfile) return None
Read the PID file and check to make sure it's not stale.
Below is the the instruction that describes the task: ### Input: Read the PID file and check to make sure it's not stale. ### Response: def _read_pidfile(self): """Read the PID file and check to make sure it's not stale.""" if self.pidfile is None: return None if not os.path.isfile(self.pidfile): return None # Read the PID file with open(self.pidfile, 'r') as fp: try: pid = int(fp.read()) except ValueError: self._emit_warning('Empty or broken pidfile {pidfile}; ' 'removing'.format(pidfile=self.pidfile)) pid = None if pid is not None and psutil.pid_exists(pid): return pid else: # Remove the stale PID file os.remove(self.pidfile) return None
def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes):
    """Emit a standard %feature("docstring") block for each memberdef node."""
    # The opening and closing boilerplate is identical for every node.
    header = ['\n', '%feature("docstring") ', signature, ' "', '\n']
    footer = ['";', '\n']
    for node in memberdef_nodes:
        self.add_text(list(header))
        if self.with_function_signature:
            self.add_line_with_subsequent_indent(self.get_function_signature(node))
        # Skip the definition/name children: they are already covered by
        # the signature emitted above.
        self.subnode_parse(node, pieces=[], ignore=['definition', 'name'])
        self.add_text(list(footer))
Produce standard documentation for memberdef_nodes.
Below is the the instruction that describes the task: ### Input: Produce standard documentation for memberdef_nodes. ### Response: def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes): """Produce standard documentation for memberdef_nodes.""" for n in memberdef_nodes: self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n']) if self.with_function_signature: self.add_line_with_subsequent_indent(self.get_function_signature(n)) self.subnode_parse(n, pieces=[], ignore=['definition', 'name']) self.add_text(['";', '\n'])
def replace_chars_for_svg_code(svg_content):
    """
    Escape characters that are special in SVG/XML markup.

    Parameters
    ----------
    svg_content: str

    Returns
    -------
    corrected_svg: str
        Content with &, >, < and " replaced by their XML entities.
    """
    # A single translation pass is equivalent to replacing '&' first and
    # the other characters afterwards: no inserted entity contains a
    # character that would trigger a further substitution.
    entities = str.maketrans(
        {'&': '&amp;', '>': '&gt;', '<': '&lt;', '"': '&quot;'})
    return svg_content.translate(entities)
Replace known special characters to SVG code. Parameters ---------- svg_content: str Returns ------- corrected_svg: str Corrected SVG content
Below is the the instruction that describes the task: ### Input: Replace known special characters to SVG code. Parameters ---------- svg_content: str Returns ------- corrected_svg: str Corrected SVG content ### Response: def replace_chars_for_svg_code(svg_content): """ Replace known special characters to SVG code. Parameters ---------- svg_content: str Returns ------- corrected_svg: str Corrected SVG content """ result = svg_content svg_char = [ ('&', '&amp;'), ('>', '&gt;'), ('<', '&lt;'), ('"', '&quot;'), ] for c, entity in svg_char: result = result.replace(c, entity) return result
def lookup(self, ResponseGroup="Large", **kwargs):
    """Look up an Amazon Product via the Product Advertising API.

    :param ResponseGroup: Amazon response group controlling how much
        detail is returned (defaults to "Large").
    :param kwargs: passed straight through to ``ItemLookup``
        (e.g. ItemId).
    :return: An instance of :class:`~.AmazonProduct` if one item was
        returned, or a list of :class:`~.AmazonProduct` instances if
        multiple items were returned.
    :raises LookupException: if Amazon reports the request as invalid.
    :raises AsinNotFound: if the response contains no matching item.
    """
    response = self.api.ItemLookup(ResponseGroup=ResponseGroup, **kwargs)
    root = objectify.fromstring(response)
    # Amazon signals invalid requests inside the XML payload rather than
    # via an HTTP error status.
    if root.Items.Request.IsValid == 'False':
        code = root.Items.Request.Errors.Error.Code
        msg = root.Items.Request.Errors.Error.Message
        raise LookupException(
            "Amazon Product Lookup Error: '{0}', '{1}'".format(code, msg))
    if not hasattr(root.Items, 'Item'):
        raise AsinNotFound("ASIN(s) not found: '{0}'".format(
            etree.tostring(root, pretty_print=True)))
    # lxml.objectify exposes repeated <Item> elements as a sequence, so
    # len() distinguishes the single-item and multi-item cases.
    if len(root.Items.Item) > 1:
        return [
            AmazonProduct(
                item,
                self.aws_associate_tag,
                self,
                region=self.region) for item in root.Items.Item
        ]
    else:
        return AmazonProduct(
            root.Items.Item,
            self.aws_associate_tag,
            self,
            region=self.region
        )
Lookup an Amazon Product. :return: An instance of :class:`~.AmazonProduct` if one item was returned, or a list of :class:`~.AmazonProduct` instances if multiple items where returned.
Below is the the instruction that describes the task: ### Input: Lookup an Amazon Product. :return: An instance of :class:`~.AmazonProduct` if one item was returned, or a list of :class:`~.AmazonProduct` instances if multiple items where returned. ### Response: def lookup(self, ResponseGroup="Large", **kwargs): """Lookup an Amazon Product. :return: An instance of :class:`~.AmazonProduct` if one item was returned, or a list of :class:`~.AmazonProduct` instances if multiple items where returned. """ response = self.api.ItemLookup(ResponseGroup=ResponseGroup, **kwargs) root = objectify.fromstring(response) if root.Items.Request.IsValid == 'False': code = root.Items.Request.Errors.Error.Code msg = root.Items.Request.Errors.Error.Message raise LookupException( "Amazon Product Lookup Error: '{0}', '{1}'".format(code, msg)) if not hasattr(root.Items, 'Item'): raise AsinNotFound("ASIN(s) not found: '{0}'".format( etree.tostring(root, pretty_print=True))) if len(root.Items.Item) > 1: return [ AmazonProduct( item, self.aws_associate_tag, self, region=self.region) for item in root.Items.Item ] else: return AmazonProduct( root.Items.Item, self.aws_associate_tag, self, region=self.region )
def delete(self, name):
    '''delete an image from Google Storage.

    Parameters
    ==========
    name: the name of the file (or image) to delete
    '''
    bot.debug("DELETE %s" % name)
    # NOTE(review): `files` is never defined in this function or any
    # visible scope -- as written this loop raises NameError at runtime.
    # Presumably an object listing for `name` should populate it; confirm
    # against the sibling storage methods before relying on this code.
    # Also note `name` itself is never used to filter the deletions.
    for file_object in files:
        if isinstance(file_object, dict):
            if "kind" in file_object:
                if file_object['kind'] == "storage#object":
                    # Drop the trailing generation component of the object
                    # id, then strip one leading "<bucket-name>/" prefix.
                    object_name = "/".join(file_object['id'].split('/')[:-1])
                    object_name = re.sub('%s/' %self._bucket['name'],'',
                                         object_name,1)
                    # NOTE(review): `bucket` is undefined here; this most
                    # likely should be self._bucket['name'] (as used in the
                    # re.sub above) -- confirm and fix.
                    delete_object(service=self._bucket_service,
                                  bucket_name=bucket['name'],
                                  object_name=object_name)
delete an image from Google Storage. Parameters ========== name: the name of the file (or image) to delete
Below is the the instruction that describes the task: ### Input: delete an image from Google Storage. Parameters ========== name: the name of the file (or image) to delete ### Response: def delete(self, name): '''delete an image from Google Storage. Parameters ========== name: the name of the file (or image) to delete ''' bot.debug("DELETE %s" % name) for file_object in files: if isinstance(file_object, dict): if "kind" in file_object: if file_object['kind'] == "storage#object": object_name = "/".join(file_object['id'].split('/')[:-1]) object_name = re.sub('%s/' %self._bucket['name'],'', object_name,1) delete_object(service=self._bucket_service, bucket_name=bucket['name'], object_name=object_name)
def write_sequences_to_tsv(path, seqs):
    """
    Create a TSV (or CSV, depending on the extension) file listing the
    given sequences.

    Arguments
    =========
    path: str or pathlib.Path
        The name of the file to create.  A '.tsv' extension selects tab
        delimiters; a '.csv' extension selects commas.

    seqs: dict
        A mapping of names to sequences, which can be either protein
        or DNA.
    """
    import csv

    out_path = Path(path)
    # Pick the field delimiter from the extension; any other suffix
    # raises KeyError, as an unsupported extension should.
    delimiters = {'.tsv': '\t', '.csv': ','}
    sep = delimiters[out_path.suffix]

    with out_path.open('w') as handle:
        csv.writer(handle, delimiter=sep).writerows(seqs.items())
Create a TSV (or CSV, depending on the extension) file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. If the path extension is '.tsv', fields will be delimited by tabs. If the extension is '.csv', fields will be delimited by commas. seqs: dict A mapping of names to sequences, which can be either protein or DNA.
Below is the the instruction that describes the task: ### Input: Create a TSV (or CSV, depending on the extension) file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. If the path extension is '.tsv', fields will be delimited by tabs. If the extension is '.csv', fields will be delimited by commas. seqs: dict A mapping of names to sequences, which can be either protein or DNA. ### Response: def write_sequences_to_tsv(path, seqs): """ Create a TSV (or CSV, depending on the extension) file listing the given sequences. Arguments ========= path: str or pathlib.Path The name of the file to create. If the path extension is '.tsv', fields will be delimited by tabs. If the extension is '.csv', fields will be delimited by commas. seqs: dict A mapping of names to sequences, which can be either protein or DNA. """ import csv path = Path(path) delimiter = {'.tsv': '\t', '.csv': ','}[path.suffix] with path.open('w') as file: w = csv.writer(file, delimiter=delimiter) for row in seqs.items(): w.writerow(row)
def param_map_rc_send(self, target_system, target_component, param_id, param_index, parameter_rc_channel_index, param_value0, scale, param_value_min, param_value_max, force_mavlink1=False):
    '''
    Bind a RC channel to a parameter so the parameter tracks the
    RC channel value.

    target_system              : System ID (uint8_t)
    target_component           : Component ID (uint8_t)
    param_id                   : Onboard parameter id; NULL-terminated if
                                 shorter than 16 chars, un-terminated at
                                 exactly 16 (char)
    param_index                : Parameter index; -1 to use param_id as the
                                 identifier, -2 to disable an existing map
                                 for this rc_channel_index (int16_t)
    parameter_rc_channel_index : Index of the parameter RC channel (not the
                                 RC channel id) (uint8_t)
    param_value0               : Initial parameter value (float)
    scale                      : Maps the RC range [-1, 1] to a parameter
                                 value (float)
    param_value_min            : Minimum param value; may or may not
                                 override the onboard minimum (float)
    param_value_max            : Maximum param value; may or may not
                                 override the onboard maximum (float)
    '''
    # Encode first, then hand the packed message to the transport.
    message = self.param_map_rc_encode(target_system, target_component,
                                       param_id, param_index,
                                       parameter_rc_channel_index,
                                       param_value0, scale,
                                       param_value_min, param_value_max)
    return self.send(message, force_mavlink1=force_mavlink1)
Bind a RC channel to a parameter. The parameter should change accoding to the RC channel value. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_index : Parameter index. Send -1 to use the param ID field as identifier (else the param id will be ignored), send -2 to disable any existing map for this rc_channel_index. (int16_t) parameter_rc_channel_index : Index of parameter RC channel. Not equal to the RC channel id. Typically correpsonds to a potentiometer-knob on the RC. (uint8_t) param_value0 : Initial parameter value (float) scale : Scale, maps the RC range [-1, 1] to a parameter value (float) param_value_min : Minimum param value. The protocol does not define if this overwrites an onboard minimum value. (Depends on implementation) (float) param_value_max : Maximum param value. The protocol does not define if this overwrites an onboard maximum value. (Depends on implementation) (float)
Below is the the instruction that describes the task: ### Input: Bind a RC channel to a parameter. The parameter should change accoding to the RC channel value. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_index : Parameter index. Send -1 to use the param ID field as identifier (else the param id will be ignored), send -2 to disable any existing map for this rc_channel_index. (int16_t) parameter_rc_channel_index : Index of parameter RC channel. Not equal to the RC channel id. Typically correpsonds to a potentiometer-knob on the RC. (uint8_t) param_value0 : Initial parameter value (float) scale : Scale, maps the RC range [-1, 1] to a parameter value (float) param_value_min : Minimum param value. The protocol does not define if this overwrites an onboard minimum value. (Depends on implementation) (float) param_value_max : Maximum param value. The protocol does not define if this overwrites an onboard maximum value. (Depends on implementation) (float) ### Response: def param_map_rc_send(self, target_system, target_component, param_id, param_index, parameter_rc_channel_index, param_value0, scale, param_value_min, param_value_max, force_mavlink1=False): ''' Bind a RC channel to a parameter. The parameter should change accoding to the RC channel value. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_index : Parameter index. 
Send -1 to use the param ID field as identifier (else the param id will be ignored), send -2 to disable any existing map for this rc_channel_index. (int16_t) parameter_rc_channel_index : Index of parameter RC channel. Not equal to the RC channel id. Typically correpsonds to a potentiometer-knob on the RC. (uint8_t) param_value0 : Initial parameter value (float) scale : Scale, maps the RC range [-1, 1] to a parameter value (float) param_value_min : Minimum param value. The protocol does not define if this overwrites an onboard minimum value. (Depends on implementation) (float) param_value_max : Maximum param value. The protocol does not define if this overwrites an onboard maximum value. (Depends on implementation) (float) ''' return self.send(self.param_map_rc_encode(target_system, target_component, param_id, param_index, parameter_rc_channel_index, param_value0, scale, param_value_min, param_value_max), force_mavlink1=force_mavlink1)
def _management_form(self):
    """Return the ManagementForm instance for this FormSet.

    For a bound formset the form is rebuilt from the submitted data and
    validated; tampered or missing management data is a hard error, as
    in stock Django.  For an unbound formset an initial form is built
    that additionally carries a (pk, revision) pair for every initial
    form -- presumably so concurrent edits can be detected on
    submission (TODO confirm against the submit-side handling).
    """
    if self.is_bound:
        form = ConcurrentManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
        if not form.is_valid():
            raise ValidationError('ManagementForm data is missing or has been tampered with')
    else:
        # Unbound: seed the usual form-count fields plus the per-instance
        # version snapshot taken from the current initial forms.
        form = ConcurrentManagementForm(auto_id=self.auto_id,
                                        prefix=self.prefix,
                                        initial={TOTAL_FORM_COUNT: self.total_form_count(),
                                                 INITIAL_FORM_COUNT: self.initial_form_count(),
                                                 MAX_NUM_FORM_COUNT: self.max_num},
                                        versions=[(form.instance.pk, get_revision_of_object(form.instance)) for form in self.initial_forms])
    return form
Returns the ManagementForm instance for this FormSet.
Below is the the instruction that describes the task: ### Input: Returns the ManagementForm instance for this FormSet. ### Response: def _management_form(self): """Returns the ManagementForm instance for this FormSet.""" if self.is_bound: form = ConcurrentManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix) if not form.is_valid(): raise ValidationError('ManagementForm data is missing or has been tampered with') else: form = ConcurrentManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={TOTAL_FORM_COUNT: self.total_form_count(), INITIAL_FORM_COUNT: self.initial_form_count(), MAX_NUM_FORM_COUNT: self.max_num}, versions=[(form.instance.pk, get_revision_of_object(form.instance)) for form in self.initial_forms]) return form
def rdtxt_gos(go_file, prt):
    """Read GO IDs from a text file.

    go_file: path to a file containing GO:NNNNNNN identifiers; lines
        starting with '#' (optionally indented) are treated as comments.
    prt: optional stream for a one-line summary (falsy to stay quiet).
    Returns the set of all GO IDs found.
    """
    if not os.path.exists(go_file):
        raise RuntimeError("CAN NOT READ GO FILE: {FILE}\n".format(FILE=go_file))
    re_go = re.compile(r'(GO:\d{7})+?')
    re_com = re.compile(r'^\s*#')  # comment lines start with '#'
    goids_all = set()
    with open(go_file) as ifstrm:
        for line in ifstrm:
            if re_com.search(line):
                continue
            # set.update with an empty findall result is a no-op, so no
            # separate emptiness check is needed.
            goids_all.update(re_go.findall(line))
    if prt:
        prt.write(" {N} GO IDs READ: {TXT}\n".format(N=len(goids_all), TXT=go_file))
    return goids_all
Read GO IDs from a file.
Below is the the instruction that describes the task: ### Input: Read GO IDs from a file. ### Response: def rdtxt_gos(go_file, prt): """Read GO IDs from a file.""" goids_all = set() if not os.path.exists(go_file): raise RuntimeError("CAN NOT READ GO FILE: {FILE}\n".format(FILE=go_file)) re_go = re.compile(r'(GO:\d{7})+?') re_com = re.compile(r'^\s*#') # Lines starting with a '#' are comment lines and ignored with open(go_file) as ifstrm: for line in ifstrm: # Skip lines that are comments if re_com.search(line): continue # Search for GO IDs on the line goids_found = re_go.findall(line) if goids_found: goids_all.update(goids_found) if prt: prt.write(" {N} GO IDs READ: {TXT}\n".format(N=len(goids_all), TXT=go_file)) return goids_all
def results_path(self) -> str:
    """The path where the project results will be written.

    Candidates are tried lazily, in priority order: the explicit
    override, the project settings, the global configuration, and
    finally the default location derived from the project uuid.
    """
    def candidates():
        yield self._results_path
        yield self.settings.fetch('path_results')
        yield environ.configs.fetch('results_directory')
        yield environ.paths.results(self.uuid)

    # First candidate that is not None wins; later candidates are never
    # evaluated.
    return next(filter(lambda path: path is not None, candidates()))
The path where the project results will be written
Below is the the instruction that describes the task: ### Input: The path where the project results will be written ### Response: def results_path(self) -> str: """The path where the project results will be written""" def possible_paths(): yield self._results_path yield self.settings.fetch('path_results') yield environ.configs.fetch('results_directory') yield environ.paths.results(self.uuid) return next(p for p in possible_paths() if p is not None)
def col(self, c):
    """Parse one colour specification line.

    Returns the (symbol, value) pair extracted with ``self.COLOUR``;
    raises ParseError (after logging) when the line does not match.
    """
    match = self.COLOUR.search(c)
    if match is None:
        self.logger.fatal("Cannot parse colour specification %r.", c)
        raise ParseError("XPM reader: Cannot parse colour specification {0!r}.".format(c))
    value = match.group('value')
    color = match.group('symbol')
    self.logger.debug("%s: %s %s\n", c.strip(), color, value)
    return color, value
Parse colour specification
Below is the the instruction that describes the task: ### Input: Parse colour specification ### Response: def col(self, c): """Parse colour specification""" m = self.COLOUR.search(c) if not m: self.logger.fatal("Cannot parse colour specification %r.", c) raise ParseError("XPM reader: Cannot parse colour specification {0!r}.".format(c)) value = m.group('value') color = m.group('symbol') self.logger.debug("%s: %s %s\n", c.strip(), color, value) return color, value
def find_by_index(self, cls, index_name, value):
    """Return all stored ``cls`` objects whose ``index_name`` equals ``value``."""
    table_name = cls.get_table_name()
    rows = read_by_indexes(table_name, [(index_name, value)])
    # Each row stores the serialized object under its 'value' key.
    return [cls.from_data(row['value']) for row in rows]
Required functionality.
Below is the the instruction that describes the task: ### Input: Required functionality. ### Response: def find_by_index(self, cls, index_name, value): """Required functionality.""" table_name = cls.get_table_name() index_name_vals = [(index_name, value)] final_results = [] for db_result in read_by_indexes(table_name, index_name_vals): obj = cls.from_data(db_result['value']) final_results.append(obj) return final_results
def wind_send(self, direction, speed, speed_z, force_mavlink1=False):
    '''
    Wind estimation

    direction : wind direction that wind is coming from (degrees) (float)
    speed     : wind speed in ground plane (m/s) (float)
    speed_z   : vertical wind speed (m/s) (float)
    '''
    # Encode the estimate, then hand it to the transport layer.
    message = self.wind_encode(direction, speed, speed_z)
    return self.send(message, force_mavlink1=force_mavlink1)
Wind estimation direction : wind direction that wind is coming from (degrees) (float) speed : wind speed in ground plane (m/s) (float) speed_z : vertical wind speed (m/s) (float)
Below is the the instruction that describes the task: ### Input: Wind estimation direction : wind direction that wind is coming from (degrees) (float) speed : wind speed in ground plane (m/s) (float) speed_z : vertical wind speed (m/s) (float) ### Response: def wind_send(self, direction, speed, speed_z, force_mavlink1=False): ''' Wind estimation direction : wind direction that wind is coming from (degrees) (float) speed : wind speed in ground plane (m/s) (float) speed_z : vertical wind speed (m/s) (float) ''' return self.send(self.wind_encode(direction, speed, speed_z), force_mavlink1=force_mavlink1)
def sitemap_index(request):
    """Return a sitemap index xml file for search engines.

    One sub-sitemap entry is generated per first author found in
    ``latest_modules`` (composite modules and sub-collections excluded);
    each entry's lastmod is that author's most recent revision date.
    """
    sitemaps = []
    with db_connect() as db_connection:
        with db_connection.cursor() as cursor:
            # First author of each latest module, with the newest revision
            # date per author.
            cursor.execute("""\
SELECT authors[1], max(revised)
FROM latest_modules
WHERE portal_type NOT IN ('CompositeModule', 'SubCollection')
GROUP BY authors[1]
""")
            for author, revised in cursor.fetchall():
                sitemaps.append(Sitemap(url=request.route_url(
                    'sitemap', from_id=author), lastmod=revised))
    si = SitemapIndex(sitemaps=sitemaps)
    resp = request.response
    resp.status = '200 OK'
    resp.content_type = 'text/xml'
    # SitemapIndex instances render their XML when called.
    resp.body = si()
    return resp
Return a sitemap index xml file for search engines.
Below is the the instruction that describes the task: ### Input: Return a sitemap index xml file for search engines. ### Response: def sitemap_index(request): """Return a sitemap index xml file for search engines.""" sitemaps = [] with db_connect() as db_connection: with db_connection.cursor() as cursor: cursor.execute("""\ SELECT authors[1], max(revised) FROM latest_modules WHERE portal_type NOT IN ('CompositeModule', 'SubCollection') GROUP BY authors[1] """) for author, revised in cursor.fetchall(): sitemaps.append(Sitemap(url=request.route_url( 'sitemap', from_id=author), lastmod=revised)) si = SitemapIndex(sitemaps=sitemaps) resp = request.response resp.status = '200 OK' resp.content_type = 'text/xml' resp.body = si() return resp
def roche_requiv_L1(q, syncpar, ecc, sma, incl_star, long_an_star, incl_orb, long_an_orb, compno=1):
    """Build a ConstraintParameter for the L1-filling equivalent radius.

    The returned constraint's expression is the string
    ``requiv_L1({q}, {syncpar}, ..., {long_an_orb}, compno)`` where each
    placeholder is the parameter's uniquetwig (or its ``expr`` when the
    argument is itself an expression rather than a parameter).

    NOTE(review): the physical semantics (critical Roche-lobe radius at
    L1) are implied by the name only -- confirm against the constraint
    evaluator that registers 'requiv_L1'.
    """
    return ConstraintParameter(q._bundle, "requiv_L1(%s, %d)" % (", ".join(["{%s}" % (param.uniquetwig if hasattr(param, 'uniquetwig') else param.expr) for param in (q, syncpar, ecc, sma, incl_star, long_an_star, incl_orb, long_an_orb)]), compno))
TODO: add documentation
Below is the the instruction that describes the task: ### Input: TODO: add documentation ### Response: def roche_requiv_L1(q, syncpar, ecc, sma, incl_star, long_an_star, incl_orb, long_an_orb, compno=1): """ TODO: add documentation """ return ConstraintParameter(q._bundle, "requiv_L1(%s, %d)" % (", ".join(["{%s}" % (param.uniquetwig if hasattr(param, 'uniquetwig') else param.expr) for param in (q, syncpar, ecc, sma, incl_star, long_an_star, incl_orb, long_an_orb)]), compno))
def readlines(self, encoding=None):
    """Read ``self.path`` and return its contents as a list of lines.

    :param encoding: text encoding to use; falls back to the module-level
        ``ENCODING`` default when not given.
    :return: list of lines (with line endings), or ``[]`` if the file
        cannot be read or decoded.
    """
    try:
        # BUGFIX: the computed encoding was previously discarded --
        # codecs.open was called with encoding=None regardless of the
        # argument.  Pass it through so callers actually control decoding.
        encoding = encoding or ENCODING
        with codecs.open(self.path, encoding=encoding) as fi:
            return fi.readlines()
    except Exception:
        # Best-effort read: any I/O or decode failure yields an empty
        # list (narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate).
        return []
Reads from the file and returns result as a list of lines.
Below is the the instruction that describes the task: ### Input: Reads from the file and returns result as a list of lines. ### Response: def readlines(self, encoding=None): """Reads from the file and returns result as a list of lines.""" try: encoding = encoding or ENCODING with codecs.open(self.path, encoding=None) as fi: return fi.readlines() except: return []
async def cycle(source):
    """Iterate indefinitely over an asynchronous sequence.

    Note: it does not perform any buffering, but re-iterate over the same
    given sequence instead. If the sequence is not re-iterable, the
    generator might end up looping indefinitely without yielding any item.
    """
    while True:
        # Re-enter the stream context on every pass so a re-iterable
        # source restarts from its beginning.
        async with streamcontext(source) as streamer:
            async for item in streamer:
                yield item
            # Prevent blocking while loop if the stream is empty
            await asyncio.sleep(0)
Iterate indefinitely over an asynchronous sequence. Note: it does not perform any buffering, but re-iterate over the same given sequence instead. If the sequence is not re-iterable, the generator might end up looping indefinitely without yielding any item.
Below is the the instruction that describes the task: ### Input: Iterate indefinitely over an asynchronous sequence. Note: it does not perform any buffering, but re-iterate over the same given sequence instead. If the sequence is not re-iterable, the generator might end up looping indefinitely without yielding any item. ### Response: async def cycle(source): """Iterate indefinitely over an asynchronous sequence. Note: it does not perform any buffering, but re-iterate over the same given sequence instead. If the sequence is not re-iterable, the generator might end up looping indefinitely without yielding any item. """ while True: async with streamcontext(source) as streamer: async for item in streamer: yield item # Prevent blocking while loop if the stream is empty await asyncio.sleep(0)
def get_approvals(self, issue_id_or_key, start=0, limit=50):
    """
    Get all approvals on a request, for a given request ID/Key

    :param issue_id_or_key: str
    :param start: OPTIONAL: int
    :param limit: OPTIONAL: int
    :return:
    """
    endpoint = 'rest/servicedeskapi/request/{}/approval'.format(issue_id_or_key)
    # Only include paging parameters that were actually supplied.
    query = {key: int(val)
             for key, val in (('start', start), ('limit', limit))
             if val is not None}
    response = self.get(endpoint, headers=self.experimental_headers, params=query)
    return response.get('values')
Get all approvals on a request, for a given request ID/Key :param issue_id_or_key: str :param start: OPTIONAL: int :param limit: OPTIONAL: int :return:
Below is the the instruction that describes the task: ### Input: Get all approvals on a request, for a given request ID/Key :param issue_id_or_key: str :param start: OPTIONAL: int :param limit: OPTIONAL: int :return: ### Response: def get_approvals(self, issue_id_or_key, start=0, limit=50): """ Get all approvals on a request, for a given request ID/Key :param issue_id_or_key: str :param start: OPTIONAL: int :param limit: OPTIONAL: int :return: """ url = 'rest/servicedeskapi/request/{}/approval'.format(issue_id_or_key) params = {} if start is not None: params['start'] = int(start) if limit is not None: params['limit'] = int(limit) return self.get(url, headers=self.experimental_headers, params=params).get('values')
def shap_values(self, X, nsamples=200, ranked_outputs=None, output_rank_order="max", rseed=None):
    """Return SHAP values for the model applied to ``X``.

    :param X: samples to explain (numpy array / DataFrame for tensorflow,
        torch.tensor for pytorch; lists of tensors are also accepted).
    :param nsamples: number of sampling passes used in the estimate.
    :param ranked_outputs: None to explain every model output, or a
        positive integer N to explain only the top-N outputs; in the
        latter case a (shap_values, indexes) pair is returned, where
        `indexes` records which outputs were chosen per sample.
    :param output_rank_order: "max", "min", "max_abs", or "custom"
        (with "custom", `ranked_outputs` lists explicit output nodes).
    :param rseed: optional seed for the randomness in the computation
        (background choice, interpolation, smoothing).
    :return: a tensor shaped like X for single-output models, a list of
        such tensors for multi-output models, or the pair described
        above when `ranked_outputs` is an int.
    """
    # Pure delegation: the wrapped explainer implements the sampling.
    return self.explainer.shap_values(
        X, nsamples, ranked_outputs, output_rank_order, rseed)
Return the values for the model applied to X. Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of the output ranks, and indexes is a matrix that tells for each sample which output indexes were choses as "top". output_rank_order : "max", "min", "max_abs", or "custom" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes. rseed : None or int Seeding the randomness in shap value computation (background example choice, interpolation between current and background example, smoothing). Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that tells for each sample which output indexes were chosen as "top".
Below is the the instruction that describes the task: ### Input: Return the values for the model applied to X. Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of the output ranks, and indexes is a matrix that tells for each sample which output indexes were choses as "top". output_rank_order : "max", "min", "max_abs", or "custom" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes. rseed : None or int Seeding the randomness in shap value computation (background example choice, interpolation between current and background example, smoothing). Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that tells for each sample which output indexes were chosen as "top". ### Response: def shap_values(self, X, nsamples=200, ranked_outputs=None, output_rank_order="max", rseed=None): """ Return the values for the model applied to X. 
Parameters ---------- X : list, if framework == 'tensorflow': numpy.array, or pandas.DataFrame if framework == 'pytorch': torch.tensor A tensor (or list of tensors) of samples (where X.shape[0] == # samples) on which to explain the model's output. ranked_outputs : None or int If ranked_outputs is None then we explain all the outputs in a multi-output model. If ranked_outputs is a positive integer then we only explain that many of the top model outputs (where "top" is determined by output_rank_order). Note that this causes a pair of values to be returned (shap_values, indexes), where phi is a list of numpy arrays for each of the output ranks, and indexes is a matrix that tells for each sample which output indexes were choses as "top". output_rank_order : "max", "min", "max_abs", or "custom" How to order the model outputs when using ranked_outputs, either by maximum, minimum, or maximum absolute value. If "custom" Then "ranked_outputs" contains a list of output nodes. rseed : None or int Seeding the randomness in shap value computation (background example choice, interpolation between current and background example, smoothing). Returns ------- For a models with a single output this returns a tensor of SHAP values with the same shape as X. For a model with multiple outputs this returns a list of SHAP value tensors, each of which are the same shape as X. If ranked_outputs is None then this list of tensors matches the number of model outputs. If ranked_outputs is a positive integer a pair is returned (shap_values, indexes), where shap_values is a list of tensors with a length of ranked_outputs, and indexes is a matrix that tells for each sample which output indexes were chosen as "top". """ return self.explainer.shap_values(X, nsamples, ranked_outputs, output_rank_order, rseed)
def set_config(self, config): """ Set SDB based config """ self._config = config self._config.dump_to_sdb("botoConfigs", self.id)
Set SDB based config
Below is the the instruction that describes the task: ### Input: Set SDB based config ### Response: def set_config(self, config): """ Set SDB based config """ self._config = config self._config.dump_to_sdb("botoConfigs", self.id)
def idle_task(self): '''called rapidly by mavproxy''' if self.downloaders_lock.acquire(False): removed_one = False for url in self.downloaders.keys(): if not self.downloaders[url].is_alive(): print("fw: Download thread for (%s) done" % url) del self.downloaders[url] removed_one = True if removed_one and not self.downloaders.keys(): # all downloads finished - parse them self.manifests_parse() self.downloaders_lock.release()
called rapidly by mavproxy
Below is the the instruction that describes the task: ### Input: called rapidly by mavproxy ### Response: def idle_task(self): '''called rapidly by mavproxy''' if self.downloaders_lock.acquire(False): removed_one = False for url in self.downloaders.keys(): if not self.downloaders[url].is_alive(): print("fw: Download thread for (%s) done" % url) del self.downloaders[url] removed_one = True if removed_one and not self.downloaders.keys(): # all downloads finished - parse them self.manifests_parse() self.downloaders_lock.release()
def split_by_percent(self, spin_systems_list): """Split list of spin systems by specified percentages. :param list spin_systems_list: List of spin systems. :return: List of spin systems divided into sub-lists corresponding to specified split percentages. :rtype: :py:class:`list` """ chunk_sizes = [int((i*len(spin_systems_list))/100) for i in self.plsplit] if sum(chunk_sizes) < len(spin_systems_list): difference = len(spin_systems_list) - sum(chunk_sizes) chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += difference assert sum(chunk_sizes) == len(spin_systems_list), \ "sum of chunk sizes must be equal to spin systems list length." intervals = self.calculate_intervals(chunk_sizes) chunks_of_spin_systems_by_percentage = [itertools.islice(spin_systems_list, *interval) for interval in intervals] return chunks_of_spin_systems_by_percentage
Split list of spin systems by specified percentages. :param list spin_systems_list: List of spin systems. :return: List of spin systems divided into sub-lists corresponding to specified split percentages. :rtype: :py:class:`list`
Below is the the instruction that describes the task: ### Input: Split list of spin systems by specified percentages. :param list spin_systems_list: List of spin systems. :return: List of spin systems divided into sub-lists corresponding to specified split percentages. :rtype: :py:class:`list` ### Response: def split_by_percent(self, spin_systems_list): """Split list of spin systems by specified percentages. :param list spin_systems_list: List of spin systems. :return: List of spin systems divided into sub-lists corresponding to specified split percentages. :rtype: :py:class:`list` """ chunk_sizes = [int((i*len(spin_systems_list))/100) for i in self.plsplit] if sum(chunk_sizes) < len(spin_systems_list): difference = len(spin_systems_list) - sum(chunk_sizes) chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += difference assert sum(chunk_sizes) == len(spin_systems_list), \ "sum of chunk sizes must be equal to spin systems list length." intervals = self.calculate_intervals(chunk_sizes) chunks_of_spin_systems_by_percentage = [itertools.islice(spin_systems_list, *interval) for interval in intervals] return chunks_of_spin_systems_by_percentage
def check_str_length(str_to_check, limit=MAX_LENGTH): """Check the length of a string. If exceeds limit, then truncate it. :type str_to_check: str :param str_to_check: String to check. :type limit: int :param limit: The upper limit of the length. :rtype: tuple :returns: The string it self if not exceeded length, or truncated string if exceeded and the truncated byte count. """ str_bytes = str_to_check.encode(UTF8) str_len = len(str_bytes) truncated_byte_count = 0 if str_len > limit: truncated_byte_count = str_len - limit str_bytes = str_bytes[:limit] result = str(str_bytes.decode(UTF8, errors='ignore')) return (result, truncated_byte_count)
Check the length of a string. If exceeds limit, then truncate it. :type str_to_check: str :param str_to_check: String to check. :type limit: int :param limit: The upper limit of the length. :rtype: tuple :returns: The string it self if not exceeded length, or truncated string if exceeded and the truncated byte count.
Below is the the instruction that describes the task: ### Input: Check the length of a string. If exceeds limit, then truncate it. :type str_to_check: str :param str_to_check: String to check. :type limit: int :param limit: The upper limit of the length. :rtype: tuple :returns: The string it self if not exceeded length, or truncated string if exceeded and the truncated byte count. ### Response: def check_str_length(str_to_check, limit=MAX_LENGTH): """Check the length of a string. If exceeds limit, then truncate it. :type str_to_check: str :param str_to_check: String to check. :type limit: int :param limit: The upper limit of the length. :rtype: tuple :returns: The string it self if not exceeded length, or truncated string if exceeded and the truncated byte count. """ str_bytes = str_to_check.encode(UTF8) str_len = len(str_bytes) truncated_byte_count = 0 if str_len > limit: truncated_byte_count = str_len - limit str_bytes = str_bytes[:limit] result = str(str_bytes.decode(UTF8, errors='ignore')) return (result, truncated_byte_count)
def get_parent_repository_ids(self, repository_id): """Gets the parent ``Ids`` of the given repository. arg: repository_id (osid.id.Id): a repository ``Id`` return: (osid.id.IdList) - the parent ``Ids`` of the repository raise: NotFound - ``repository_id`` is not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_parent_bin_ids if self._catalog_session is not None: return self._catalog_session.get_parent_catalog_ids(catalog_id=repository_id) return self._hierarchy_session.get_parents(id_=repository_id)
Gets the parent ``Ids`` of the given repository. arg: repository_id (osid.id.Id): a repository ``Id`` return: (osid.id.IdList) - the parent ``Ids`` of the repository raise: NotFound - ``repository_id`` is not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Gets the parent ``Ids`` of the given repository. arg: repository_id (osid.id.Id): a repository ``Id`` return: (osid.id.IdList) - the parent ``Ids`` of the repository raise: NotFound - ``repository_id`` is not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* ### Response: def get_parent_repository_ids(self, repository_id): """Gets the parent ``Ids`` of the given repository. arg: repository_id (osid.id.Id): a repository ``Id`` return: (osid.id.IdList) - the parent ``Ids`` of the repository raise: NotFound - ``repository_id`` is not found raise: NullArgument - ``repository_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_parent_bin_ids if self._catalog_session is not None: return self._catalog_session.get_parent_catalog_ids(catalog_id=repository_id) return self._hierarchy_session.get_parents(id_=repository_id)
def has_file_with(path, filename, content): """ Check whether *filename* in *path* contains the string *content*. """ try: with open(os.path.join(path, filename), "rb") as f: return content in f.read() except IOError as e: if e.errno == errno.ENOENT: return False else: raise
Check whether *filename* in *path* contains the string *content*.
Below is the the instruction that describes the task: ### Input: Check whether *filename* in *path* contains the string *content*. ### Response: def has_file_with(path, filename, content): """ Check whether *filename* in *path* contains the string *content*. """ try: with open(os.path.join(path, filename), "rb") as f: return content in f.read() except IOError as e: if e.errno == errno.ENOENT: return False else: raise
def is_alive(self): """Helper function to show if send & recv Threads are running """ if self.__send_ready.is_set() and self.__recv_ready.is_set(): if self.__send_thread is not None and self.__recv_thread is not None: return self.__send_thread.is_alive() and self.__recv_thread.is_alive() return False
Helper function to show if send & recv Threads are running
Below is the the instruction that describes the task: ### Input: Helper function to show if send & recv Threads are running ### Response: def is_alive(self): """Helper function to show if send & recv Threads are running """ if self.__send_ready.is_set() and self.__recv_ready.is_set(): if self.__send_thread is not None and self.__recv_thread is not None: return self.__send_thread.is_alive() and self.__recv_thread.is_alive() return False
def write_documentation(self, doc_type): """ Build all the doc page for handlers :param doc_type: Type of doc to generate (controller, compute) """ for handler_name in sorted(self._documentation): if "controller." in handler_name: server_type = "controller" elif "compute" in handler_name: server_type = "compute" else: server_type = "root" if doc_type != server_type: continue print("Build {}".format(handler_name)) for path in sorted(self._documentation[handler_name]): api_version = self._documentation[handler_name][path]["api_version"] if api_version is None: continue filename = self._file_path(path) handler_doc = self._documentation[handler_name][path] handler = handler_name.replace(server_type + ".", "") self._create_handler_directory(handler, api_version, server_type) with open("{}/api/v{}/{}/{}/{}.rst".format(self._directory, api_version, server_type, handler, filename), 'w+') as f: f.write('{}\n------------------------------------------------------------------------------------------------------------------------------------------\n\n'.format(path)) f.write('.. 
contents::\n') for method in handler_doc["methods"]: f.write('\n{} {}\n'.format(method["method"], path.replace("{", '**{').replace("}", "}**"))) f.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n') f.write('{}\n\n'.format(method["description"])) if len(method["parameters"]) > 0: f.write("Parameters\n**********\n") for parameter in method["parameters"]: desc = method["parameters"][parameter] f.write("- **{}**: {}\n".format(parameter, desc)) f.write("\n") f.write("Response status codes\n**********************\n") for code in method["status_codes"]: desc = method["status_codes"][code] f.write("- **{}**: {}\n".format(code, desc)) f.write("\n") if "properties" in method["input_schema"]: f.write("Input\n*******\n") self._write_definitions(f, method["input_schema"]) self._write_json_schema(f, method["input_schema"]) if "properties" in method["output_schema"]: f.write("Output\n*******\n") self._write_json_schema(f, method["output_schema"]) self._include_query_example(f, method, path, api_version, server_type)
Build all the doc page for handlers :param doc_type: Type of doc to generate (controller, compute)
Below is the the instruction that describes the task: ### Input: Build all the doc page for handlers :param doc_type: Type of doc to generate (controller, compute) ### Response: def write_documentation(self, doc_type): """ Build all the doc page for handlers :param doc_type: Type of doc to generate (controller, compute) """ for handler_name in sorted(self._documentation): if "controller." in handler_name: server_type = "controller" elif "compute" in handler_name: server_type = "compute" else: server_type = "root" if doc_type != server_type: continue print("Build {}".format(handler_name)) for path in sorted(self._documentation[handler_name]): api_version = self._documentation[handler_name][path]["api_version"] if api_version is None: continue filename = self._file_path(path) handler_doc = self._documentation[handler_name][path] handler = handler_name.replace(server_type + ".", "") self._create_handler_directory(handler, api_version, server_type) with open("{}/api/v{}/{}/{}/{}.rst".format(self._directory, api_version, server_type, handler, filename), 'w+') as f: f.write('{}\n------------------------------------------------------------------------------------------------------------------------------------------\n\n'.format(path)) f.write('.. 
contents::\n') for method in handler_doc["methods"]: f.write('\n{} {}\n'.format(method["method"], path.replace("{", '**{').replace("}", "}**"))) f.write('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n') f.write('{}\n\n'.format(method["description"])) if len(method["parameters"]) > 0: f.write("Parameters\n**********\n") for parameter in method["parameters"]: desc = method["parameters"][parameter] f.write("- **{}**: {}\n".format(parameter, desc)) f.write("\n") f.write("Response status codes\n**********************\n") for code in method["status_codes"]: desc = method["status_codes"][code] f.write("- **{}**: {}\n".format(code, desc)) f.write("\n") if "properties" in method["input_schema"]: f.write("Input\n*******\n") self._write_definitions(f, method["input_schema"]) self._write_json_schema(f, method["input_schema"]) if "properties" in method["output_schema"]: f.write("Output\n*******\n") self._write_json_schema(f, method["output_schema"]) self._include_query_example(f, method, path, api_version, server_type)
def set_sampler_info(self, sample): """Updates the Sampler and the Sample Date with the values provided in the request. If neither Sampler nor SampleDate are present in the request, returns False """ if sample.getSampler() and sample.getDateSampled(): # Sampler and Date Sampled already set. This is correct return True sampler = self.get_form_value("Sampler", sample, sample.getSampler()) sampled = self.get_form_value("getDateSampled", sample, sample.getDateSampled()) if not all([sampler, sampled]): return False sample.setSampler(sampler) sample.setDateSampled(DateTime(sampled)) return True
Updates the Sampler and the Sample Date with the values provided in the request. If neither Sampler nor SampleDate are present in the request, returns False
Below is the the instruction that describes the task: ### Input: Updates the Sampler and the Sample Date with the values provided in the request. If neither Sampler nor SampleDate are present in the request, returns False ### Response: def set_sampler_info(self, sample): """Updates the Sampler and the Sample Date with the values provided in the request. If neither Sampler nor SampleDate are present in the request, returns False """ if sample.getSampler() and sample.getDateSampled(): # Sampler and Date Sampled already set. This is correct return True sampler = self.get_form_value("Sampler", sample, sample.getSampler()) sampled = self.get_form_value("getDateSampled", sample, sample.getDateSampled()) if not all([sampler, sampled]): return False sample.setSampler(sampler) sample.setDateSampled(DateTime(sampled)) return True
def _handle_array_ref(self, node, scope, ctxt, stream): """Handle ArrayRef nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ ary = self._handle_node(node.name, scope, ctxt, stream) subscript = self._handle_node(node.subscript, scope, ctxt, stream) return ary[fields.PYVAL(subscript)]
Handle ArrayRef nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO
Below is the the instruction that describes the task: ### Input: Handle ArrayRef nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO ### Response: def _handle_array_ref(self, node, scope, ctxt, stream): """Handle ArrayRef nodes :node: TODO :scope: TODO :ctxt: TODO :stream: TODO :returns: TODO """ ary = self._handle_node(node.name, scope, ctxt, stream) subscript = self._handle_node(node.subscript, scope, ctxt, stream) return ary[fields.PYVAL(subscript)]
def build_save_containers(platforms, registry, load_cache) -> int: """ Entry point to build and upload all built dockerimages in parallel :param platforms: List of platforms :param registry: Docker registry name :param load_cache: Load cache before building :return: 1 if error occurred, 0 otherwise """ from joblib import Parallel, delayed if len(platforms) == 0: return 0 platform_results = Parallel(n_jobs=PARALLEL_BUILDS, backend="multiprocessing")( delayed(_build_save_container)(platform, registry, load_cache) for platform in platforms) is_error = False for platform_result in platform_results: if platform_result is not None: logging.error('Failed to generate %s', platform_result) is_error = True return 1 if is_error else 0
Entry point to build and upload all built dockerimages in parallel :param platforms: List of platforms :param registry: Docker registry name :param load_cache: Load cache before building :return: 1 if error occurred, 0 otherwise
Below is the the instruction that describes the task: ### Input: Entry point to build and upload all built dockerimages in parallel :param platforms: List of platforms :param registry: Docker registry name :param load_cache: Load cache before building :return: 1 if error occurred, 0 otherwise ### Response: def build_save_containers(platforms, registry, load_cache) -> int: """ Entry point to build and upload all built dockerimages in parallel :param platforms: List of platforms :param registry: Docker registry name :param load_cache: Load cache before building :return: 1 if error occurred, 0 otherwise """ from joblib import Parallel, delayed if len(platforms) == 0: return 0 platform_results = Parallel(n_jobs=PARALLEL_BUILDS, backend="multiprocessing")( delayed(_build_save_container)(platform, registry, load_cache) for platform in platforms) is_error = False for platform_result in platform_results: if platform_result is not None: logging.error('Failed to generate %s', platform_result) is_error = True return 1 if is_error else 0
def _fluent_size(self, fluents, ordering) -> Sequence[Sequence[int]]: '''Returns the sizes of `fluents` following the given `ordering`. Returns: Sequence[Sequence[int]]: A tuple of tuple of integers representing the shape and size of each fluent. ''' shapes = [] for name in ordering: fluent = fluents[name] shape = self._param_types_to_shape(fluent.param_types) shapes.append(shape) return tuple(shapes)
Returns the sizes of `fluents` following the given `ordering`. Returns: Sequence[Sequence[int]]: A tuple of tuple of integers representing the shape and size of each fluent.
Below is the the instruction that describes the task: ### Input: Returns the sizes of `fluents` following the given `ordering`. Returns: Sequence[Sequence[int]]: A tuple of tuple of integers representing the shape and size of each fluent. ### Response: def _fluent_size(self, fluents, ordering) -> Sequence[Sequence[int]]: '''Returns the sizes of `fluents` following the given `ordering`. Returns: Sequence[Sequence[int]]: A tuple of tuple of integers representing the shape and size of each fluent. ''' shapes = [] for name in ordering: fluent = fluents[name] shape = self._param_types_to_shape(fluent.param_types) shapes.append(shape) return tuple(shapes)
def write_file(filename, content): """ Writes a file with the given filename and content and returns None. :param filename: A string containing the target filename. :param content: A string containing the data to be written. :return: None """ with open_file(filename, 'w') as f: f.write(content) file_prepender(filename)
Writes a file with the given filename and content and returns None. :param filename: A string containing the target filename. :param content: A string containing the data to be written. :return: None
Below is the the instruction that describes the task: ### Input: Writes a file with the given filename and content and returns None. :param filename: A string containing the target filename. :param content: A string containing the data to be written. :return: None ### Response: def write_file(filename, content): """ Writes a file with the given filename and content and returns None. :param filename: A string containing the target filename. :param content: A string containing the data to be written. :return: None """ with open_file(filename, 'w') as f: f.write(content) file_prepender(filename)
def __from_xml(self,value): """Initialize a `VCardAdr` object from and XML element. :Parameters: - `value`: field value as an XML node :Types: - `value`: `libxml2.xmlNode`""" n=value.children vns=get_node_ns(value) while n: if n.type!='element': n=n.next continue ns=get_node_ns(n) if (ns and vns and ns.getContent()!=vns.getContent()): n=n.next continue if n.name=='POBOX': self.pobox=unicode(n.getContent(),"utf-8","replace") elif n.name in ('EXTADR', 'EXTADD'): self.extadr=unicode(n.getContent(),"utf-8","replace") elif n.name=='STREET': self.street=unicode(n.getContent(),"utf-8","replace") elif n.name=='LOCALITY': self.locality=unicode(n.getContent(),"utf-8","replace") elif n.name=='REGION': self.region=unicode(n.getContent(),"utf-8","replace") elif n.name=='PCODE': self.pcode=unicode(n.getContent(),"utf-8","replace") elif n.name=='CTRY': self.ctry=unicode(n.getContent(),"utf-8","replace") elif n.name in ("HOME","WORK","POSTAL","PARCEL","DOM","INTL", "PREF"): self.type.append(n.name.lower()) n=n.next if self.type==[]: self.type=["intl","postal","parcel","work"] elif "dom" in self.type and "intl" in self.type: raise ValueError("Both 'dom' and 'intl' specified in vcard ADR")
Initialize a `VCardAdr` object from and XML element. :Parameters: - `value`: field value as an XML node :Types: - `value`: `libxml2.xmlNode`
Below is the the instruction that describes the task: ### Input: Initialize a `VCardAdr` object from and XML element. :Parameters: - `value`: field value as an XML node :Types: - `value`: `libxml2.xmlNode` ### Response: def __from_xml(self,value): """Initialize a `VCardAdr` object from and XML element. :Parameters: - `value`: field value as an XML node :Types: - `value`: `libxml2.xmlNode`""" n=value.children vns=get_node_ns(value) while n: if n.type!='element': n=n.next continue ns=get_node_ns(n) if (ns and vns and ns.getContent()!=vns.getContent()): n=n.next continue if n.name=='POBOX': self.pobox=unicode(n.getContent(),"utf-8","replace") elif n.name in ('EXTADR', 'EXTADD'): self.extadr=unicode(n.getContent(),"utf-8","replace") elif n.name=='STREET': self.street=unicode(n.getContent(),"utf-8","replace") elif n.name=='LOCALITY': self.locality=unicode(n.getContent(),"utf-8","replace") elif n.name=='REGION': self.region=unicode(n.getContent(),"utf-8","replace") elif n.name=='PCODE': self.pcode=unicode(n.getContent(),"utf-8","replace") elif n.name=='CTRY': self.ctry=unicode(n.getContent(),"utf-8","replace") elif n.name in ("HOME","WORK","POSTAL","PARCEL","DOM","INTL", "PREF"): self.type.append(n.name.lower()) n=n.next if self.type==[]: self.type=["intl","postal","parcel","work"] elif "dom" in self.type and "intl" in self.type: raise ValueError("Both 'dom' and 'intl' specified in vcard ADR")
def insert(self, table, records, create_cols=False, dtypes=None): """ Insert one or many records in the database from a dictionary or a list of dictionaries """ if self._check_db() is False: return try: table = self.db[table] except Exception as e: self.err(e, "Can not find table " + table) t = type(records) if t == dict: func = table.insert elif t == list: func = table.insert_many else: msg = "Rows datatype " + \ str(t) + " not valid: use a list or a dictionary" self.err(msg) if create_cols is True: try: func(records, ensure=True, types=dtypes) except Exception as e: self.err(e, "Can not insert create columns and insert data") return else: try: func(records, types=dtypes) except Exception as e: self.err(e, "Can not insert data") return self.ok("Rows inserted in the database")
Insert one or many records in the database from a dictionary or a list of dictionaries
Below is the the instruction that describes the task: ### Input: Insert one or many records in the database from a dictionary or a list of dictionaries ### Response: def insert(self, table, records, create_cols=False, dtypes=None): """ Insert one or many records in the database from a dictionary or a list of dictionaries """ if self._check_db() is False: return try: table = self.db[table] except Exception as e: self.err(e, "Can not find table " + table) t = type(records) if t == dict: func = table.insert elif t == list: func = table.insert_many else: msg = "Rows datatype " + \ str(t) + " not valid: use a list or a dictionary" self.err(msg) if create_cols is True: try: func(records, ensure=True, types=dtypes) except Exception as e: self.err(e, "Can not insert create columns and insert data") return else: try: func(records, types=dtypes) except Exception as e: self.err(e, "Can not insert data") return self.ok("Rows inserted in the database")
def white(self): """ Build command for turning the led into white mode. :return: The command. """ return self._build_command(self._offset(0xC5), select=True, select_command=self.on())
Build command for turning the led into white mode. :return: The command.
Below is the the instruction that describes the task: ### Input: Build command for turning the led into white mode. :return: The command. ### Response: def white(self): """ Build command for turning the led into white mode. :return: The command. """ return self._build_command(self._offset(0xC5), select=True, select_command=self.on())
def run_command(cmd, debug=False): """ Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None """ if debug: msg = ' PWD: {}'.format(os.getcwd()) print_warn(msg) msg = ' COMMAND: {}'.format(cmd) print_warn(msg) cmd()
Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None
Below is the the instruction that describes the task: ### Input: Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None ### Response: def run_command(cmd, debug=False): """ Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None """ if debug: msg = ' PWD: {}'.format(os.getcwd()) print_warn(msg) msg = ' COMMAND: {}'.format(cmd) print_warn(msg) cmd()
def check_switch_vendor(old_vendor, name, urls, _depth=0): """Check if the project should switch vendors. E.g project pushed on pypi, but changelog on launchpad. :param name: str, name of the project :param urls: set, urls to check. :return: tuple, (str(new vendor name), str(new project name)) """ if _depth > 3: # Protect against recursive things vendors here. return "" new_name = check_for_launchpad(old_vendor, name, urls) if new_name: return "launchpad", new_name return "", ""
Check if the project should switch vendors. E.g project pushed on pypi, but changelog on launchpad. :param name: str, name of the project :param urls: set, urls to check. :return: tuple, (str(new vendor name), str(new project name))
Below is the the instruction that describes the task: ### Input: Check if the project should switch vendors. E.g project pushed on pypi, but changelog on launchpad. :param name: str, name of the project :param urls: set, urls to check. :return: tuple, (str(new vendor name), str(new project name)) ### Response: def check_switch_vendor(old_vendor, name, urls, _depth=0): """Check if the project should switch vendors. E.g project pushed on pypi, but changelog on launchpad. :param name: str, name of the project :param urls: set, urls to check. :return: tuple, (str(new vendor name), str(new project name)) """ if _depth > 3: # Protect against recursive things vendors here. return "" new_name = check_for_launchpad(old_vendor, name, urls) if new_name: return "launchpad", new_name return "", ""
def medlineParser(pubFile): """Parses a medline file, _pubFile_, to extract the individual entries as [MedlineRecords](#metaknowledge.medline.recordMedline.MedlineRecord). A medline file is a series of entries, each entry is a series of tags. A tag is a 2 to 4 character string each tag is padded with spaces on the left to make it 4 characters which is followed by a dash and a space (`'- '`). Everything after the tag and on all lines after it not starting with a tag is considered associated with the tag. Each entry's first tag is `PMID`, so a first line looks something like `PMID- 26524502`. Entries end with a single blank line. # Parameters _pubFile_ : `str` > A path to a valid medline file, use [isMedlineFile](#metaknowledge.medline.medlineHandlers.isMedlineFile) to verify # Returns `set[MedlineRecord]` > Records for each of the entries """ #assumes the file is MEDLINE recSet = set() error = None lineNum = 0 try: with open(pubFile, 'r', encoding = 'latin-1') as openfile: f = enumerate(openfile, start = 1) lineNum, line = next(f) try: while True: if line.startswith("PMID- "): try: r = MedlineRecord(itertools.chain([(lineNum, line)], f), sFile = pubFile, sLine = lineNum) recSet.add(r) except BadPubmedFile as e: badLine = lineNum try: lineNum, line = next(f) while not line.startswith("PMID- "): lineNum, line = next(f) except (StopIteration, UnicodeDecodeError) as e: if error is None: error = BadPubmedFile("The file '{}' becomes unparsable after line: {}, due to the error: {} ".format(pubFile, badLine, e)) raise e elif line != '\n': if error is None: error = BadPubmedFile("The file '{}' has parts of it that are unparsable starting at line: {}.".format(pubFile, lineNum)) lineNum, line = next(f) except StopIteration: #End of the file has been reached pass except UnicodeDecodeError: if error is None: error = BadPubmedFile("The file '{}' has parts of it that are unparsable starting at line: {}.".format(pubFile, lineNum)) return recSet, error
Parses a medline file, _pubFile_, to extract the individual entries as [MedlineRecords](#metaknowledge.medline.recordMedline.MedlineRecord). A medline file is a series of entries, each entry is a series of tags. A tag is a 2 to 4 character string each tag is padded with spaces on the left to make it 4 characters which is followed by a dash and a space (`'- '`). Everything after the tag and on all lines after it not starting with a tag is considered associated with the tag. Each entry's first tag is `PMID`, so a first line looks something like `PMID- 26524502`. Entries end with a single blank line. # Parameters _pubFile_ : `str` > A path to a valid medline file, use [isMedlineFile](#metaknowledge.medline.medlineHandlers.isMedlineFile) to verify # Returns `set[MedlineRecord]` > Records for each of the entries
Below is the the instruction that describes the task: ### Input: Parses a medline file, _pubFile_, to extract the individual entries as [MedlineRecords](#metaknowledge.medline.recordMedline.MedlineRecord). A medline file is a series of entries, each entry is a series of tags. A tag is a 2 to 4 character string each tag is padded with spaces on the left to make it 4 characters which is followed by a dash and a space (`'- '`). Everything after the tag and on all lines after it not starting with a tag is considered associated with the tag. Each entry's first tag is `PMID`, so a first line looks something like `PMID- 26524502`. Entries end with a single blank line. # Parameters _pubFile_ : `str` > A path to a valid medline file, use [isMedlineFile](#metaknowledge.medline.medlineHandlers.isMedlineFile) to verify # Returns `set[MedlineRecord]` > Records for each of the entries ### Response: def medlineParser(pubFile): """Parses a medline file, _pubFile_, to extract the individual entries as [MedlineRecords](#metaknowledge.medline.recordMedline.MedlineRecord). A medline file is a series of entries, each entry is a series of tags. A tag is a 2 to 4 character string each tag is padded with spaces on the left to make it 4 characters which is followed by a dash and a space (`'- '`). Everything after the tag and on all lines after it not starting with a tag is considered associated with the tag. Each entry's first tag is `PMID`, so a first line looks something like `PMID- 26524502`. Entries end with a single blank line. 
# Parameters _pubFile_ : `str` > A path to a valid medline file, use [isMedlineFile](#metaknowledge.medline.medlineHandlers.isMedlineFile) to verify # Returns `set[MedlineRecord]` > Records for each of the entries """ #assumes the file is MEDLINE recSet = set() error = None lineNum = 0 try: with open(pubFile, 'r', encoding = 'latin-1') as openfile: f = enumerate(openfile, start = 1) lineNum, line = next(f) try: while True: if line.startswith("PMID- "): try: r = MedlineRecord(itertools.chain([(lineNum, line)], f), sFile = pubFile, sLine = lineNum) recSet.add(r) except BadPubmedFile as e: badLine = lineNum try: lineNum, line = next(f) while not line.startswith("PMID- "): lineNum, line = next(f) except (StopIteration, UnicodeDecodeError) as e: if error is None: error = BadPubmedFile("The file '{}' becomes unparsable after line: {}, due to the error: {} ".format(pubFile, badLine, e)) raise e elif line != '\n': if error is None: error = BadPubmedFile("The file '{}' has parts of it that are unparsable starting at line: {}.".format(pubFile, lineNum)) lineNum, line = next(f) except StopIteration: #End of the file has been reached pass except UnicodeDecodeError: if error is None: error = BadPubmedFile("The file '{}' has parts of it that are unparsable starting at line: {}.".format(pubFile, lineNum)) return recSet, error
def metablock(parsed):
    """Clean a meta block: collapse whitespace, drop stray commas, strip markup.

    Removes newlines, squeezes runs of whitespace down to single spaces,
    repairs space-before-comma artifacts, then strips HTML tags/entities
    and escapes the remaining text.
    """
    collapsed = parsed.replace("\n", "")
    normalized = " ".join(collapsed.split()).replace(" ,", ",")
    return escape(strip_tags(decode_entities(normalized)))
Remove HTML tags, entities and superfluous characters from meta blocks.
Below is the the instruction that describes the task: ### Input: Remove HTML tags, entities and superfluous characters from meta blocks. ### Response: def metablock(parsed): """ Remove HTML tags, entities and superfluous characters from meta blocks. """ parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",") return escape(strip_tags(decode_entities(parsed)))
def parse_options(settings):
    '''Parse command line options.

    Reads ``sys.argv`` via :func:`getopt.getopt` (recognising the ``-x``
    flag) and stores the configuration file path into
    ``settings['configfile']``.

    :param settings: dict-like settings object to populate
    :returns: the same ``settings`` object, with ``'configfile'`` set
    :raises IndexError: if no configuration file argument was supplied
    '''
    # getopt receives the full sys.argv including argv[0]; option parsing
    # therefore stops at the (non-option) program name, so args[0] is the
    # program and args[1] is the configuration file path.
    _optlist, args = getopt(sys.argv, 'x', [])
    settings['configfile'] = args[1]
    return settings
Parse command line options
Below is the instruction that describes the task: ### Input: Parse command line options ### Response: def parse_options(settings): '''Parse command line options''' optlist, args = getopt(sys.argv, 'x', []) settings['configfile'] = args[1] return settings
def mid_pt(self):
    """Midpoint of this interval product.

    For degenerate axes (flagged False in ``nondegen_byaxis``) the
    midpoint is pinned to ``min_pt`` so no floating-point drift is
    introduced on those axes.
    """
    degenerate = ~self.nondegen_byaxis
    midpoint = (self.min_pt + self.max_pt) / 2.
    midpoint[degenerate] = self.min_pt[degenerate]
    return midpoint
Midpoint of this interval product.
Below is the instruction that describes the task: ### Input: Midpoint of this interval product. ### Response: def mid_pt(self): """Midpoint of this interval product.""" midp = (self.max_pt + self.min_pt) / 2. midp[~self.nondegen_byaxis] = self.min_pt[~self.nondegen_byaxis] return midp
def getEdges(self, fromVol):
    """Yield the ``Store.Diff`` edges available from ``fromVol``.

    :param fromVol: source volume, or ``None`` to enumerate full
        (non-incremental) transfers of every known path.
    :yields: ``Store.Diff`` objects; sizes are exact for full transfers
        and estimated (``sizeIsEstimated=True``) for incremental ones.
    """
    if fromVol is None:
        # No base volume: every known path becomes a full-send edge of
        # exactly-known size.
        for toVol in self.paths:
            yield Store.Diff(self, toVol, fromVol, toVol.size)
        return
    if fromVol not in self.paths:
        # Unknown source volume: no edges originate here.
        return
    fromBVol = self.butterVolumes[fromVol.uuid]
    parentUUID = fromBVol.parent_uuid
    butterDir = os.path.dirname(fromBVol.fullPath)
    # Candidate targets: volumes sharing the same parent snapshot, plus
    # any volume stored in the same directory as the source.
    vols = [vol for vol in self.butterVolumes.values()
            if vol.parent_uuid == parentUUID or
            os.path.dirname(vol.fullPath) == butterDir
            ]
    changeRate = self._calcChangeRate(vols)
    for toBVol in vols:
        if toBVol == fromBVol:
            continue
        # This gives a conservative estimate of the size of the diff
        estimatedSize = self._estimateSize(toBVol, fromBVol, changeRate)
        toVol = self._btrfsVol2StoreVol(toBVol)
        yield Store.Diff(self, toVol, fromVol, estimatedSize, sizeIsEstimated=True)
Return the edges available from fromVol.
Below is the the instruction that describes the task: ### Input: Return the edges available from fromVol. ### Response: def getEdges(self, fromVol): """ Return the edges available from fromVol. """ if fromVol is None: for toVol in self.paths: yield Store.Diff(self, toVol, fromVol, toVol.size) return if fromVol not in self.paths: return fromBVol = self.butterVolumes[fromVol.uuid] parentUUID = fromBVol.parent_uuid butterDir = os.path.dirname(fromBVol.fullPath) vols = [vol for vol in self.butterVolumes.values() if vol.parent_uuid == parentUUID or os.path.dirname(vol.fullPath) == butterDir ] changeRate = self._calcChangeRate(vols) for toBVol in vols: if toBVol == fromBVol: continue # This gives a conservative estimate of the size of the diff estimatedSize = self._estimateSize(toBVol, fromBVol, changeRate) toVol = self._btrfsVol2StoreVol(toBVol) yield Store.Diff(self, toVol, fromVol, estimatedSize, sizeIsEstimated=True)
def get(filename, section, parameter):
    '''
    Get a value from an OpenStack configuration file.

    filename
        The full path to the configuration file

    section
        The section from which to search for the parameter

    parameter
        The parameter to return

    CLI Example:

    .. code-block:: bash

        salt-call openstack_config.get /etc/keystone/keystone.conf sql connection
    '''
    # Quote each argument before it is interpolated into the command line.
    quoted = [_quote(arg) for arg in (filename, section, parameter)]
    cmd = 'openstack-config --get {0} {1} {2}'.format(*quoted)
    result = __salt__['cmd.run_all'](cmd, python_shell=False)
    if result['retcode'] != 0:
        raise salt.exceptions.CommandExecutionError(result['stderr'])
    return result['stdout']
Get a value from an OpenStack configuration file. filename The full path to the configuration file section The section from which to search for the parameter parameter The parameter to return CLI Example: .. code-block:: bash salt-call openstack_config.get /etc/keystone/keystone.conf sql connection
Below is the the instruction that describes the task: ### Input: Get a value from an OpenStack configuration file. filename The full path to the configuration file section The section from which to search for the parameter parameter The parameter to return CLI Example: .. code-block:: bash salt-call openstack_config.get /etc/keystone/keystone.conf sql connection ### Response: def get(filename, section, parameter): ''' Get a value from an OpenStack configuration file. filename The full path to the configuration file section The section from which to search for the parameter parameter The parameter to return CLI Example: .. code-block:: bash salt-call openstack_config.get /etc/keystone/keystone.conf sql connection ''' filename = _quote(filename) section = _quote(section) parameter = _quote(parameter) result = __salt__['cmd.run_all']( 'openstack-config --get {0} {1} {2}'.format( filename, section, parameter ), python_shell=False, ) if result['retcode'] == 0: return result['stdout'] else: raise salt.exceptions.CommandExecutionError(result['stderr'])
def update_combobox(self):
    """Recreates the combobox contents.

    Rebuilds the scheme-selection combobox from the configured scheme
    names while preserving the previously selected index. Built-in and
    custom scheme names are separated by a visual separator item.
    """
    index = self.current_scheme_index
    # Suppress change signals while the item list is being rebuilt.
    self.schemes_combobox.blockSignals(True)
    names = self.get_option("names")
    try:
        # 'Custom' is a placeholder entry, not a real scheme; drop it.
        names.pop(names.index(u'Custom'))
    except ValueError:
        pass
    custom_names = self.get_option("custom_names", [])
    # Useful for retrieving the actual data
    for n in names + custom_names:
        self.scheme_choices_dict[self.get_option('{0}/name'.format(n))] = n
    if custom_names:
        # None marks the spot where the separator will be inserted.
        choices = names + [None] + custom_names
    else:
        choices = names
    combobox = self.schemes_combobox
    combobox.clear()
    for name in choices:
        if name is None:
            continue
        # Display name shown to the user; raw scheme name kept as item data.
        combobox.addItem(self.get_option('{0}/name'.format(name)), name)
    if custom_names:
        combobox.insertSeparator(len(names))
    self.schemes_combobox.blockSignals(False)
    self.schemes_combobox.setCurrentIndex(index)
Recreates the combobox contents.
Below is the the instruction that describes the task: ### Input: Recreates the combobox contents. ### Response: def update_combobox(self): """Recreates the combobox contents.""" index = self.current_scheme_index self.schemes_combobox.blockSignals(True) names = self.get_option("names") try: names.pop(names.index(u'Custom')) except ValueError: pass custom_names = self.get_option("custom_names", []) # Useful for retrieving the actual data for n in names + custom_names: self.scheme_choices_dict[self.get_option('{0}/name'.format(n))] = n if custom_names: choices = names + [None] + custom_names else: choices = names combobox = self.schemes_combobox combobox.clear() for name in choices: if name is None: continue combobox.addItem(self.get_option('{0}/name'.format(name)), name) if custom_names: combobox.insertSeparator(len(names)) self.schemes_combobox.blockSignals(False) self.schemes_combobox.setCurrentIndex(index)
def _get_or_update_parent(key, val, to_str, parent=None, **options):
    """Build an XML element for ``key`` and attach it to ``parent``.

    :param key: Tag name of the element being created
    :param val: Child value(s); a non-iterable value is wrapped in a list
    :param to_str: Callable used to convert leaf values to strings
    :param parent: Existing XML ElementTree parent node, or None for root
    :param options: Keyword options forwarded to :func:`container_to_etree`
    :return: ``parent`` with the new element appended, or the new element
        itself when ``parent`` is None (i.e. it is the top-level etree)
    """
    node = ET.Element(key)
    children = val if anyconfig.utils.is_iterable(val) else [val]
    for child in children:
        container_to_etree(child, parent=node, to_str=to_str, **options)
    if parent is not None:
        parent.append(node)
        return parent
    # No parent given: the freshly built element is the top-level etree.
    return node
:param key: Key of current child (dict{,-like} object) :param val: Value of current child (dict{,-like} object or [dict{,...}]) :param to_str: Callable to convert value to string :param parent: XML ElementTree parent node object or None :param options: Keyword options, see :func:`container_to_etree`
Below is the the instruction that describes the task: ### Input: :param key: Key of current child (dict{,-like} object) :param val: Value of current child (dict{,-like} object or [dict{,...}]) :param to_str: Callable to convert value to string :param parent: XML ElementTree parent node object or None :param options: Keyword options, see :func:`container_to_etree` ### Response: def _get_or_update_parent(key, val, to_str, parent=None, **options): """ :param key: Key of current child (dict{,-like} object) :param val: Value of current child (dict{,-like} object or [dict{,...}]) :param to_str: Callable to convert value to string :param parent: XML ElementTree parent node object or None :param options: Keyword options, see :func:`container_to_etree` """ elem = ET.Element(key) vals = val if anyconfig.utils.is_iterable(val) else [val] for val_ in vals: container_to_etree(val_, parent=elem, to_str=to_str, **options) if parent is None: # 'elem' is the top level etree. return elem parent.append(elem) return parent
def list_results(self, number, username):
    """[deprecated] Use :meth:`get_question_results` instead.

    :param number: question number forwarded to ``get_question_results``
    :param username: user name forwarded to ``get_question_results``
    :returns: list of ``(question_number, result)`` tuples, preserving
        the ordering of the mapping returned by ``get_question_results``
    """
    # Fetch results through the newer API, then flatten the mapping into
    # the legacy list-of-tuples structure for backward compatibility.
    # NOTE: the old implementation reused the name `number` as the loop
    # variable, shadowing the parameter; a distinct name is used here.
    data = self.get_question_results(number, username)
    return [(question, data[question]) for question in data]
[deprecated] 建議使用方法 `get_question_results()`
Below is the the instruction that describes the task: ### Input: [deprecated] 建議使用方法 `get_question_results()` ### Response: def list_results(self, number, username): """ [deprecated] 建議使用方法 `get_question_results()` """ # 取得新 API 的結果 data = self.get_question_results(number, username) # 實作相容的結構 result = [] for number in data: # 儲存題目資訊 result += [(number, data[number])] # 回傳結果 return result
def plot(self, y_axis='attenuation', x_axis='energy', logx=False, logy=False, mixed=True, all_layers=False, all_elements=False, all_isotopes=False, items_to_plot=None, time_unit='us', offset_us=0., source_to_detector_m=16., time_resolution_us=0.16, t_start_us=1, plotly=False, ax_mpl=None, fmt='-', ms='2', lw='1.5', alpha=1): # offset delay values is normal 2.99 us with NONE actual MCP delay settings """display the transmission or attenuation of compound, element and/or isotopes specified Parameters: =========== :param x_axis: x type for export. Must be either ['energy'|'lambda'|'time'|'number'] :type x_axis: str :param y_axis: y type for export. Must be either ['transmission'|'attenuation'|'sigma'|'sigma_raw'|'miu_per_cm'] :type y_axis: str :param logx: True -> display x in log scale :type logx: boolean. :param logy: True -> display y in log scale :type logy: boolean. :param mixed: boolean. True -> display the total of each layer False -> not displayed :param all_layers: boolean. True -> display all layers False -> not displayed :param all_elements: boolean. True -> display all elements signal False -> not displayed :param all_isotopes: boolean. True -> display all isotopes signal False -> not displayed :param items_to_plot: array that describes what to plot ex: [['CoAg','Ag','107-Ag'], ['CoAg']] if the dictionary is empty, everything is exported :param time_unit: string. Must be either ['s'|'us'|'ns'] Note: this will be used only when x_axis='time' :param offset_us: default: 0 Note: only used when x_axis='number' or 'time' :param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time' :param time_resolution_us: Note: this will be used only when x_axis='number' :param t_start_us: when is the first acquisition occurred. default: 1 Note: this will be used only when x_axis='number' :param plotly: control to use plotly to display or not. 
:type plotly: bool :param ax_mpl: matplotlib.axes to plot against :type ax_mpl: matplotlib.axes :param fmt: matplotlib.axes.plot kwargs :type fmt: str :param ms: matplotlib.axes.plot kwargs :type ms: float :param lw: matplotlib.axes.plot kwargs :type lw: float :param alpha: matplotlib.axes.plot kwargs :type alpha: float """ if x_axis not in x_type_list: raise ValueError("Please specify the x-axis type using one from '{}'.".format(x_type_list)) if time_unit not in time_unit_list: raise ValueError("Please specify the time unit using one from '{}'.".format(time_unit_list)) if y_axis not in y_type_list: raise ValueError("Please specify the y-axis type using one from '{}'.".format(y_type_list)) # figure size # plt.figure(figsize=(8, 8)) # stack from self _stack_signal = self.stack_signal _stack = self.stack _stack_sigma = self.stack_sigma _x_axis = self.total_signal['energy_eV'] x_axis_label = None # Creating the matplotlib graph.. if ax_mpl is None: fig_mpl, ax_mpl = plt.subplots() """X-axis""" # determine values and labels for x-axis with options from # 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)' if x_axis == 'energy': x_axis_label = 'Energy (eV)' if x_axis == 'lambda': x_axis_label = u"Wavelength (\u212B)" _x_axis = _utilities.ev_to_angstroms(array=_x_axis) if x_axis == 'time': if time_unit == 's': x_axis_label = 'Time (s)' _x_axis = _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) if time_unit == 'us': x_axis_label = 'Time (us)' _x_axis = 1e6 * _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) if time_unit == 'ns': x_axis_label = 'Time (ns)' _x_axis = 1e9 * _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}" .format(x_axis_label, source_to_detector_m, offset_us)) if x_axis == 'number': x_axis_label = 'Image number (#)' _x_axis 
= _utilities.ev_to_image_number(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us, time_resolution_us=time_resolution_us, t_start_us=t_start_us) print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}\ntime_resolution_us={}" .format(x_axis_label, source_to_detector_m, offset_us, time_resolution_us)) if x_axis_label is None: raise ValueError("x_axis_label does NOT exist, please check.") """Y-axis""" # determine to plot transmission or attenuation # determine to put transmission or attenuation words for y-axis y_axis_tag = y_axis if y_axis == 'transmission': y_axis_label = 'Neutron Transmission' elif y_axis == 'attenuation': y_axis_label = 'Neutron Attenuation' elif y_axis == 'sigma': y_axis_tag = 'sigma_b' y_axis_label = 'Cross-section (barns)' elif y_axis == 'sigma_raw': y_axis_tag = 'sigma_b_raw' y_axis_label = 'Cross-section (barns)' else: y_axis_tag = 'miu_per_cm' y_axis_label = "Attenuation coefficient (cm\u207B\u00B9)" if y_axis_tag[:5] == 'sigma': mixed = False all_layers = False print("'y_axis='sigma'' is selected. Auto force 'mixed=False', 'all_layers=False'") if y_axis_tag[-3:] == 'raw': all_elements = False print("'y_axis='sigma_raw'' is selected. Auto force 'all_elements=False'") if y_axis_tag == 'miu_per_cm': mixed = False print("'y_axis='miu_per_cm'' is selected. 
Auto force 'mixed=False'") # Plotting begins if mixed: _y_axis = self.total_signal[y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="Total") if all_layers: for _compound in _stack.keys(): _y_axis = _stack_signal[_compound][y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_compound) if all_elements: for _compound in _stack.keys(): for _element in _stack[_compound]['elements']: if y_axis_tag[:5] != 'sigma': _y_axis = _stack_signal[_compound][_element][y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="{}/{}".format(_compound, _element)) else: _y_axis = _stack_sigma[_compound][_element]['sigma_b'] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="{}/{}".format(_compound, _element)) if all_isotopes: for _compound in _stack.keys(): for _element in _stack[_compound]['elements']: for _isotope in _stack[_compound][_element]['isotopes']['list']: if y_axis_tag[:5] != 'sigma': _y_axis = _stack_signal[_compound][_element][_isotope][y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="{}/{}/{}".format(_compound, _element, _isotope)) else: _y_axis = _stack_sigma[_compound][_element][_isotope][y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="{}/{}/{}".format(_compound, _element, _isotope)) """Y-axis for specified items_to_plot""" if items_to_plot is not None: for _path_to_plot in items_to_plot: _path_to_plot = list(_path_to_plot) if y_axis_tag[:5] != 'sigma': _live_path = _stack_signal else: _len_of_path = len(_path_to_plot) if y_axis_tag[-3:] == 'raw': if _len_of_path < 3: raise ValueError("'y_axis={}' is not supported for layer or element levels '{}'.".format( y_axis_tag, _path_to_plot[-1])) else: if _len_of_path < 2: raise ValueError("'y_axis={}' is not supported for layer level '{}'.".format( y_axis_tag, _path_to_plot[-1])) _live_path = _stack_sigma _label = "/".join(_path_to_plot) while _path_to_plot: _item 
= _path_to_plot.pop(0) _live_path = _live_path[_item] _y_axis = _live_path[y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_label) if y_axis_tag[:5] != 'sigma' and y_axis_tag != 'miu_per_cm': ax_mpl.set_ylim(-0.01, 1.01) if logy is True: ax_mpl.set_yscale('log') if logx is True: ax_mpl.set_xscale('log') ax_mpl.set_xlabel(x_axis_label) ax_mpl.set_ylabel(y_axis_label) if not plotly: ax_mpl.legend(loc='best') # plt.tight_layout() return ax_mpl else: fig_mpl = ax_mpl.get_figure() plotly_fig = tls.mpl_to_plotly(fig_mpl) plotly_fig.layout.showlegend = True return plotly_fig
display the transmission or attenuation of compound, element and/or isotopes specified Parameters: =========== :param x_axis: x type for export. Must be either ['energy'|'lambda'|'time'|'number'] :type x_axis: str :param y_axis: y type for export. Must be either ['transmission'|'attenuation'|'sigma'|'sigma_raw'|'miu_per_cm'] :type y_axis: str :param logx: True -> display x in log scale :type logx: boolean. :param logy: True -> display y in log scale :type logy: boolean. :param mixed: boolean. True -> display the total of each layer False -> not displayed :param all_layers: boolean. True -> display all layers False -> not displayed :param all_elements: boolean. True -> display all elements signal False -> not displayed :param all_isotopes: boolean. True -> display all isotopes signal False -> not displayed :param items_to_plot: array that describes what to plot ex: [['CoAg','Ag','107-Ag'], ['CoAg']] if the dictionary is empty, everything is exported :param time_unit: string. Must be either ['s'|'us'|'ns'] Note: this will be used only when x_axis='time' :param offset_us: default: 0 Note: only used when x_axis='number' or 'time' :param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time' :param time_resolution_us: Note: this will be used only when x_axis='number' :param t_start_us: when is the first acquisition occurred. default: 1 Note: this will be used only when x_axis='number' :param plotly: control to use plotly to display or not. :type plotly: bool :param ax_mpl: matplotlib.axes to plot against :type ax_mpl: matplotlib.axes :param fmt: matplotlib.axes.plot kwargs :type fmt: str :param ms: matplotlib.axes.plot kwargs :type ms: float :param lw: matplotlib.axes.plot kwargs :type lw: float :param alpha: matplotlib.axes.plot kwargs :type alpha: float
Below is the the instruction that describes the task: ### Input: display the transmission or attenuation of compound, element and/or isotopes specified Parameters: =========== :param x_axis: x type for export. Must be either ['energy'|'lambda'|'time'|'number'] :type x_axis: str :param y_axis: y type for export. Must be either ['transmission'|'attenuation'|'sigma'|'sigma_raw'|'miu_per_cm'] :type y_axis: str :param logx: True -> display x in log scale :type logx: boolean. :param logy: True -> display y in log scale :type logy: boolean. :param mixed: boolean. True -> display the total of each layer False -> not displayed :param all_layers: boolean. True -> display all layers False -> not displayed :param all_elements: boolean. True -> display all elements signal False -> not displayed :param all_isotopes: boolean. True -> display all isotopes signal False -> not displayed :param items_to_plot: array that describes what to plot ex: [['CoAg','Ag','107-Ag'], ['CoAg']] if the dictionary is empty, everything is exported :param time_unit: string. Must be either ['s'|'us'|'ns'] Note: this will be used only when x_axis='time' :param offset_us: default: 0 Note: only used when x_axis='number' or 'time' :param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time' :param time_resolution_us: Note: this will be used only when x_axis='number' :param t_start_us: when is the first acquisition occurred. default: 1 Note: this will be used only when x_axis='number' :param plotly: control to use plotly to display or not. 
:type plotly: bool :param ax_mpl: matplotlib.axes to plot against :type ax_mpl: matplotlib.axes :param fmt: matplotlib.axes.plot kwargs :type fmt: str :param ms: matplotlib.axes.plot kwargs :type ms: float :param lw: matplotlib.axes.plot kwargs :type lw: float :param alpha: matplotlib.axes.plot kwargs :type alpha: float ### Response: def plot(self, y_axis='attenuation', x_axis='energy', logx=False, logy=False, mixed=True, all_layers=False, all_elements=False, all_isotopes=False, items_to_plot=None, time_unit='us', offset_us=0., source_to_detector_m=16., time_resolution_us=0.16, t_start_us=1, plotly=False, ax_mpl=None, fmt='-', ms='2', lw='1.5', alpha=1): # offset delay values is normal 2.99 us with NONE actual MCP delay settings """display the transmission or attenuation of compound, element and/or isotopes specified Parameters: =========== :param x_axis: x type for export. Must be either ['energy'|'lambda'|'time'|'number'] :type x_axis: str :param y_axis: y type for export. Must be either ['transmission'|'attenuation'|'sigma'|'sigma_raw'|'miu_per_cm'] :type y_axis: str :param logx: True -> display x in log scale :type logx: boolean. :param logy: True -> display y in log scale :type logy: boolean. :param mixed: boolean. True -> display the total of each layer False -> not displayed :param all_layers: boolean. True -> display all layers False -> not displayed :param all_elements: boolean. True -> display all elements signal False -> not displayed :param all_isotopes: boolean. True -> display all isotopes signal False -> not displayed :param items_to_plot: array that describes what to plot ex: [['CoAg','Ag','107-Ag'], ['CoAg']] if the dictionary is empty, everything is exported :param time_unit: string. 
Must be either ['s'|'us'|'ns'] Note: this will be used only when x_axis='time' :param offset_us: default: 0 Note: only used when x_axis='number' or 'time' :param source_to_detector_m: Note: this will be used only when x_axis='number' or 'time' :param time_resolution_us: Note: this will be used only when x_axis='number' :param t_start_us: when is the first acquisition occurred. default: 1 Note: this will be used only when x_axis='number' :param plotly: control to use plotly to display or not. :type plotly: bool :param ax_mpl: matplotlib.axes to plot against :type ax_mpl: matplotlib.axes :param fmt: matplotlib.axes.plot kwargs :type fmt: str :param ms: matplotlib.axes.plot kwargs :type ms: float :param lw: matplotlib.axes.plot kwargs :type lw: float :param alpha: matplotlib.axes.plot kwargs :type alpha: float """ if x_axis not in x_type_list: raise ValueError("Please specify the x-axis type using one from '{}'.".format(x_type_list)) if time_unit not in time_unit_list: raise ValueError("Please specify the time unit using one from '{}'.".format(time_unit_list)) if y_axis not in y_type_list: raise ValueError("Please specify the y-axis type using one from '{}'.".format(y_type_list)) # figure size # plt.figure(figsize=(8, 8)) # stack from self _stack_signal = self.stack_signal _stack = self.stack _stack_sigma = self.stack_sigma _x_axis = self.total_signal['energy_eV'] x_axis_label = None # Creating the matplotlib graph.. 
if ax_mpl is None: fig_mpl, ax_mpl = plt.subplots() """X-axis""" # determine values and labels for x-axis with options from # 'energy(eV)' & 'lambda(A)' & 'time(us)' & 'image number(#)' if x_axis == 'energy': x_axis_label = 'Energy (eV)' if x_axis == 'lambda': x_axis_label = u"Wavelength (\u212B)" _x_axis = _utilities.ev_to_angstroms(array=_x_axis) if x_axis == 'time': if time_unit == 's': x_axis_label = 'Time (s)' _x_axis = _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) if time_unit == 'us': x_axis_label = 'Time (us)' _x_axis = 1e6 * _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) if time_unit == 'ns': x_axis_label = 'Time (ns)' _x_axis = 1e9 * _utilities.ev_to_s(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us) print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}" .format(x_axis_label, source_to_detector_m, offset_us)) if x_axis == 'number': x_axis_label = 'Image number (#)' _x_axis = _utilities.ev_to_image_number(array=_x_axis, source_to_detector_m=source_to_detector_m, offset_us=offset_us, time_resolution_us=time_resolution_us, t_start_us=t_start_us) print("'{}' was obtained with the following:\nsource_to_detector_m={}\noffset_us={}\ntime_resolution_us={}" .format(x_axis_label, source_to_detector_m, offset_us, time_resolution_us)) if x_axis_label is None: raise ValueError("x_axis_label does NOT exist, please check.") """Y-axis""" # determine to plot transmission or attenuation # determine to put transmission or attenuation words for y-axis y_axis_tag = y_axis if y_axis == 'transmission': y_axis_label = 'Neutron Transmission' elif y_axis == 'attenuation': y_axis_label = 'Neutron Attenuation' elif y_axis == 'sigma': y_axis_tag = 'sigma_b' y_axis_label = 'Cross-section (barns)' elif y_axis == 'sigma_raw': y_axis_tag = 'sigma_b_raw' y_axis_label = 'Cross-section (barns)' else: y_axis_tag = 'miu_per_cm' 
y_axis_label = "Attenuation coefficient (cm\u207B\u00B9)" if y_axis_tag[:5] == 'sigma': mixed = False all_layers = False print("'y_axis='sigma'' is selected. Auto force 'mixed=False', 'all_layers=False'") if y_axis_tag[-3:] == 'raw': all_elements = False print("'y_axis='sigma_raw'' is selected. Auto force 'all_elements=False'") if y_axis_tag == 'miu_per_cm': mixed = False print("'y_axis='miu_per_cm'' is selected. Auto force 'mixed=False'") # Plotting begins if mixed: _y_axis = self.total_signal[y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="Total") if all_layers: for _compound in _stack.keys(): _y_axis = _stack_signal[_compound][y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_compound) if all_elements: for _compound in _stack.keys(): for _element in _stack[_compound]['elements']: if y_axis_tag[:5] != 'sigma': _y_axis = _stack_signal[_compound][_element][y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="{}/{}".format(_compound, _element)) else: _y_axis = _stack_sigma[_compound][_element]['sigma_b'] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="{}/{}".format(_compound, _element)) if all_isotopes: for _compound in _stack.keys(): for _element in _stack[_compound]['elements']: for _isotope in _stack[_compound][_element]['isotopes']['list']: if y_axis_tag[:5] != 'sigma': _y_axis = _stack_signal[_compound][_element][_isotope][y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="{}/{}/{}".format(_compound, _element, _isotope)) else: _y_axis = _stack_sigma[_compound][_element][_isotope][y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label="{}/{}/{}".format(_compound, _element, _isotope)) """Y-axis for specified items_to_plot""" if items_to_plot is not None: for _path_to_plot in items_to_plot: _path_to_plot = list(_path_to_plot) if y_axis_tag[:5] != 'sigma': _live_path = _stack_signal else: 
_len_of_path = len(_path_to_plot) if y_axis_tag[-3:] == 'raw': if _len_of_path < 3: raise ValueError("'y_axis={}' is not supported for layer or element levels '{}'.".format( y_axis_tag, _path_to_plot[-1])) else: if _len_of_path < 2: raise ValueError("'y_axis={}' is not supported for layer level '{}'.".format( y_axis_tag, _path_to_plot[-1])) _live_path = _stack_sigma _label = "/".join(_path_to_plot) while _path_to_plot: _item = _path_to_plot.pop(0) _live_path = _live_path[_item] _y_axis = _live_path[y_axis_tag] ax_mpl.plot(_x_axis, _y_axis, fmt, ms=ms, lw=lw, alpha=alpha, label=_label) if y_axis_tag[:5] != 'sigma' and y_axis_tag != 'miu_per_cm': ax_mpl.set_ylim(-0.01, 1.01) if logy is True: ax_mpl.set_yscale('log') if logx is True: ax_mpl.set_xscale('log') ax_mpl.set_xlabel(x_axis_label) ax_mpl.set_ylabel(y_axis_label) if not plotly: ax_mpl.legend(loc='best') # plt.tight_layout() return ax_mpl else: fig_mpl = ax_mpl.get_figure() plotly_fig = tls.mpl_to_plotly(fig_mpl) plotly_fig.layout.showlegend = True return plotly_fig
def number(self, p_todo):
    """Return the identifier of a todo item.

    Depending on the configured identifier style this is either the
    todo's text ID (``identifiers() == "text"``) or its line number.
    """
    use_text_ids = config().identifiers() == "text"
    return self.uid(p_todo) if use_text_ids else self.linenumber(p_todo)
Returns the line number or text ID of a todo (depends on the configuration.
Below is the the instruction that describes the task: ### Input: Returns the line number or text ID of a todo (depends on the configuration. ### Response: def number(self, p_todo): """ Returns the line number or text ID of a todo (depends on the configuration. """ if config().identifiers() == "text": return self.uid(p_todo) else: return self.linenumber(p_todo)
def make_client(servers: Sequence[str], *args, **kwargs) -> GMatrixClient:
    """Given a list of possible servers, chooses the closest available and create a GMatrixClient

    Params:
        servers: list of servers urls, with scheme (http or https)
        Rest of args and kwargs are forwarded to GMatrixClient constructor

    Returns:
        GMatrixClient instance for one of the available servers

    Raises:
        TransportError: if the servers list is empty or no server passed
            the reachability probe
    """
    if len(servers) > 1:
        # Rank candidate homeservers by measured round-trip time.
        sorted_servers = [
            server_url
            for (server_url, _) in sort_servers_closest(servers)
        ]
        log.info(
            'Automatically selecting matrix homeserver based on RTT',
            sorted_servers=sorted_servers,
        )
    elif len(servers) == 1:
        # A single candidate needs no latency measurement.
        sorted_servers = servers
    else:
        raise TransportError('No valid servers list given')

    last_ex = None
    for server_url in sorted_servers:
        server_url: str = server_url
        client = GMatrixClient(server_url, *args, **kwargs)
        try:
            # Probe a cheap unauthenticated endpoint to verify reachability.
            client.api._send('GET', '/versions', api_path='/_matrix/client')
        except MatrixError as ex:
            log.warning('Selected server not usable', server_url=server_url, _exception=ex)
            last_ex = ex
        else:
            # First reachable server wins.
            break
    else:
        # for/else: the loop completed without break, i.e. every server
        # failed the probe; chain the last failure as the cause.
        raise TransportError(
            'Unable to find a reachable Matrix server. Please check your network connectivity.',
        ) from last_ex

    return client
Given a list of possible servers, chooses the closest available and create a GMatrixClient Params: servers: list of servers urls, with scheme (http or https) Rest of args and kwargs are forwarded to GMatrixClient constructor Returns: GMatrixClient instance for one of the available servers
Below is the the instruction that describes the task: ### Input: Given a list of possible servers, chooses the closest available and create a GMatrixClient Params: servers: list of servers urls, with scheme (http or https) Rest of args and kwargs are forwarded to GMatrixClient constructor Returns: GMatrixClient instance for one of the available servers ### Response: def make_client(servers: Sequence[str], *args, **kwargs) -> GMatrixClient: """Given a list of possible servers, chooses the closest available and create a GMatrixClient Params: servers: list of servers urls, with scheme (http or https) Rest of args and kwargs are forwarded to GMatrixClient constructor Returns: GMatrixClient instance for one of the available servers """ if len(servers) > 1: sorted_servers = [ server_url for (server_url, _) in sort_servers_closest(servers) ] log.info( 'Automatically selecting matrix homeserver based on RTT', sorted_servers=sorted_servers, ) elif len(servers) == 1: sorted_servers = servers else: raise TransportError('No valid servers list given') last_ex = None for server_url in sorted_servers: server_url: str = server_url client = GMatrixClient(server_url, *args, **kwargs) try: client.api._send('GET', '/versions', api_path='/_matrix/client') except MatrixError as ex: log.warning('Selected server not usable', server_url=server_url, _exception=ex) last_ex = ex else: break else: raise TransportError( 'Unable to find a reachable Matrix server. Please check your network connectivity.', ) from last_ex return client
def setAttributeNS(self, namespaceURI, localName, value): ''' Keyword arguments: namespaceURI -- namespace of attribute to create, None is for attributes in no namespace. localName -- local name of new attribute value -- value of new attribute ''' prefix = None if namespaceURI: try: prefix = self.getPrefix(namespaceURI) except KeyError, ex: prefix = 'ns2' self.setNamespaceAttribute(prefix, namespaceURI) qualifiedName = localName if prefix: qualifiedName = '%s:%s' %(prefix, localName) self._setAttributeNS(namespaceURI, qualifiedName, value)
Keyword arguments: namespaceURI -- namespace of attribute to create, None is for attributes in no namespace. localName -- local name of new attribute value -- value of new attribute
Below is the instruction that describes the task:
    ### Input:
    Keyword arguments:
        namespaceURI -- namespace of attribute to create, None is for
            attributes in no namespace.
        localName -- local name of new attribute
        value -- value of new attribute

    ### Response:
    def setAttributeNS(self, namespaceURI, localName, value):
        '''
        Keyword arguments:
            namespaceURI -- namespace of attribute to create, None is for
                attributes in no namespace.
            localName -- local name of new attribute
            value -- value of new attribute
        '''
        prefix = None
        if namespaceURI:
            try:
                prefix = self.getPrefix(namespaceURI)
            except KeyError, ex:
                prefix = 'ns2'
                self.setNamespaceAttribute(prefix, namespaceURI)
        qualifiedName = localName
        if prefix:
            qualifiedName = '%s:%s' %(prefix, localName)
        self._setAttributeNS(namespaceURI, qualifiedName, value)
def y_fit(self): """ Using the result of the linear least squares, the result of :math:`X_{ij}\\beta_i` """ if self._y_fit is None: self._y_fit = _np.dot(self.X_unweighted, self.beta) return self._y_fit
Using the result of the linear least squares, the result of :math:`X_{ij}\\beta_i`
Below is the instruction that describes the task:
    ### Input:
    Using the result of the linear least squares, the result of :math:`X_{ij}\\beta_i`

    ### Response:
    def y_fit(self):
        """
        Using the result of the linear least squares, the result of :math:`X_{ij}\\beta_i`
        """
        if self._y_fit is None:
            self._y_fit = _np.dot(self.X_unweighted, self.beta)
        return self._y_fit
def eff_default_transformer(fills=EFF_DEFAULT_FILLS): """ Return a simple transformer function for parsing EFF annotations. N.B., ignores all but the first effect. """ def _transformer(vals): if len(vals) == 0: return fills else: # ignore all but first effect match_eff_main = _prog_eff_main.match(vals[0]) if match_eff_main is None: logging.warning( 'match_eff_main is None: vals={}'.format(str(vals[0])) ) return fills eff = [match_eff_main.group(1)] \ + match_eff_main.group(2).split(b'|') result = tuple( fill if v == b'' else int(v) if i == 5 or i == 10 else (1 if v == b'CODING' else 0) if i == 8 else v for i, (v, fill) in enumerate(list(zip(eff, fills))[:11]) ) return result return _transformer
Return a simple transformer function for parsing EFF annotations. N.B., ignores all but the first effect.
Below is the instruction that describes the task:
    ### Input:
    Return a simple transformer function for parsing EFF annotations. N.B.,
    ignores all but the first effect.

    ### Response:
    def eff_default_transformer(fills=EFF_DEFAULT_FILLS):
    """
    Return a simple transformer function for parsing EFF annotations. N.B.,
    ignores all but the first effect.

    """
    def _transformer(vals):
        if len(vals) == 0:
            return fills
        else:
            # ignore all but first effect
            match_eff_main = _prog_eff_main.match(vals[0])
            if match_eff_main is None:
                logging.warning(
                    'match_eff_main is None: vals={}'.format(str(vals[0]))
                )
                return fills
            eff = [match_eff_main.group(1)] \
                + match_eff_main.group(2).split(b'|')
            result = tuple(
                fill if v == b''
                else int(v) if i == 5 or i == 10
                else (1 if v == b'CODING' else 0) if i == 8
                else v
                for i, (v, fill)
                in enumerate(list(zip(eff, fills))[:11])
            )
            return result
    return _transformer
def _WorkerCommand_launcher(self): """Return list commands to start the bootstrap process""" return [ self.workersArguments.pythonExecutable, '-m', 'scoop.launch.__main__', str(self.workerAmount), str(self.workersArguments.verbose), ]
Return list commands to start the bootstrap process
Below is the instruction that describes the task:
    ### Input:
    Return list commands to start the bootstrap process

    ### Response:
    def _WorkerCommand_launcher(self):
        """Return list commands to start the bootstrap process"""
        return [
            self.workersArguments.pythonExecutable,
            '-m', 'scoop.launch.__main__',
            str(self.workerAmount),
            str(self.workersArguments.verbose),
        ]
def dist_minkowski(src, tar, qval=2, pval=1, alphabet=None): """Return normalized Minkowski distance of two strings. This is a wrapper for :py:meth:`Minkowski.dist`. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison qval : int The length of each q-gram; 0 for non-q-gram version pval : int or float The :math:`p`-value of the :math:`L^p`-space alphabet : collection or int The values or size of the alphabet Returns ------- float The normalized Minkowski distance Examples -------- >>> dist_minkowski('cat', 'hat') 0.5 >>> round(dist_minkowski('Niall', 'Neil'), 12) 0.636363636364 >>> round(dist_minkowski('Colin', 'Cuilen'), 12) 0.692307692308 >>> dist_minkowski('ATCG', 'TAGC') 1.0 """ return Minkowski().dist(src, tar, qval, pval, alphabet)
Return normalized Minkowski distance of two strings. This is a wrapper for :py:meth:`Minkowski.dist`. Parameters ---------- src : str Source string (or QGrams/Counter objects) for comparison tar : str Target string (or QGrams/Counter objects) for comparison qval : int The length of each q-gram; 0 for non-q-gram version pval : int or float The :math:`p`-value of the :math:`L^p`-space alphabet : collection or int The values or size of the alphabet Returns ------- float The normalized Minkowski distance Examples -------- >>> dist_minkowski('cat', 'hat') 0.5 >>> round(dist_minkowski('Niall', 'Neil'), 12) 0.636363636364 >>> round(dist_minkowski('Colin', 'Cuilen'), 12) 0.692307692308 >>> dist_minkowski('ATCG', 'TAGC') 1.0
Below is the instruction that describes the task:
    ### Input:
    Return normalized Minkowski distance of two strings.

    This is a wrapper for :py:meth:`Minkowski.dist`.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison
    qval : int
        The length of each q-gram; 0 for non-q-gram version
    pval : int or float
        The :math:`p`-value of the :math:`L^p`-space
    alphabet : collection or int
        The values or size of the alphabet

    Returns
    -------
    float
        The normalized Minkowski distance

    Examples
    --------
    >>> dist_minkowski('cat', 'hat')
    0.5
    >>> round(dist_minkowski('Niall', 'Neil'), 12)
    0.636363636364
    >>> round(dist_minkowski('Colin', 'Cuilen'), 12)
    0.692307692308
    >>> dist_minkowski('ATCG', 'TAGC')
    1.0

    ### Response:
    def dist_minkowski(src, tar, qval=2, pval=1, alphabet=None):
    """Return normalized Minkowski distance of two strings.

    This is a wrapper for :py:meth:`Minkowski.dist`.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison
    qval : int
        The length of each q-gram; 0 for non-q-gram version
    pval : int or float
        The :math:`p`-value of the :math:`L^p`-space
    alphabet : collection or int
        The values or size of the alphabet

    Returns
    -------
    float
        The normalized Minkowski distance

    Examples
    --------
    >>> dist_minkowski('cat', 'hat')
    0.5
    >>> round(dist_minkowski('Niall', 'Neil'), 12)
    0.636363636364
    >>> round(dist_minkowski('Colin', 'Cuilen'), 12)
    0.692307692308
    >>> dist_minkowski('ATCG', 'TAGC')
    1.0

    """
    return Minkowski().dist(src, tar, qval, pval, alphabet)
def find(self, name, current_location): """Given 'name' which can be project-id or plain directory name, return project module corresponding to that id or directory. Returns nothing of project is not found.""" assert isinstance(name, basestring) assert isinstance(current_location, basestring) project_module = None # Try interpreting name as project id. if name[0] == '/': project_module = self.id2module.get(name) if not project_module: location = os.path.join(current_location, name) # If no project is registered for the given location, try to # load it. First see if we have Jamfile. If not we might have project # root, willing to act as Jamfile. In that case, project-root # must be placed in the directory referred by id. project_module = self.module_name(location) if not project_module in self.jamfile_modules: if b2.util.path.glob([location], self.JAMROOT + self.JAMFILE): project_module = self.load(location) else: project_module = None return project_module
Given 'name' which can be project-id or plain directory name, return project module corresponding to that id or directory. Returns nothing of project is not found.
Below is the instruction that describes the task:
    ### Input:
    Given 'name' which can be project-id or plain directory name, return project module
        corresponding to that id or directory. Returns nothing of project is not found.

    ### Response:
    def find(self, name, current_location):
        """Given 'name' which can be project-id or plain directory name, return project module
        corresponding to that id or directory. Returns nothing of project is not found."""
        assert isinstance(name, basestring)
        assert isinstance(current_location, basestring)

        project_module = None

        # Try interpreting name as project id.
        if name[0] == '/':
            project_module = self.id2module.get(name)

        if not project_module:
            location = os.path.join(current_location, name)
            # If no project is registered for the given location, try to
            # load it. First see if we have Jamfile. If not we might have project
            # root, willing to act as Jamfile. In that case, project-root
            # must be placed in the directory referred by id.

            project_module = self.module_name(location)
            if not project_module in self.jamfile_modules:
                if b2.util.path.glob([location], self.JAMROOT + self.JAMFILE):
                    project_module = self.load(location)
                else:
                    project_module = None

        return project_module
def cmd_time(self, args): '''show autopilot time''' tusec = self.master.field('SYSTEM_TIME', 'time_unix_usec', 0) if tusec == 0: print("No SYSTEM_TIME time available") return print("%s (%s)\n" % (time.ctime(tusec * 1.0e-6), time.ctime()))
show autopilot time
Below is the instruction that describes the task:
    ### Input:
    show autopilot time

    ### Response:
    def cmd_time(self, args):
        '''show autopilot time'''
        tusec = self.master.field('SYSTEM_TIME', 'time_unix_usec', 0)
        if tusec == 0:
            print("No SYSTEM_TIME time available")
            return
        print("%s (%s)\n" % (time.ctime(tusec * 1.0e-6), time.ctime()))
def _get_method(method): """Return an imported object. :param method: ``str`` DOT notation for import with Colin used to separate the class used for the job. :returns: ``object`` Loaded class object from imported method. """ # Split the class out from the job module = method.split(':') # Set the import module _module_import = module[0] # Set the class name to use class_name = module[-1] # import the module module_import = __import__(_module_import, fromlist=[class_name]) # Return the attributes for the imported module and class return getattr(module_import, class_name)
Return an imported object. :param method: ``str`` DOT notation for import with Colin used to separate the class used for the job. :returns: ``object`` Loaded class object from imported method.
Below is the instruction that describes the task:
    ### Input:
    Return an imported object.

    :param method: ``str`` DOT notation for import with Colin used to separate
                   the class used for the job.
    :returns: ``object`` Loaded class object from imported method.

    ### Response:
    def _get_method(method):
    """Return an imported object.

    :param method: ``str`` DOT notation for import with Colin used to separate
                   the class used for the job.
    :returns: ``object`` Loaded class object from imported method.
    """

    # Split the class out from the job
    module = method.split(':')

    # Set the import module
    _module_import = module[0]

    # Set the class name to use
    class_name = module[-1]

    # import the module
    module_import = __import__(_module_import, fromlist=[class_name])

    # Return the attributes for the imported module and class
    return getattr(module_import, class_name)
def rm_empty_indices(*args): """ Remove unwanted list indices. First argument is the list of indices to remove. Other elements are the lists to trim. """ rm_inds = args[0] if not rm_inds: return args[1:] keep_inds = [i for i in range(len(args[1])) if i not in rm_inds] return [[a[i] for i in keep_inds] for a in args[1:]]
Remove unwanted list indices. First argument is the list of indices to remove. Other elements are the lists to trim.
Below is the instruction that describes the task:
    ### Input:
    Remove unwanted list indices. First argument is the list
    of indices to remove. Other elements are the lists
    to trim.

    ### Response:
    def rm_empty_indices(*args):
    """
    Remove unwanted list indices. First argument is the list
    of indices to remove. Other elements are the lists
    to trim.
    """
    rm_inds = args[0]

    if not rm_inds:
        return args[1:]

    keep_inds = [i for i in range(len(args[1])) if i not in rm_inds]

    return [[a[i] for i in keep_inds] for a in args[1:]]
def relationship_descriptor(action, *names): """ Wrap a function for modification of a relationship. This allows for specific handling for serialization and deserialization. :param action: The RelationshipActions that this descriptor performs :param names: A list of names of the relationships this references """ if isinstance(action, RelationshipActions): action = [action] def wrapped(fn): if not hasattr(fn, '__jsonapi_action__'): fn.__jsonapi_action__ = set() fn.__jsonapi_desc_for_rels__ = set() fn.__jsonapi_desc_for_rels__ |= set(names) fn.__jsonapi_action__ |= set(action) return fn return wrapped
Wrap a function for modification of a relationship. This allows for specific handling for serialization and deserialization. :param action: The RelationshipActions that this descriptor performs :param names: A list of names of the relationships this references
Below is the instruction that describes the task:
    ### Input:
    Wrap a function for modification of a relationship.

    This allows for specific handling for serialization and deserialization.

    :param action: The RelationshipActions that this descriptor performs
    :param names: A list of names of the relationships this references

    ### Response:
    def relationship_descriptor(action, *names):
    """
    Wrap a function for modification of a relationship.

    This allows for specific handling for serialization and deserialization.

    :param action: The RelationshipActions that this descriptor performs
    :param names: A list of names of the relationships this references
    """
    if isinstance(action, RelationshipActions):
        action = [action]

    def wrapped(fn):
        if not hasattr(fn, '__jsonapi_action__'):
            fn.__jsonapi_action__ = set()
            fn.__jsonapi_desc_for_rels__ = set()
        fn.__jsonapi_desc_for_rels__ |= set(names)
        fn.__jsonapi_action__ |= set(action)
        return fn
    return wrapped
def cut_distant_injections(workflow, inj_file, out_dir, tags=None): "Set up a job for removing injections that are too distant to be seen" if tags is None: tags = [] node = Executable(workflow.cp, 'inj_cut', ifos=workflow.ifos, out_dir=out_dir, tags=tags).create_node() node.add_input_opt('--input', inj_file) node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file') workflow += node return node.output_files[0]
Set up a job for removing injections that are too distant to be seen
Below is the instruction that describes the task:
    ### Input:
    Set up a job for removing injections that are too distant to be seen

    ### Response:
    def cut_distant_injections(workflow, inj_file, out_dir, tags=None):
    "Set up a job for removing injections that are too distant to be seen"
    if tags is None:
        tags = []
    node = Executable(workflow.cp, 'inj_cut', ifos=workflow.ifos,
                      out_dir=out_dir, tags=tags).create_node()
    node.add_input_opt('--input', inj_file)
    node.new_output_file_opt(workflow.analysis_time, '.xml', '--output-file')
    workflow += node
    return node.output_files[0]
def mse(vref, vcmp): """ Compute Mean Squared Error (MSE) between two images. Parameters ---------- vref : array_like Reference image vcmp : array_like Comparison image Returns ------- x : float MSE between `vref` and `vcmp` """ r = np.asarray(vref, dtype=np.float64).ravel() c = np.asarray(vcmp, dtype=np.float64).ravel() return np.mean(np.abs(r - c)**2)
Compute Mean Squared Error (MSE) between two images. Parameters ---------- vref : array_like Reference image vcmp : array_like Comparison image Returns ------- x : float MSE between `vref` and `vcmp`
Below is the instruction that describes the task:
    ### Input:
    Compute Mean Squared Error (MSE) between two images.

    Parameters
    ----------
    vref : array_like
      Reference image
    vcmp : array_like
      Comparison image

    Returns
    -------
    x : float
      MSE between `vref` and `vcmp`

    ### Response:
    def mse(vref, vcmp):
    """
    Compute Mean Squared Error (MSE) between two images.

    Parameters
    ----------
    vref : array_like
      Reference image
    vcmp : array_like
      Comparison image

    Returns
    -------
    x : float
      MSE between `vref` and `vcmp`
    """

    r = np.asarray(vref, dtype=np.float64).ravel()
    c = np.asarray(vcmp, dtype=np.float64).ravel()
    return np.mean(np.abs(r - c)**2)
def resizeEvent(self, event): """ Moves the widgets around the system. :param event | <QtGui.QResizeEvent> """ super(XWalkthroughWidget, self).resizeEvent(event) if self.isVisible(): self.autoLayout()
Moves the widgets around the system. :param event | <QtGui.QResizeEvent>
Below is the instruction that describes the task:
    ### Input:
    Moves the widgets around the system.

    :param      event | <QtGui.QResizeEvent>

    ### Response:
    def resizeEvent(self, event):
        """
        Moves the widgets around the system.

        :param      event | <QtGui.QResizeEvent>
        """
        super(XWalkthroughWidget, self).resizeEvent(event)

        if self.isVisible():
            self.autoLayout()
def _load_at(self, time, channels=None): """Load a waveform at a given time.""" if channels is None: channels = slice(None, None, None) time = int(time) time_o = time ns = self.n_samples_trace if not (0 <= time_o < ns): raise ValueError("Invalid time {0:d}/{1:d}.".format(time_o, ns)) slice_extract = _slice(time_o, self.n_samples_before_after, self._filter_margin) extract = self._traces[slice_extract][:, channels].astype(np.float32) # Pad the extracted chunk if needed. if slice_extract.start <= 0: extract = _pad(extract, self._n_samples_extract, 'left') elif slice_extract.stop >= ns - 1: extract = _pad(extract, self._n_samples_extract, 'right') assert extract.shape[0] == self._n_samples_extract return extract
Load a waveform at a given time.
Below is the instruction that describes the task:
    ### Input:
    Load a waveform at a given time.

    ### Response:
    def _load_at(self, time, channels=None):
        """Load a waveform at a given time."""
        if channels is None:
            channels = slice(None, None, None)
        time = int(time)
        time_o = time
        ns = self.n_samples_trace
        if not (0 <= time_o < ns):
            raise ValueError("Invalid time {0:d}/{1:d}.".format(time_o, ns))
        slice_extract = _slice(time_o,
                               self.n_samples_before_after,
                               self._filter_margin)
        extract = self._traces[slice_extract][:, channels].astype(np.float32)

        # Pad the extracted chunk if needed.
        if slice_extract.start <= 0:
            extract = _pad(extract, self._n_samples_extract, 'left')
        elif slice_extract.stop >= ns - 1:
            extract = _pad(extract, self._n_samples_extract, 'right')

        assert extract.shape[0] == self._n_samples_extract
        return extract
def missing_representative_sequence(self): """list: List of genes with no mapping to a representative sequence.""" return [x.id for x in self.genes if not self.genes_with_a_representative_sequence.has_id(x.id)]
list: List of genes with no mapping to a representative sequence.
Below is the instruction that describes the task:
    ### Input:
    list: List of genes with no mapping to a representative sequence.

    ### Response:
    def missing_representative_sequence(self):
        """list: List of genes with no mapping to a representative sequence."""
        return [x.id for x in self.genes if not self.genes_with_a_representative_sequence.has_id(x.id)]
def register(self, plugin): """ Register the given plugin. Registration order is kept. :param plugin: the plugin to register. """ if plugin.name not in self._registered: logger.info('Registering plugin %s' % plugin.name) self._registered[plugin.name] = plugin else: logger.warn('Plugin %s already registered' % plugin.name)
Register the given plugin. Registration order is kept. :param plugin: the plugin to register.
Below is the instruction that describes the task:
    ### Input:
    Register the given plugin. Registration order is kept.

        :param plugin: the plugin to register.

    ### Response:
    def register(self, plugin):
        """ Register the given plugin. Registration order is kept.

        :param plugin: the plugin to register.
        """
        if plugin.name not in self._registered:
            logger.info('Registering plugin %s' % plugin.name)
            self._registered[plugin.name] = plugin
        else:
            logger.warn('Plugin %s already registered' % plugin.name)
def add_button(self, name, button_class=wtf_fields.SubmitField, **options): """Adds a button to the form.""" self._buttons[name] = button_class(**options)
Adds a button to the form.
Below is the instruction that describes the task:
    ### Input:
    Adds a button to the form.

    ### Response:
    def add_button(self, name, button_class=wtf_fields.SubmitField, **options):
        """Adds a button to the form."""
        self._buttons[name] = button_class(**options)
def get_zoom_levels(process_zoom_levels=None, init_zoom_levels=None): """Validate and return zoom levels.""" process_zoom_levels = _validate_zooms(process_zoom_levels) if init_zoom_levels is None: return process_zoom_levels else: init_zoom_levels = _validate_zooms(init_zoom_levels) if not set(init_zoom_levels).issubset(set(process_zoom_levels)): raise MapcheteConfigError( "init zooms must be a subset of process zoom") return init_zoom_levels
Validate and return zoom levels.
Below is the instruction that describes the task:
    ### Input:
    Validate and return zoom levels.

    ### Response:
    def get_zoom_levels(process_zoom_levels=None, init_zoom_levels=None):
    """Validate and return zoom levels."""
    process_zoom_levels = _validate_zooms(process_zoom_levels)
    if init_zoom_levels is None:
        return process_zoom_levels
    else:
        init_zoom_levels = _validate_zooms(init_zoom_levels)
        if not set(init_zoom_levels).issubset(set(process_zoom_levels)):
            raise MapcheteConfigError(
                "init zooms must be a subset of process zoom")
        return init_zoom_levels
def read_hyperparameters(): # type: () -> dict """Read the hyperparameters from /opt/ml/input/config/hyperparameters.json. For more information about hyperparameters.json: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters Returns: (dict[string, object]): a dictionary containing the hyperparameters. """ hyperparameters = _read_json(hyperparameters_file_dir) deserialized_hps = {} for k, v in hyperparameters.items(): try: v = json.loads(v) except (ValueError, TypeError): logger.info("Failed to parse hyperparameter %s value %s to Json.\n" "Returning the value itself", k, v) deserialized_hps[k] = v return deserialized_hps
Read the hyperparameters from /opt/ml/input/config/hyperparameters.json. For more information about hyperparameters.json: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters Returns: (dict[string, object]): a dictionary containing the hyperparameters.
Below is the instruction that describes the task:
    ### Input:
    Read the hyperparameters from /opt/ml/input/config/hyperparameters.json.

    For more information about hyperparameters.json:
    https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters

    Returns:
         (dict[string, object]): a dictionary containing the hyperparameters.

    ### Response:
    def read_hyperparameters():  # type: () -> dict
    """Read the hyperparameters from /opt/ml/input/config/hyperparameters.json.

    For more information about hyperparameters.json:
    https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-hyperparameters

    Returns:
         (dict[string, object]): a dictionary containing the hyperparameters.
    """
    hyperparameters = _read_json(hyperparameters_file_dir)

    deserialized_hps = {}

    for k, v in hyperparameters.items():
        try:
            v = json.loads(v)
        except (ValueError, TypeError):
            logger.info("Failed to parse hyperparameter %s value %s to Json.\n"
                        "Returning the value itself", k, v)

        deserialized_hps[k] = v

    return deserialized_hps
def _convert_odict_to_classes(self, data, clean=False, merge=True, pop_schema=True, compare_to_existing=True, filter_on={}): """Convert `OrderedDict` into `Entry` or its derivative classes.""" self._log.debug("_convert_odict_to_classes(): {}".format(self.name())) self._log.debug("This should be a temporary fix. Dont be lazy.") # Setup filters. Currently only used for photometry. fkeys = list(filter_on.keys()) # Handle 'name' name_key = self._KEYS.NAME if name_key in data: self[name_key] = data.pop(name_key) # Handle 'schema' schema_key = self._KEYS.SCHEMA if schema_key in data: # Schema should be re-added every execution (done elsewhere) so # just delete the old entry if pop_schema: data.pop(schema_key) else: self[schema_key] = data.pop(schema_key) # Cleanup 'internal' repository stuff if clean: # Add data to `self` in ways accomodating 'internal' formats and # leeway. Removes each added entry from `data` so the remaining # stuff can be handled normally data = self.clean_internal(data) # Handle 'sources' # ---------------- src_key = self._KEYS.SOURCES if src_key in data: # Remove from `data` sources = data.pop(src_key) self._log.debug("Found {} '{}' entries".format( len(sources), src_key)) self._log.debug("{}: {}".format(src_key, sources)) for src in sources: self.add_source(allow_alias=True, **src) # Handle `photometry` # ------------------- photo_key = self._KEYS.PHOTOMETRY if photo_key in data: photoms = data.pop(photo_key) self._log.debug("Found {} '{}' entries".format( len(photoms), photo_key)) phcount = 0 for photo in photoms: skip = False for fkey in fkeys: if fkey in photo and photo[fkey] not in filter_on[fkey]: skip = True if skip: continue self._add_cat_dict( Photometry, self._KEYS.PHOTOMETRY, compare_to_existing=compare_to_existing, **photo) phcount += 1 self._log.debug("Added {} '{}' entries".format( phcount, photo_key)) # Handle `spectra` # --------------- spec_key = self._KEYS.SPECTRA if spec_key in data: # When we are cleaning internal data, we don't 
always want to # require all of the normal spectrum data elements. spectra = data.pop(spec_key) self._log.debug("Found {} '{}' entries".format( len(spectra), spec_key)) for spec in spectra: self._add_cat_dict( Spectrum, self._KEYS.SPECTRA, compare_to_existing=compare_to_existing, **spec) # Handle `error` # -------------- err_key = self._KEYS.ERRORS if err_key in data: errors = data.pop(err_key) self._log.debug("Found {} '{}' entries".format( len(errors), err_key)) for err in errors: self._add_cat_dict(Error, self._KEYS.ERRORS, **err) # Handle `models` # --------------- model_key = self._KEYS.MODELS if model_key in data: # When we are cleaning internal data, we don't always want to # require all of the normal spectrum data elements. model = data.pop(model_key) self._log.debug("Found {} '{}' entries".format( len(model), model_key)) for mod in model: self._add_cat_dict( Model, self._KEYS.MODELS, compare_to_existing=compare_to_existing, **mod) # Handle everything else --- should be `Quantity`s # ------------------------------------------------ if len(data): self._log.debug("{} remaining entries, assuming `Quantity`".format( len(data))) # Iterate over remaining keys for key in list(data.keys()): vals = data.pop(key) # All quantities should be in lists of that quantity # E.g. `aliases` is a list of alias quantities if not isinstance(vals, list): vals = [vals] self._log.debug("{}: {}".format(key, vals)) for vv in vals: self._add_cat_dict( Quantity, key, check_for_dupes=merge, compare_to_existing=compare_to_existing, **vv) if merge and self.dupe_of: self.merge_dupes() return
Convert `OrderedDict` into `Entry` or its derivative classes.
Below is the the instruction that describes the task: ### Input: Convert `OrderedDict` into `Entry` or its derivative classes. ### Response: def _convert_odict_to_classes(self, data, clean=False, merge=True, pop_schema=True, compare_to_existing=True, filter_on={}): """Convert `OrderedDict` into `Entry` or its derivative classes.""" self._log.debug("_convert_odict_to_classes(): {}".format(self.name())) self._log.debug("This should be a temporary fix. Dont be lazy.") # Setup filters. Currently only used for photometry. fkeys = list(filter_on.keys()) # Handle 'name' name_key = self._KEYS.NAME if name_key in data: self[name_key] = data.pop(name_key) # Handle 'schema' schema_key = self._KEYS.SCHEMA if schema_key in data: # Schema should be re-added every execution (done elsewhere) so # just delete the old entry if pop_schema: data.pop(schema_key) else: self[schema_key] = data.pop(schema_key) # Cleanup 'internal' repository stuff if clean: # Add data to `self` in ways accomodating 'internal' formats and # leeway. 
Removes each added entry from `data` so the remaining # stuff can be handled normally data = self.clean_internal(data) # Handle 'sources' # ---------------- src_key = self._KEYS.SOURCES if src_key in data: # Remove from `data` sources = data.pop(src_key) self._log.debug("Found {} '{}' entries".format( len(sources), src_key)) self._log.debug("{}: {}".format(src_key, sources)) for src in sources: self.add_source(allow_alias=True, **src) # Handle `photometry` # ------------------- photo_key = self._KEYS.PHOTOMETRY if photo_key in data: photoms = data.pop(photo_key) self._log.debug("Found {} '{}' entries".format( len(photoms), photo_key)) phcount = 0 for photo in photoms: skip = False for fkey in fkeys: if fkey in photo and photo[fkey] not in filter_on[fkey]: skip = True if skip: continue self._add_cat_dict( Photometry, self._KEYS.PHOTOMETRY, compare_to_existing=compare_to_existing, **photo) phcount += 1 self._log.debug("Added {} '{}' entries".format( phcount, photo_key)) # Handle `spectra` # --------------- spec_key = self._KEYS.SPECTRA if spec_key in data: # When we are cleaning internal data, we don't always want to # require all of the normal spectrum data elements. spectra = data.pop(spec_key) self._log.debug("Found {} '{}' entries".format( len(spectra), spec_key)) for spec in spectra: self._add_cat_dict( Spectrum, self._KEYS.SPECTRA, compare_to_existing=compare_to_existing, **spec) # Handle `error` # -------------- err_key = self._KEYS.ERRORS if err_key in data: errors = data.pop(err_key) self._log.debug("Found {} '{}' entries".format( len(errors), err_key)) for err in errors: self._add_cat_dict(Error, self._KEYS.ERRORS, **err) # Handle `models` # --------------- model_key = self._KEYS.MODELS if model_key in data: # When we are cleaning internal data, we don't always want to # require all of the normal spectrum data elements. 
model = data.pop(model_key) self._log.debug("Found {} '{}' entries".format( len(model), model_key)) for mod in model: self._add_cat_dict( Model, self._KEYS.MODELS, compare_to_existing=compare_to_existing, **mod) # Handle everything else --- should be `Quantity`s # ------------------------------------------------ if len(data): self._log.debug("{} remaining entries, assuming `Quantity`".format( len(data))) # Iterate over remaining keys for key in list(data.keys()): vals = data.pop(key) # All quantities should be in lists of that quantity # E.g. `aliases` is a list of alias quantities if not isinstance(vals, list): vals = [vals] self._log.debug("{}: {}".format(key, vals)) for vv in vals: self._add_cat_dict( Quantity, key, check_for_dupes=merge, compare_to_existing=compare_to_existing, **vv) if merge and self.dupe_of: self.merge_dupes() return
def _match_cmap(self, fitsimage, colorbar): """ Help method to change the ColorBar to match the cut levels or colormap used in a ginga ImageView. """ rgbmap = fitsimage.get_rgbmap() loval, hival = fitsimage.get_cut_levels() colorbar.set_range(loval, hival) # If we are sharing a ColorBar for all channels, then store # to change the ColorBar's rgbmap to match our colorbar.set_rgbmap(rgbmap)
Help method to change the ColorBar to match the cut levels or colormap used in a ginga ImageView.
Below is the instruction that describes the task:
### Input:
Help method to change the ColorBar to match the cut levels or
colormap used in a ginga ImageView.
### Response:
def _match_cmap(self, fitsimage, colorbar):
        """
        Help method to change the ColorBar to match the cut levels or
        colormap used in a ginga ImageView.
        """
        rgbmap = fitsimage.get_rgbmap()
        loval, hival = fitsimage.get_cut_levels()
        colorbar.set_range(loval, hival)
        # If we are sharing a ColorBar for all channels, then store
        # to change the ColorBar's rgbmap to match our
        colorbar.set_rgbmap(rgbmap)
def sepconv_relu_sepconv(inputs, filter_size, output_size, first_kernel_size=(1, 1), second_kernel_size=(1, 1), padding="LEFT", nonpadding_mask=None, dropout=0.0, name=None): """Hidden layer with RELU activation followed by linear projection.""" with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]): inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask) if inputs.get_shape().ndims == 3: is_3d = True inputs = tf.expand_dims(inputs, 2) else: is_3d = False h = separable_conv( inputs, filter_size, first_kernel_size, activation=tf.nn.relu, padding=padding, name="conv1") if dropout != 0.0: h = tf.nn.dropout(h, 1.0 - dropout) h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) ret = separable_conv( h, output_size, second_kernel_size, padding=padding, name="conv2") if is_3d: ret = tf.squeeze(ret, 2) return ret
Hidden layer with RELU activation followed by linear projection.
Below is the the instruction that describes the task: ### Input: Hidden layer with RELU activation followed by linear projection. ### Response: def sepconv_relu_sepconv(inputs, filter_size, output_size, first_kernel_size=(1, 1), second_kernel_size=(1, 1), padding="LEFT", nonpadding_mask=None, dropout=0.0, name=None): """Hidden layer with RELU activation followed by linear projection.""" with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]): inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask) if inputs.get_shape().ndims == 3: is_3d = True inputs = tf.expand_dims(inputs, 2) else: is_3d = False h = separable_conv( inputs, filter_size, first_kernel_size, activation=tf.nn.relu, padding=padding, name="conv1") if dropout != 0.0: h = tf.nn.dropout(h, 1.0 - dropout) h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) ret = separable_conv( h, output_size, second_kernel_size, padding=padding, name="conv2") if is_3d: ret = tf.squeeze(ret, 2) return ret
def _retrieve_all_teams(self, year): """ Find and create Team instances for all teams in the given season. For a given season, parses the specified NCAAF stats table and finds all requested stats. Each team then has a Team instance created which includes all requested stats and a few identifiers, such as the team's name and abbreviation. All of the individual Team instances are added to a list. Note that this method is called directly once Teams is invoked and does not need to be called manually. Parameters ---------- year : string The requested year to pull stats from. """ team_data_dict = {} if not year: year = utils._find_year_for_season('ncaaf') doc = pq(SEASON_PAGE_URL % year) teams_list = utils._get_stats_table(doc, 'div#div_standings') offense_doc = pq(OFFENSIVE_STATS_URL % year) offense_list = utils._get_stats_table(offense_doc, 'table#offense') for stats_list in [teams_list, offense_list]: team_data_dict = self._add_stats_data(stats_list, team_data_dict) for team_name, team_data in team_data_dict.items(): team = Team(team_data['data'], self._conferences_dict[team_name.lower()], year) self._teams.append(team)
Find and create Team instances for all teams in the given season. For a given season, parses the specified NCAAF stats table and finds all requested stats. Each team then has a Team instance created which includes all requested stats and a few identifiers, such as the team's name and abbreviation. All of the individual Team instances are added to a list. Note that this method is called directly once Teams is invoked and does not need to be called manually. Parameters ---------- year : string The requested year to pull stats from.
Below is the the instruction that describes the task: ### Input: Find and create Team instances for all teams in the given season. For a given season, parses the specified NCAAF stats table and finds all requested stats. Each team then has a Team instance created which includes all requested stats and a few identifiers, such as the team's name and abbreviation. All of the individual Team instances are added to a list. Note that this method is called directly once Teams is invoked and does not need to be called manually. Parameters ---------- year : string The requested year to pull stats from. ### Response: def _retrieve_all_teams(self, year): """ Find and create Team instances for all teams in the given season. For a given season, parses the specified NCAAF stats table and finds all requested stats. Each team then has a Team instance created which includes all requested stats and a few identifiers, such as the team's name and abbreviation. All of the individual Team instances are added to a list. Note that this method is called directly once Teams is invoked and does not need to be called manually. Parameters ---------- year : string The requested year to pull stats from. """ team_data_dict = {} if not year: year = utils._find_year_for_season('ncaaf') doc = pq(SEASON_PAGE_URL % year) teams_list = utils._get_stats_table(doc, 'div#div_standings') offense_doc = pq(OFFENSIVE_STATS_URL % year) offense_list = utils._get_stats_table(offense_doc, 'table#offense') for stats_list in [teams_list, offense_list]: team_data_dict = self._add_stats_data(stats_list, team_data_dict) for team_name, team_data in team_data_dict.items(): team = Team(team_data['data'], self._conferences_dict[team_name.lower()], year) self._teams.append(team)
def _get_cmap_data(data, kwargs) -> Tuple[colors.Normalize, np.ndarray]: """Get normalized values to be used with a colormap. Parameters ---------- data : array_like cmap_min : Optional[float] or "min" By default 0. If "min", minimum value of the data. cmap_max : Optional[float] By default, maximum value of the data cmap_normalize : str or colors.Normalize Returns ------- normalizer : colors.Normalize normalized_data : array_like """ norm = kwargs.pop("cmap_normalize", None) if norm == "log": cmap_max = kwargs.pop("cmap_max", data.max()) cmap_min = kwargs.pop("cmap_min", data[data > 0].min()) norm = colors.LogNorm(cmap_min, cmap_max) elif not norm: cmap_max = kwargs.pop("cmap_max", data.max()) cmap_min = kwargs.pop("cmap_min", 0) if cmap_min == "min": cmap_min = data.min() norm = colors.Normalize(cmap_min, cmap_max, clip=True) return norm, norm(data)
Get normalized values to be used with a colormap. Parameters ---------- data : array_like cmap_min : Optional[float] or "min" By default 0. If "min", minimum value of the data. cmap_max : Optional[float] By default, maximum value of the data cmap_normalize : str or colors.Normalize Returns ------- normalizer : colors.Normalize normalized_data : array_like
Below is the the instruction that describes the task: ### Input: Get normalized values to be used with a colormap. Parameters ---------- data : array_like cmap_min : Optional[float] or "min" By default 0. If "min", minimum value of the data. cmap_max : Optional[float] By default, maximum value of the data cmap_normalize : str or colors.Normalize Returns ------- normalizer : colors.Normalize normalized_data : array_like ### Response: def _get_cmap_data(data, kwargs) -> Tuple[colors.Normalize, np.ndarray]: """Get normalized values to be used with a colormap. Parameters ---------- data : array_like cmap_min : Optional[float] or "min" By default 0. If "min", minimum value of the data. cmap_max : Optional[float] By default, maximum value of the data cmap_normalize : str or colors.Normalize Returns ------- normalizer : colors.Normalize normalized_data : array_like """ norm = kwargs.pop("cmap_normalize", None) if norm == "log": cmap_max = kwargs.pop("cmap_max", data.max()) cmap_min = kwargs.pop("cmap_min", data[data > 0].min()) norm = colors.LogNorm(cmap_min, cmap_max) elif not norm: cmap_max = kwargs.pop("cmap_max", data.max()) cmap_min = kwargs.pop("cmap_min", 0) if cmap_min == "min": cmap_min = data.min() norm = colors.Normalize(cmap_min, cmap_max, clip=True) return norm, norm(data)
def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB """ if not [port["port_number"] for port in self._ports_mapping if port_number == port["port_number"]]: raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name, port_number=port_number)) if port_number not in self._nios: raise NodeError("Port {} is not connected".format(port_number)) nio = self._nios[port_number] if nio.capturing: raise NodeError("Packet capture is already activated on port {port_number}".format(port_number=port_number)) nio.startPacketCapture(output_file) bridge_name = "{}-{}".format(self._id, port_number) yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=bridge_name, output_file=output_file)) log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name, id=self.id, port_number=port_number))
Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB
Below is the the instruction that describes the task: ### Input: Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB ### Response: def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"): """ Starts a packet capture. :param port_number: allocated port number :param output_file: PCAP destination file for the capture :param data_link_type: PCAP data link type (DLT_*), default is DLT_EN10MB """ if not [port["port_number"] for port in self._ports_mapping if port_number == port["port_number"]]: raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name, port_number=port_number)) if port_number not in self._nios: raise NodeError("Port {} is not connected".format(port_number)) nio = self._nios[port_number] if nio.capturing: raise NodeError("Packet capture is already activated on port {port_number}".format(port_number=port_number)) nio.startPacketCapture(output_file) bridge_name = "{}-{}".format(self._id, port_number) yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=bridge_name, output_file=output_file)) log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name, id=self.id, port_number=port_number))
def assert_is_not(expected, actual, message=None, extra=None): """Raises an AssertionError if expected is actual.""" assert expected is not actual, _assert_fail_message( message, expected, actual, "is", extra )
Raises an AssertionError if expected is actual.
Below is the instruction that describes the task:
### Input:
Raises an AssertionError if expected is actual.
### Response:
def assert_is_not(expected, actual, message=None, extra=None):
    """Raises an AssertionError if expected is actual."""
    assert expected is not actual, _assert_fail_message(
        message, expected, actual, "is", extra
    )
def get_paginated_response(self, data): """ Annotate the response with pagination information. """ return Response({ 'next': self.get_next_link(), 'previous': self.get_previous_link(), 'count': self.page.paginator.count, 'num_pages': self.page.paginator.num_pages, 'current_page': self.page.number, 'start': (self.page.number - 1) * self.get_page_size(self.request), 'results': data })
Annotate the response with pagination information.
Below is the instruction that describes the task:
### Input:
Annotate the response with pagination information.
### Response:
def get_paginated_response(self, data):
        """
        Annotate the response with pagination information.
        """
        return Response({
            'next': self.get_next_link(),
            'previous': self.get_previous_link(),
            'count': self.page.paginator.count,
            'num_pages': self.page.paginator.num_pages,
            'current_page': self.page.number,
            'start': (self.page.number - 1) * self.get_page_size(self.request),
            'results': data
        })
def dumpDictHdf5(RV,o): """ Dump a dictionary where each page is a list or an array """ for key in list(RV.keys()): o.create_dataset(name=key,data=SP.array(RV[key]),chunks=True,compression='gzip')
Dump a dictionary where each page is a list or an array
Below is the instruction that describes the task:
### Input:
Dump a dictionary where each page is a list or an array
### Response:
def dumpDictHdf5(RV,o):
    """ Dump a dictionary where each page is a list or an array """
    for key in list(RV.keys()):
        o.create_dataset(name=key,data=SP.array(RV[key]),chunks=True,compression='gzip')
def _ioctl_cast(n): """ Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin it is signed. Until 2.5 Python exclusively implemented the BSD behaviour, preventing use of large unsigned int requests like the TTY layer uses below. So on 2.4, we cast our unsigned to look like signed for Python. """ if sys.version_info < (2, 5): n, = struct.unpack('i', struct.pack('I', n)) return n
Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin it is signed. Until 2.5 Python exclusively implemented the BSD behaviour, preventing use of large unsigned int requests like the TTY layer uses below. So on 2.4, we cast our unsigned to look like signed for Python.
Below is the instruction that describes the task:
### Input:
Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin
it is signed. Until 2.5 Python exclusively implemented the BSD
behaviour, preventing use of large unsigned int requests like the TTY
layer uses below. So on 2.4, we cast our unsigned to look like signed
for Python.
### Response:
def _ioctl_cast(n):
    """
    Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin
    it is signed. Until 2.5 Python exclusively implemented the BSD
    behaviour, preventing use of large unsigned int requests like the TTY
    layer uses below. So on 2.4, we cast our unsigned to look like signed
    for Python.
    """
    if sys.version_info < (2, 5):
        n, = struct.unpack('i', struct.pack('I', n))
    return n
def values(self): """Return a list of all the message's header values. These will be sorted in the order they appeared in the original message, or were added to the message, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list. """ return [self.policy.header_fetch_parse(k, v) for k, v in self._headers]
Return a list of all the message's header values. These will be sorted in the order they appeared in the original message, or were added to the message, and may contain duplicates. Any fields deleted and re-inserted are always appended to the header list.
Below is the instruction that describes the task:
### Input:
Return a list of all the message's header values.

These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
### Response:
def values(self):
        """Return a list of all the message's header values.

        These will be sorted in the order they appeared in the original
        message, or were added to the message, and may contain duplicates.
        Any fields deleted and re-inserted are always appended to the header
        list.
        """
        return [self.policy.header_fetch_parse(k, v)
                for k, v in self._headers]
def execute_django(self, soql, args=()): """ Fixed execute for queries coming from Django query compilers """ response = None sqltype = soql.split(None, 1)[0].upper() if isinstance(self.query, subqueries.InsertQuery): response = self.execute_insert(self.query) elif isinstance(self.query, subqueries.UpdateQuery): response = self.execute_update(self.query) elif isinstance(self.query, subqueries.DeleteQuery): response = self.execute_delete(self.query) elif isinstance(self.query, RawQuery): self.execute_select(soql, args) elif sqltype in ('SAVEPOINT', 'ROLLBACK', 'RELEASE'): log.info("Ignored SQL command '%s'", sqltype) return elif isinstance(self.query, Query): self.execute_select(soql, args) else: raise DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query)) return response
Fixed execute for queries coming from Django query compilers
Below is the the instruction that describes the task: ### Input: Fixed execute for queries coming from Django query compilers ### Response: def execute_django(self, soql, args=()): """ Fixed execute for queries coming from Django query compilers """ response = None sqltype = soql.split(None, 1)[0].upper() if isinstance(self.query, subqueries.InsertQuery): response = self.execute_insert(self.query) elif isinstance(self.query, subqueries.UpdateQuery): response = self.execute_update(self.query) elif isinstance(self.query, subqueries.DeleteQuery): response = self.execute_delete(self.query) elif isinstance(self.query, RawQuery): self.execute_select(soql, args) elif sqltype in ('SAVEPOINT', 'ROLLBACK', 'RELEASE'): log.info("Ignored SQL command '%s'", sqltype) return elif isinstance(self.query, Query): self.execute_select(soql, args) else: raise DatabaseError("Unsupported query: type %s: %s" % (type(self.query), self.query)) return response