code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def createReport(tm, options, sequenceString, numSegments, numSynapses): """ Create CSV file with detailed trace of predictions, missed predictions, accuracy, segment/synapse growth, etc. """ pac = tm.mmGetTracePredictedActiveColumns() pic = tm.mmGetTracePredictedInactiveColumns() upac = tm.mmGetTraceUnpredictedActiveColumns() resultsFilename = os.path.join("results", options.name+"_"+str(int(100*options.noise))+".csv") with open(resultsFilename,"wb") as resultsFile: csvWriter = csv.writer(resultsFile) accuracies = numpy.zeros(len(pac.data)) smoothedAccuracies = [] am = 0 csvWriter.writerow(["time", "element", "pac", "pic", "upac", "a", "am", "accuracy", "sum", "nSegs", "nSyns"]) for i,j in enumerate(pac.data): if i>0: # Compute instantaneous and average accuracy. a = computePredictionAccuracy(len(j), len(pic.data[i])) # Smooth the curve to get averaged results for the paper. am = 0.99*am + 0.01*a accuracies[i] = am i0 = max(0, i-60+1) accuracy = numpy.mean(accuracies[i0:i+1]) smoothedAccuracies.append(accuracy) row=[i, sequenceString[i], len(j), len(pic.data[i]), len(upac.data[i]), a, am, accuracy, numpy.sum(accuracies[i0:i+1]), numSegments[i], numSynapses[i]] csvWriter.writerow(row) return smoothedAccuracies
Create CSV file with detailed trace of predictions, missed predictions, accuracy, segment/synapse growth, etc.
Below is the the instruction that describes the task: ### Input: Create CSV file with detailed trace of predictions, missed predictions, accuracy, segment/synapse growth, etc. ### Response: def createReport(tm, options, sequenceString, numSegments, numSynapses): """ Create CSV file with detailed trace of predictions, missed predictions, accuracy, segment/synapse growth, etc. """ pac = tm.mmGetTracePredictedActiveColumns() pic = tm.mmGetTracePredictedInactiveColumns() upac = tm.mmGetTraceUnpredictedActiveColumns() resultsFilename = os.path.join("results", options.name+"_"+str(int(100*options.noise))+".csv") with open(resultsFilename,"wb") as resultsFile: csvWriter = csv.writer(resultsFile) accuracies = numpy.zeros(len(pac.data)) smoothedAccuracies = [] am = 0 csvWriter.writerow(["time", "element", "pac", "pic", "upac", "a", "am", "accuracy", "sum", "nSegs", "nSyns"]) for i,j in enumerate(pac.data): if i>0: # Compute instantaneous and average accuracy. a = computePredictionAccuracy(len(j), len(pic.data[i])) # Smooth the curve to get averaged results for the paper. am = 0.99*am + 0.01*a accuracies[i] = am i0 = max(0, i-60+1) accuracy = numpy.mean(accuracies[i0:i+1]) smoothedAccuracies.append(accuracy) row=[i, sequenceString[i], len(j), len(pic.data[i]), len(upac.data[i]), a, am, accuracy, numpy.sum(accuracies[i0:i+1]), numSegments[i], numSynapses[i]] csvWriter.writerow(row) return smoothedAccuracies
def evaluate_single_config( hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn=_eval_fn_with_learner ): """Evaluate the PPO agent in the real environment.""" tf.logging.info("Evaluating metric %s", get_metric_name( sampling_temp, max_num_noops, clipped=False )) eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params) env = setup_env( hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops, rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps, env_name=hparams.rl_env_name) env.start_new_epoch(0) eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp) rollouts = env.current_epoch_rollouts() env.close() return tuple( compute_mean_reward(rollouts, clipped) for clipped in (True, False) )
Evaluate the PPO agent in the real environment.
Below is the the instruction that describes the task: ### Input: Evaluate the PPO agent in the real environment. ### Response: def evaluate_single_config( hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn=_eval_fn_with_learner ): """Evaluate the PPO agent in the real environment.""" tf.logging.info("Evaluating metric %s", get_metric_name( sampling_temp, max_num_noops, clipped=False )) eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params) env = setup_env( hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops, rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps, env_name=hparams.rl_env_name) env.start_new_epoch(0) eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp) rollouts = env.current_epoch_rollouts() env.close() return tuple( compute_mean_reward(rollouts, clipped) for clipped in (True, False) )
def has_slave(self): '''Returns True/False wether we have a slave agency which is not standalone running.''' slave = first(x for x in self.slaves.itervalues() if not x.is_standalone) return slave is not None
Returns True/False wether we have a slave agency which is not standalone running.
Below is the the instruction that describes the task: ### Input: Returns True/False wether we have a slave agency which is not standalone running. ### Response: def has_slave(self): '''Returns True/False wether we have a slave agency which is not standalone running.''' slave = first(x for x in self.slaves.itervalues() if not x.is_standalone) return slave is not None
def createRtiFromFileName(fileName): """ Determines the type of RepoTreeItem to use given a file name and creates it. Uses a DirectoryRti for directories and an UnknownFileRti if the file extension doesn't match one of the registered RTI extensions. """ cls, rtiRegItem = detectRtiFromFileName(fileName) if cls is None: logger.warn("Unable to import plugin {}: {}" .format(rtiRegItem.fullName, rtiRegItem.exception)) rti = UnknownFileRti.createFromFileName(fileName) rti.setException(rtiRegItem.exception) else: rti = cls.createFromFileName(fileName) assert rti, "Sanity check failed (createRtiFromFileName). Please report this bug." return rti
Determines the type of RepoTreeItem to use given a file name and creates it. Uses a DirectoryRti for directories and an UnknownFileRti if the file extension doesn't match one of the registered RTI extensions.
Below is the the instruction that describes the task: ### Input: Determines the type of RepoTreeItem to use given a file name and creates it. Uses a DirectoryRti for directories and an UnknownFileRti if the file extension doesn't match one of the registered RTI extensions. ### Response: def createRtiFromFileName(fileName): """ Determines the type of RepoTreeItem to use given a file name and creates it. Uses a DirectoryRti for directories and an UnknownFileRti if the file extension doesn't match one of the registered RTI extensions. """ cls, rtiRegItem = detectRtiFromFileName(fileName) if cls is None: logger.warn("Unable to import plugin {}: {}" .format(rtiRegItem.fullName, rtiRegItem.exception)) rti = UnknownFileRti.createFromFileName(fileName) rti.setException(rtiRegItem.exception) else: rti = cls.createFromFileName(fileName) assert rti, "Sanity check failed (createRtiFromFileName). Please report this bug." return rti
def average_sphere(image, center, radius, weighted=True, ret_crop=False): """Compute the weighted average phase from a phase image of a sphere Parameters ---------- image: 2d ndarray Quantitative phase image of a sphere center: tuble (x,y) Center of the sphere in `image` in ndarray coordinates radius: float Radius of the sphere in pixels weighted: bool If `True`, return average phase density weighted with the height profile obtained from the radius, otherwise return simple average phase density. Weighting gives data points at the center of the sphere more weight than those points at the boundary of the sphere, avoiding edge artifacts. ret_crop: bool Return the cropped image. Returns ------- average: float The average phase value of the sphere from which the refractive index can be computed cropped_image: 2d ndarray Returned if `ret_crop` is True """ sx, sy = image.shape x = np.arange(sx).reshape(-1, 1) y = np.arange(sy).reshape(1, -1) discsq = ((x - center[0])**2 + (y - center[1])**2) root = radius**2 - discsq # height of the cell for each x and y h = 2 * np.sqrt(root * (root > 0)) # compute phase density rho = np.zeros(image.shape) hbin = h != 0 # phase density [rad/px] rho[hbin] = image[hbin] / h[hbin] if weighted: # compute weighted average average = np.sum(rho * h) / np.sum(h) else: # compute simple average average = np.sum(rho) / np.sum(hbin) ret = average if ret_crop: ret = (ret, rho) return ret
Compute the weighted average phase from a phase image of a sphere Parameters ---------- image: 2d ndarray Quantitative phase image of a sphere center: tuble (x,y) Center of the sphere in `image` in ndarray coordinates radius: float Radius of the sphere in pixels weighted: bool If `True`, return average phase density weighted with the height profile obtained from the radius, otherwise return simple average phase density. Weighting gives data points at the center of the sphere more weight than those points at the boundary of the sphere, avoiding edge artifacts. ret_crop: bool Return the cropped image. Returns ------- average: float The average phase value of the sphere from which the refractive index can be computed cropped_image: 2d ndarray Returned if `ret_crop` is True
Below is the the instruction that describes the task: ### Input: Compute the weighted average phase from a phase image of a sphere Parameters ---------- image: 2d ndarray Quantitative phase image of a sphere center: tuble (x,y) Center of the sphere in `image` in ndarray coordinates radius: float Radius of the sphere in pixels weighted: bool If `True`, return average phase density weighted with the height profile obtained from the radius, otherwise return simple average phase density. Weighting gives data points at the center of the sphere more weight than those points at the boundary of the sphere, avoiding edge artifacts. ret_crop: bool Return the cropped image. Returns ------- average: float The average phase value of the sphere from which the refractive index can be computed cropped_image: 2d ndarray Returned if `ret_crop` is True ### Response: def average_sphere(image, center, radius, weighted=True, ret_crop=False): """Compute the weighted average phase from a phase image of a sphere Parameters ---------- image: 2d ndarray Quantitative phase image of a sphere center: tuble (x,y) Center of the sphere in `image` in ndarray coordinates radius: float Radius of the sphere in pixels weighted: bool If `True`, return average phase density weighted with the height profile obtained from the radius, otherwise return simple average phase density. Weighting gives data points at the center of the sphere more weight than those points at the boundary of the sphere, avoiding edge artifacts. ret_crop: bool Return the cropped image. 
Returns ------- average: float The average phase value of the sphere from which the refractive index can be computed cropped_image: 2d ndarray Returned if `ret_crop` is True """ sx, sy = image.shape x = np.arange(sx).reshape(-1, 1) y = np.arange(sy).reshape(1, -1) discsq = ((x - center[0])**2 + (y - center[1])**2) root = radius**2 - discsq # height of the cell for each x and y h = 2 * np.sqrt(root * (root > 0)) # compute phase density rho = np.zeros(image.shape) hbin = h != 0 # phase density [rad/px] rho[hbin] = image[hbin] / h[hbin] if weighted: # compute weighted average average = np.sum(rho * h) / np.sum(h) else: # compute simple average average = np.sum(rho) / np.sum(hbin) ret = average if ret_crop: ret = (ret, rho) return ret
def _get_batch_representative(items, key): """Retrieve a representative data item from a batch. Handles standard bcbio cases (a single data item) and CWL cases with batches that have a consistent variant file. """ if isinstance(items, dict): return items, items else: vals = set([]) out = [] for data in items: if key in data: vals.add(data[key]) out.append(data) if len(vals) != 1: raise ValueError("Incorrect values for %s: %s" % (key, list(vals))) return out[0], items
Retrieve a representative data item from a batch. Handles standard bcbio cases (a single data item) and CWL cases with batches that have a consistent variant file.
Below is the the instruction that describes the task: ### Input: Retrieve a representative data item from a batch. Handles standard bcbio cases (a single data item) and CWL cases with batches that have a consistent variant file. ### Response: def _get_batch_representative(items, key): """Retrieve a representative data item from a batch. Handles standard bcbio cases (a single data item) and CWL cases with batches that have a consistent variant file. """ if isinstance(items, dict): return items, items else: vals = set([]) out = [] for data in items: if key in data: vals.add(data[key]) out.append(data) if len(vals) != 1: raise ValueError("Incorrect values for %s: %s" % (key, list(vals))) return out[0], items
def path_size(p: tcod.path.AStar) -> int: """Return the current length of the computed path. Args: p (AStar): An AStar instance. Returns: int: Length of the path. """ return int(lib.TCOD_path_size(p._path_c))
Return the current length of the computed path. Args: p (AStar): An AStar instance. Returns: int: Length of the path.
Below is the the instruction that describes the task: ### Input: Return the current length of the computed path. Args: p (AStar): An AStar instance. Returns: int: Length of the path. ### Response: def path_size(p: tcod.path.AStar) -> int: """Return the current length of the computed path. Args: p (AStar): An AStar instance. Returns: int: Length of the path. """ return int(lib.TCOD_path_size(p._path_c))
def filter_slow_requests(slowness): """Filter :class:`.Line` objects by their response time. :param slowness: minimum time, in milliseconds, a server needs to answer a request. If the server takes more time than that the log line is accepted. :type slowness: string :returns: a function that filters by the server response time. :rtype: function """ def filter_func(log_line): slowness_int = int(slowness) return slowness_int <= log_line.time_wait_response return filter_func
Filter :class:`.Line` objects by their response time. :param slowness: minimum time, in milliseconds, a server needs to answer a request. If the server takes more time than that the log line is accepted. :type slowness: string :returns: a function that filters by the server response time. :rtype: function
Below is the the instruction that describes the task: ### Input: Filter :class:`.Line` objects by their response time. :param slowness: minimum time, in milliseconds, a server needs to answer a request. If the server takes more time than that the log line is accepted. :type slowness: string :returns: a function that filters by the server response time. :rtype: function ### Response: def filter_slow_requests(slowness): """Filter :class:`.Line` objects by their response time. :param slowness: minimum time, in milliseconds, a server needs to answer a request. If the server takes more time than that the log line is accepted. :type slowness: string :returns: a function that filters by the server response time. :rtype: function """ def filter_func(log_line): slowness_int = int(slowness) return slowness_int <= log_line.time_wait_response return filter_func
def vmx_path(self, vmx_path): """ Sets the path to the vmx file. :param vmx_path: VMware vmx file """ log.info("VMware VM '{name}' [{id}] has set the vmx file path to '{vmx}'".format(name=self.name, id=self.id, vmx=vmx_path)) self._vmx_path = vmx_path
Sets the path to the vmx file. :param vmx_path: VMware vmx file
Below is the the instruction that describes the task: ### Input: Sets the path to the vmx file. :param vmx_path: VMware vmx file ### Response: def vmx_path(self, vmx_path): """ Sets the path to the vmx file. :param vmx_path: VMware vmx file """ log.info("VMware VM '{name}' [{id}] has set the vmx file path to '{vmx}'".format(name=self.name, id=self.id, vmx=vmx_path)) self._vmx_path = vmx_path
def get_lr(self, score): """Compute a :float: likelihood ratio from a provided similarity score when compared to two probability density functions which are computed and pre-loaded during init. The numerator indicates the probability density that a particular similarity score corresponds to a 'good' addon donor, i.e. a client that is similar in the sense of telemetry variables. The denominator indicates the probability density that a particular similarity score corresponds to a 'poor' addon donor :param score: A similarity score between a pair of objects. :returns: The approximate float likelihood ratio corresponding to provided score. """ # Find the index of the closest value that was precomputed in lr_curves # This will significantly speed up |get_lr|. # The lr_curves_cache is a list of scalar distance # measurements lr_curves_cache = np.array([s[0] for s in self.lr_curves]) # np.argmin produces the index to the part of the curve # where distance is the smallest to the score which we are # inspecting currently. idx = np.argmin(abs(score - lr_curves_cache)) numer_val = self.lr_curves[idx][1][0] denum_val = self.lr_curves[idx][1][1] # Compute LR based on numerator and denominator values return float(numer_val) / float(denum_val)
Compute a :float: likelihood ratio from a provided similarity score when compared to two probability density functions which are computed and pre-loaded during init. The numerator indicates the probability density that a particular similarity score corresponds to a 'good' addon donor, i.e. a client that is similar in the sense of telemetry variables. The denominator indicates the probability density that a particular similarity score corresponds to a 'poor' addon donor :param score: A similarity score between a pair of objects. :returns: The approximate float likelihood ratio corresponding to provided score.
Below is the the instruction that describes the task: ### Input: Compute a :float: likelihood ratio from a provided similarity score when compared to two probability density functions which are computed and pre-loaded during init. The numerator indicates the probability density that a particular similarity score corresponds to a 'good' addon donor, i.e. a client that is similar in the sense of telemetry variables. The denominator indicates the probability density that a particular similarity score corresponds to a 'poor' addon donor :param score: A similarity score between a pair of objects. :returns: The approximate float likelihood ratio corresponding to provided score. ### Response: def get_lr(self, score): """Compute a :float: likelihood ratio from a provided similarity score when compared to two probability density functions which are computed and pre-loaded during init. The numerator indicates the probability density that a particular similarity score corresponds to a 'good' addon donor, i.e. a client that is similar in the sense of telemetry variables. The denominator indicates the probability density that a particular similarity score corresponds to a 'poor' addon donor :param score: A similarity score between a pair of objects. :returns: The approximate float likelihood ratio corresponding to provided score. """ # Find the index of the closest value that was precomputed in lr_curves # This will significantly speed up |get_lr|. # The lr_curves_cache is a list of scalar distance # measurements lr_curves_cache = np.array([s[0] for s in self.lr_curves]) # np.argmin produces the index to the part of the curve # where distance is the smallest to the score which we are # inspecting currently. idx = np.argmin(abs(score - lr_curves_cache)) numer_val = self.lr_curves[idx][1][0] denum_val = self.lr_curves[idx][1][1] # Compute LR based on numerator and denominator values return float(numer_val) / float(denum_val)
def set_lr(self, lr): """ Set a learning rate for the optimizer """ if isinstance(lr, list): for group_lr, param_group in zip(lr, self.optimizer.param_groups): param_group['lr'] = group_lr else: for param_group in self.optimizer.param_groups: param_group['lr'] = lr
Set a learning rate for the optimizer
Below is the the instruction that describes the task: ### Input: Set a learning rate for the optimizer ### Response: def set_lr(self, lr): """ Set a learning rate for the optimizer """ if isinstance(lr, list): for group_lr, param_group in zip(lr, self.optimizer.param_groups): param_group['lr'] = group_lr else: for param_group in self.optimizer.param_groups: param_group['lr'] = lr
def free_params(self, value): """Set the free parameters. Note that this bypasses enforce_bounds. """ value = scipy.asarray(value, dtype=float) self.K_up_to_date = False self.k.free_params = value[:self.k.num_free_params] self.w.free_params = value[self.k.num_free_params:self.k.num_free_params + self.w.num_free_params]
Set the free parameters. Note that this bypasses enforce_bounds.
Below is the the instruction that describes the task: ### Input: Set the free parameters. Note that this bypasses enforce_bounds. ### Response: def free_params(self, value): """Set the free parameters. Note that this bypasses enforce_bounds. """ value = scipy.asarray(value, dtype=float) self.K_up_to_date = False self.k.free_params = value[:self.k.num_free_params] self.w.free_params = value[self.k.num_free_params:self.k.num_free_params + self.w.num_free_params]
def get_report_section( html_report, component_id, container_wrapper_format=container_format): """Get specific report section from InaSAFE analysis summary report. :param html_report: The html report. :type html_report: basestring :param component_id: The component key. :type component_id: str :param container_wrapper_format: A string format for wrapping the section. :type container_wrapper_format: basestring :return: Requested report section as an html. :rtype: basestring """ no_element_error = tr('No element match the tag or component id.') root_element, dict_of_elements = ET.XMLID(html_report) section_element = dict_of_elements.get(component_id) if section_element: requested_section = container_wrapper_format.format( section_content=str(ET.tostring(section_element))) return requested_section else: return no_element_error
Get specific report section from InaSAFE analysis summary report. :param html_report: The html report. :type html_report: basestring :param component_id: The component key. :type component_id: str :param container_wrapper_format: A string format for wrapping the section. :type container_wrapper_format: basestring :return: Requested report section as an html. :rtype: basestring
Below is the the instruction that describes the task: ### Input: Get specific report section from InaSAFE analysis summary report. :param html_report: The html report. :type html_report: basestring :param component_id: The component key. :type component_id: str :param container_wrapper_format: A string format for wrapping the section. :type container_wrapper_format: basestring :return: Requested report section as an html. :rtype: basestring ### Response: def get_report_section( html_report, component_id, container_wrapper_format=container_format): """Get specific report section from InaSAFE analysis summary report. :param html_report: The html report. :type html_report: basestring :param component_id: The component key. :type component_id: str :param container_wrapper_format: A string format for wrapping the section. :type container_wrapper_format: basestring :return: Requested report section as an html. :rtype: basestring """ no_element_error = tr('No element match the tag or component id.') root_element, dict_of_elements = ET.XMLID(html_report) section_element = dict_of_elements.get(component_id) if section_element: requested_section = container_wrapper_format.format( section_content=str(ET.tostring(section_element))) return requested_section else: return no_element_error
def contains(self, x: int, y: int) -> bool: """Returns True if this node contains these coordinates. Args: x (int): X position to check. y (int): Y position to check. Returns: bool: True if this node contains these coordinates. Otherwise False. """ return ( self.x <= x < self.x + self.width and self.y <= y < self.y + self.height )
Returns True if this node contains these coordinates. Args: x (int): X position to check. y (int): Y position to check. Returns: bool: True if this node contains these coordinates. Otherwise False.
Below is the the instruction that describes the task: ### Input: Returns True if this node contains these coordinates. Args: x (int): X position to check. y (int): Y position to check. Returns: bool: True if this node contains these coordinates. Otherwise False. ### Response: def contains(self, x: int, y: int) -> bool: """Returns True if this node contains these coordinates. Args: x (int): X position to check. y (int): Y position to check. Returns: bool: True if this node contains these coordinates. Otherwise False. """ return ( self.x <= x < self.x + self.width and self.y <= y < self.y + self.height )
def get(self, country_code): """ Constructs a AvailablePhoneNumberCountryContext :param country_code: The ISO country code of the country to fetch available phone number information about :returns: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext """ return AvailablePhoneNumberCountryContext( self._version, account_sid=self._solution['account_sid'], country_code=country_code, )
Constructs a AvailablePhoneNumberCountryContext :param country_code: The ISO country code of the country to fetch available phone number information about :returns: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext
Below is the the instruction that describes the task: ### Input: Constructs a AvailablePhoneNumberCountryContext :param country_code: The ISO country code of the country to fetch available phone number information about :returns: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext ### Response: def get(self, country_code): """ Constructs a AvailablePhoneNumberCountryContext :param country_code: The ISO country code of the country to fetch available phone number information about :returns: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext """ return AvailablePhoneNumberCountryContext( self._version, account_sid=self._solution['account_sid'], country_code=country_code, )
def options_string_builder(option_mapping, args): """Return arguments for CLI invocation of kal.""" options_string = "" for option, flag in option_mapping.items(): if option in args: options_string += str(" %s %s" % (flag, str(args[option]))) return options_string
Return arguments for CLI invocation of kal.
Below is the the instruction that describes the task: ### Input: Return arguments for CLI invocation of kal. ### Response: def options_string_builder(option_mapping, args): """Return arguments for CLI invocation of kal.""" options_string = "" for option, flag in option_mapping.items(): if option in args: options_string += str(" %s %s" % (flag, str(args[option]))) return options_string
def _CanSkipDataStream(self, file_entry, data_stream): """Determines if analysis and extraction of a data stream can be skipped. This is used to prevent Plaso trying to run analyzers or extract content from a pipe or socket it encounters while processing a mounted filesystem. Args: file_entry (dfvfs.FileEntry): file entry to consider for skipping. data_stream (dfvfs.DataStream): data stream to consider for skipping. Returns: bool: True if the data stream can be skipped. """ if file_entry.IsFile(): return False if data_stream.IsDefault(): return True return False
Determines if analysis and extraction of a data stream can be skipped. This is used to prevent Plaso trying to run analyzers or extract content from a pipe or socket it encounters while processing a mounted filesystem. Args: file_entry (dfvfs.FileEntry): file entry to consider for skipping. data_stream (dfvfs.DataStream): data stream to consider for skipping. Returns: bool: True if the data stream can be skipped.
Below is the the instruction that describes the task: ### Input: Determines if analysis and extraction of a data stream can be skipped. This is used to prevent Plaso trying to run analyzers or extract content from a pipe or socket it encounters while processing a mounted filesystem. Args: file_entry (dfvfs.FileEntry): file entry to consider for skipping. data_stream (dfvfs.DataStream): data stream to consider for skipping. Returns: bool: True if the data stream can be skipped. ### Response: def _CanSkipDataStream(self, file_entry, data_stream): """Determines if analysis and extraction of a data stream can be skipped. This is used to prevent Plaso trying to run analyzers or extract content from a pipe or socket it encounters while processing a mounted filesystem. Args: file_entry (dfvfs.FileEntry): file entry to consider for skipping. data_stream (dfvfs.DataStream): data stream to consider for skipping. Returns: bool: True if the data stream can be skipped. """ if file_entry.IsFile(): return False if data_stream.IsDefault(): return True return False
def uniq2orderipix_lut(uniq): """ ~30% faster than the method below Parameters ---------- uniq Returns ------- """ order = log2_lut(uniq >> 2) >> 1 ipix = uniq - (1 << (2 * (order + 1))) return order, ipix
~30% faster than the method below Parameters ---------- uniq Returns -------
Below is the the instruction that describes the task: ### Input: ~30% faster than the method below Parameters ---------- uniq Returns ------- ### Response: def uniq2orderipix_lut(uniq): """ ~30% faster than the method below Parameters ---------- uniq Returns ------- """ order = log2_lut(uniq >> 2) >> 1 ipix = uniq - (1 << (2 * (order + 1))) return order, ipix
def read_value(self): """Read the value of this descriptor.""" pass # Kick off a query to read the value of the descriptor, then wait # for the result to return asyncronously. self._value_read.clear() self._device._peripheral.readValueForDescriptor(self._descriptor) if not self._value_read.wait(timeout_sec): raise RuntimeError('Exceeded timeout waiting to read characteristic value!') return self._value
Read the value of this descriptor.
Below is the the instruction that describes the task: ### Input: Read the value of this descriptor. ### Response: def read_value(self): """Read the value of this descriptor.""" pass # Kick off a query to read the value of the descriptor, then wait # for the result to return asyncronously. self._value_read.clear() self._device._peripheral.readValueForDescriptor(self._descriptor) if not self._value_read.wait(timeout_sec): raise RuntimeError('Exceeded timeout waiting to read characteristic value!') return self._value
def tag_(name, repository, tag='latest', force=False): ''' .. versionchanged:: 2018.3.0 The repository and tag must now be passed separately using the ``repository`` and ``tag`` arguments, rather than together in the (now deprecated) ``image`` argument. Tag an image into a repository and return ``True``. If the tag was unsuccessful, an error will be raised. name ID of image repository Repository name for the image to be built .. versionadded:: 2018.3.0 tag : latest Tag name for the image to be built .. versionadded:: 2018.3.0 image .. deprecated:: 2018.3.0 Use both ``repository`` and ``tag`` instead force : False Force apply tag CLI Example: .. code-block:: bash salt myminion docker.tag 0123456789ab myrepo/mycontainer mytag ''' if not isinstance(repository, six.string_types): repository = six.text_type(repository) if not isinstance(tag, six.string_types): tag = six.text_type(tag) image_id = inspect_image(name)['Id'] response = _client_wrapper('tag', image_id, repository=repository, tag=tag, force=force) _clear_context() # Only non-error return case is a True return, so just return the response return response
.. versionchanged:: 2018.3.0 The repository and tag must now be passed separately using the ``repository`` and ``tag`` arguments, rather than together in the (now deprecated) ``image`` argument. Tag an image into a repository and return ``True``. If the tag was unsuccessful, an error will be raised. name ID of image repository Repository name for the image to be built .. versionadded:: 2018.3.0 tag : latest Tag name for the image to be built .. versionadded:: 2018.3.0 image .. deprecated:: 2018.3.0 Use both ``repository`` and ``tag`` instead force : False Force apply tag CLI Example: .. code-block:: bash salt myminion docker.tag 0123456789ab myrepo/mycontainer mytag
Below is the the instruction that describes the task: ### Input: .. versionchanged:: 2018.3.0 The repository and tag must now be passed separately using the ``repository`` and ``tag`` arguments, rather than together in the (now deprecated) ``image`` argument. Tag an image into a repository and return ``True``. If the tag was unsuccessful, an error will be raised. name ID of image repository Repository name for the image to be built .. versionadded:: 2018.3.0 tag : latest Tag name for the image to be built .. versionadded:: 2018.3.0 image .. deprecated:: 2018.3.0 Use both ``repository`` and ``tag`` instead force : False Force apply tag CLI Example: .. code-block:: bash salt myminion docker.tag 0123456789ab myrepo/mycontainer mytag ### Response: def tag_(name, repository, tag='latest', force=False): ''' .. versionchanged:: 2018.3.0 The repository and tag must now be passed separately using the ``repository`` and ``tag`` arguments, rather than together in the (now deprecated) ``image`` argument. Tag an image into a repository and return ``True``. If the tag was unsuccessful, an error will be raised. name ID of image repository Repository name for the image to be built .. versionadded:: 2018.3.0 tag : latest Tag name for the image to be built .. versionadded:: 2018.3.0 image .. deprecated:: 2018.3.0 Use both ``repository`` and ``tag`` instead force : False Force apply tag CLI Example: .. code-block:: bash salt myminion docker.tag 0123456789ab myrepo/mycontainer mytag ''' if not isinstance(repository, six.string_types): repository = six.text_type(repository) if not isinstance(tag, six.string_types): tag = six.text_type(tag) image_id = inspect_image(name)['Id'] response = _client_wrapper('tag', image_id, repository=repository, tag=tag, force=force) _clear_context() # Only non-error return case is a True return, so just return the response return response
def update(self, data): """ :see::meth:RedisMap.update """ result = None if data: _dumps = self._dumps data = { key: _dumps(value) for key, value in data.items()} result = self._client.hmset(self.key_prefix, data) return result
:see::meth:RedisMap.update
Below is the the instruction that describes the task: ### Input: :see::meth:RedisMap.update ### Response: def update(self, data): """ :see::meth:RedisMap.update """ result = None if data: _dumps = self._dumps data = { key: _dumps(value) for key, value in data.items()} result = self._client.hmset(self.key_prefix, data) return result
def play(self, *inputs, **named_inputs): """ Run the selected components of the block. The selected components are run with the already setted options. .. warning:: Defaut 'multiple' behavior is a **pipeline** ! :param *inputs: arguments (i.e. inputs) to give to the components """ # TODO: multi mode option(False, pipeline, map) self.validate() # TODO what if validate fails ? # intialise run meta data start = time.time() self.meta = PlayMeta(self.name) ### manage inputs if len(inputs) and len(named_inputs): raise ValueError("Either `inputs` or `named_inputs` should be provided, not both !") # default input name (so also the default last_output_name) if len(named_inputs): if self.in_name is None: raise ValueError("named inputs given, but the block input's names are unknow") if set(self.in_name) != set(named_inputs.keys()): raise ValueError("Inputs names are not matching with block input's names") inputs = [named_inputs[in_name] for in_name in self.in_name] _break_on_error = True results = {} # run for comp_name in self.selected(): # get the component comp = self._components[comp_name] # get the options if isinstance(comp, Optionable): # note: we force the hidden values only if the call is not # decorated by a "check" (that already force the hidden values) force_hidden = not (hasattr(comp.__call__, '_checked') and comp.__call__._checked) options = comp.get_options_values(hidden=force_hidden) else: options = {} # prepare the Play meta data comp_meta_res = BasicPlayMeta(comp) # it is register right now to be sur to have the data if there is an exception self.meta.append(comp_meta_res) comp_meta_res.run_with(inputs, options) # some logging argstr = [repr(arg)[:100].replace('\n', '') for arg in inputs] self._logger.debug("""'%s' playing: %s component: %s, args=%s, kwargs=%s""" % (self._name, comp.name, comp, "\n\t\t".join(argstr), options)) # run the component ! 
try: # multi = False or pipeline # given that the input is also the returning value # This behavior allows to modify the data given in input. # actually same arg if given several times # but may be transformed during the process # then finally returned results[self.out_name] = comp(*inputs, **options) #TODO: add validation on inputs name ! # TODO implements different mode for multiple # another way would be declaring a list var outside the loop, # then append result of each call to the components __call__ # and finally returns all computed results # map( lambda x : x(*arg), *components ) # >>> results.append( comp(*args, **options) ) # >>> return *results # TODO catch warnings TODO # warning may be raised for many reasons like: # * options has been modified # * deprecation # * pipeline inconsistency # * invalid input (graph with no edge ...) except Exception as err: # component error handling comp_meta_res.add_error(err) self._logger.error("error in component '%s': %s\n%s" % (comp.name, str(err), traceback.format_exc())) if _break_on_error: raise finally: # store component walltime now = time.time() comp_meta_res.time = now - start start = now #TODO: may return more than one value with multi=map return results
Run the selected components of the block. The selected components are run with the already setted options. .. warning:: Defaut 'multiple' behavior is a **pipeline** ! :param *inputs: arguments (i.e. inputs) to give to the components
Below is the the instruction that describes the task: ### Input: Run the selected components of the block. The selected components are run with the already setted options. .. warning:: Defaut 'multiple' behavior is a **pipeline** ! :param *inputs: arguments (i.e. inputs) to give to the components ### Response: def play(self, *inputs, **named_inputs): """ Run the selected components of the block. The selected components are run with the already setted options. .. warning:: Defaut 'multiple' behavior is a **pipeline** ! :param *inputs: arguments (i.e. inputs) to give to the components """ # TODO: multi mode option(False, pipeline, map) self.validate() # TODO what if validate fails ? # intialise run meta data start = time.time() self.meta = PlayMeta(self.name) ### manage inputs if len(inputs) and len(named_inputs): raise ValueError("Either `inputs` or `named_inputs` should be provided, not both !") # default input name (so also the default last_output_name) if len(named_inputs): if self.in_name is None: raise ValueError("named inputs given, but the block input's names are unknow") if set(self.in_name) != set(named_inputs.keys()): raise ValueError("Inputs names are not matching with block input's names") inputs = [named_inputs[in_name] for in_name in self.in_name] _break_on_error = True results = {} # run for comp_name in self.selected(): # get the component comp = self._components[comp_name] # get the options if isinstance(comp, Optionable): # note: we force the hidden values only if the call is not # decorated by a "check" (that already force the hidden values) force_hidden = not (hasattr(comp.__call__, '_checked') and comp.__call__._checked) options = comp.get_options_values(hidden=force_hidden) else: options = {} # prepare the Play meta data comp_meta_res = BasicPlayMeta(comp) # it is register right now to be sur to have the data if there is an exception self.meta.append(comp_meta_res) comp_meta_res.run_with(inputs, options) # some logging argstr = 
[repr(arg)[:100].replace('\n', '') for arg in inputs] self._logger.debug("""'%s' playing: %s component: %s, args=%s, kwargs=%s""" % (self._name, comp.name, comp, "\n\t\t".join(argstr), options)) # run the component ! try: # multi = False or pipeline # given that the input is also the returning value # This behavior allows to modify the data given in input. # actually same arg if given several times # but may be transformed during the process # then finally returned results[self.out_name] = comp(*inputs, **options) #TODO: add validation on inputs name ! # TODO implements different mode for multiple # another way would be declaring a list var outside the loop, # then append result of each call to the components __call__ # and finally returns all computed results # map( lambda x : x(*arg), *components ) # >>> results.append( comp(*args, **options) ) # >>> return *results # TODO catch warnings TODO # warning may be raised for many reasons like: # * options has been modified # * deprecation # * pipeline inconsistency # * invalid input (graph with no edge ...) except Exception as err: # component error handling comp_meta_res.add_error(err) self._logger.error("error in component '%s': %s\n%s" % (comp.name, str(err), traceback.format_exc())) if _break_on_error: raise finally: # store component walltime now = time.time() comp_meta_res.time = now - start start = now #TODO: may return more than one value with multi=map return results
def children(self): """ Returns an iterator for the non-empty children of the Node The children are returned as (Node, pos) tuples where pos is 0 for the left subnode and 1 for the right. >>> len(list(create(dimensions=2).children)) 0 >>> len(list(create([ (1, 2) ]).children)) 0 >>> len(list(create([ (2, 2), (2, 1), (2, 3) ]).children)) 2 """ if self.left and self.left.data is not None: yield self.left, 0 if self.right and self.right.data is not None: yield self.right, 1
Returns an iterator for the non-empty children of the Node The children are returned as (Node, pos) tuples where pos is 0 for the left subnode and 1 for the right. >>> len(list(create(dimensions=2).children)) 0 >>> len(list(create([ (1, 2) ]).children)) 0 >>> len(list(create([ (2, 2), (2, 1), (2, 3) ]).children)) 2
Below is the the instruction that describes the task: ### Input: Returns an iterator for the non-empty children of the Node The children are returned as (Node, pos) tuples where pos is 0 for the left subnode and 1 for the right. >>> len(list(create(dimensions=2).children)) 0 >>> len(list(create([ (1, 2) ]).children)) 0 >>> len(list(create([ (2, 2), (2, 1), (2, 3) ]).children)) 2 ### Response: def children(self): """ Returns an iterator for the non-empty children of the Node The children are returned as (Node, pos) tuples where pos is 0 for the left subnode and 1 for the right. >>> len(list(create(dimensions=2).children)) 0 >>> len(list(create([ (1, 2) ]).children)) 0 >>> len(list(create([ (2, 2), (2, 1), (2, 3) ]).children)) 2 """ if self.left and self.left.data is not None: yield self.left, 0 if self.right and self.right.data is not None: yield self.right, 1
def setattr(self, key, value): """ This method sets attribute of a Managed Object. """ if (UcsUtils.FindClassIdInMoMetaIgnoreCase(self.classId) != None): if (key in _ManagedObjectMeta[self.classId]): propMeta = UcsUtils.GetUcsPropertyMeta(self.classId, key) if (propMeta.ValidatePropertyValue(value) == False): # print "Validation Failure" return False if (propMeta.mask != None): self.dirtyMask |= propMeta.mask self.__dict__[key] = value else: self.__dict__['XtraProperty'][key] = value else: """ no such property """ self.__dict__['XtraProperty'][UcsUtils.WordU(key)] = value
This method sets attribute of a Managed Object.
Below is the the instruction that describes the task: ### Input: This method sets attribute of a Managed Object. ### Response: def setattr(self, key, value): """ This method sets attribute of a Managed Object. """ if (UcsUtils.FindClassIdInMoMetaIgnoreCase(self.classId) != None): if (key in _ManagedObjectMeta[self.classId]): propMeta = UcsUtils.GetUcsPropertyMeta(self.classId, key) if (propMeta.ValidatePropertyValue(value) == False): # print "Validation Failure" return False if (propMeta.mask != None): self.dirtyMask |= propMeta.mask self.__dict__[key] = value else: self.__dict__['XtraProperty'][key] = value else: """ no such property """ self.__dict__['XtraProperty'][UcsUtils.WordU(key)] = value
def real_main(release_url=None, tests_json_path=None, upload_build_id=None, upload_release_name=None): """Runs diff_my_images.""" coordinator = workers.get_coordinator() fetch_worker.register(coordinator) coordinator.start() data = open(FLAGS.tests_json_path).read() tests = load_tests(data) item = DiffMyImages( release_url, tests, upload_build_id, upload_release_name, heartbeat=workers.PrintWorkflow) item.root = True coordinator.input_queue.put(item) coordinator.wait_one() coordinator.stop() coordinator.join()
Runs diff_my_images.
Below is the the instruction that describes the task: ### Input: Runs diff_my_images. ### Response: def real_main(release_url=None, tests_json_path=None, upload_build_id=None, upload_release_name=None): """Runs diff_my_images.""" coordinator = workers.get_coordinator() fetch_worker.register(coordinator) coordinator.start() data = open(FLAGS.tests_json_path).read() tests = load_tests(data) item = DiffMyImages( release_url, tests, upload_build_id, upload_release_name, heartbeat=workers.PrintWorkflow) item.root = True coordinator.input_queue.put(item) coordinator.wait_one() coordinator.stop() coordinator.join()
def _get_device(self): """ Get the device """ try: device = { "name": self._dev.name, "isReachable": self._dev.isReachable, "isTrusted": self._get_isTrusted(), } except Exception: return None return device
Get the device
Below is the the instruction that describes the task: ### Input: Get the device ### Response: def _get_device(self): """ Get the device """ try: device = { "name": self._dev.name, "isReachable": self._dev.isReachable, "isTrusted": self._get_isTrusted(), } except Exception: return None return device
def get_descendants_group_count(cls, parent=None): """ Helper for a very common case: get a group of siblings and the number of *descendants* in every sibling. """ #~ # disclaimer: this is the FOURTH implementation I wrote for this # function. I really tried to make it return a queryset, but doing so # with a *single* query isn't trivial with Django's ORM. # ok, I DID manage to make Django's ORM return a queryset here, # defining two querysets, passing one subquery in the tables parameters # of .extra() of the second queryset, using the undocumented order_by # feature, and using a HORRIBLE hack to avoid django quoting the # subquery as a table, BUT (and there is always a but) the hack didn't # survive turning the QuerySet into a ValuesQuerySet, so I just used # good old SQL. # NOTE: in case there is interest, the hack to avoid django quoting the # subquery as a table, was adding the subquery to the alias cache of # the queryset's query object: # # qset.query.quote_cache[subquery] = subquery # # If there is a better way to do this in an UNMODIFIED django 1.0, let # me know. 
#~ cls = get_result_class(cls) vendor = cls.get_database_vendor('write') if parent: depth = parent.depth + 1 params = cls._get_children_path_interval(parent.path) extrand = 'AND path BETWEEN %s AND %s' else: depth = 1 params = [] extrand = '' subpath = sql_substr("path", "1", "%(subpathlen)s", vendor=vendor) sql = ( 'SELECT * FROM %(table)s AS t1 INNER JOIN ' ' (SELECT ' ' ' + subpath + ' AS subpath, ' ' COUNT(1)-1 AS count ' ' FROM %(table)s ' ' WHERE depth >= %(depth)s %(extrand)s' ' GROUP BY '+ subpath + ') AS t2 ' ' ON t1.path=t2.subpath ' ' ORDER BY t1.path' ) % { 'table': connection.ops.quote_name(cls._meta.db_table), 'subpathlen': depth * cls.steplen, 'depth': depth, 'extrand': extrand} cursor = cls._get_database_cursor('write') cursor.execute(sql, params) ret = [] field_names = [field[0] for field in cursor.description] for node_data in cursor.fetchall(): node = cls(**dict(zip(field_names, node_data[:-2]))) node.descendants_count = node_data[-1] ret.append(node) return ret
Helper for a very common case: get a group of siblings and the number of *descendants* in every sibling.
Below is the the instruction that describes the task: ### Input: Helper for a very common case: get a group of siblings and the number of *descendants* in every sibling. ### Response: def get_descendants_group_count(cls, parent=None): """ Helper for a very common case: get a group of siblings and the number of *descendants* in every sibling. """ #~ # disclaimer: this is the FOURTH implementation I wrote for this # function. I really tried to make it return a queryset, but doing so # with a *single* query isn't trivial with Django's ORM. # ok, I DID manage to make Django's ORM return a queryset here, # defining two querysets, passing one subquery in the tables parameters # of .extra() of the second queryset, using the undocumented order_by # feature, and using a HORRIBLE hack to avoid django quoting the # subquery as a table, BUT (and there is always a but) the hack didn't # survive turning the QuerySet into a ValuesQuerySet, so I just used # good old SQL. # NOTE: in case there is interest, the hack to avoid django quoting the # subquery as a table, was adding the subquery to the alias cache of # the queryset's query object: # # qset.query.quote_cache[subquery] = subquery # # If there is a better way to do this in an UNMODIFIED django 1.0, let # me know. 
#~ cls = get_result_class(cls) vendor = cls.get_database_vendor('write') if parent: depth = parent.depth + 1 params = cls._get_children_path_interval(parent.path) extrand = 'AND path BETWEEN %s AND %s' else: depth = 1 params = [] extrand = '' subpath = sql_substr("path", "1", "%(subpathlen)s", vendor=vendor) sql = ( 'SELECT * FROM %(table)s AS t1 INNER JOIN ' ' (SELECT ' ' ' + subpath + ' AS subpath, ' ' COUNT(1)-1 AS count ' ' FROM %(table)s ' ' WHERE depth >= %(depth)s %(extrand)s' ' GROUP BY '+ subpath + ') AS t2 ' ' ON t1.path=t2.subpath ' ' ORDER BY t1.path' ) % { 'table': connection.ops.quote_name(cls._meta.db_table), 'subpathlen': depth * cls.steplen, 'depth': depth, 'extrand': extrand} cursor = cls._get_database_cursor('write') cursor.execute(sql, params) ret = [] field_names = [field[0] for field in cursor.description] for node_data in cursor.fetchall(): node = cls(**dict(zip(field_names, node_data[:-2]))) node.descendants_count = node_data[-1] ret.append(node) return ret
def get_zmatrix(self): """ Returns a z-matrix representation of the molecule. """ output = [] outputvar = [] for i, site in enumerate(self._mol): if i == 0: output.append("{}".format(site.specie)) elif i == 1: nn = self._find_nn_pos_before_site(i) bondlength = self._mol.get_distance(i, nn[0]) output.append("{} {} B{}".format(self._mol[i].specie, nn[0] + 1, i)) outputvar.append("B{}={:.6f}".format(i, bondlength)) elif i == 2: nn = self._find_nn_pos_before_site(i) bondlength = self._mol.get_distance(i, nn[0]) angle = self._mol.get_angle(i, nn[0], nn[1]) output.append("{} {} B{} {} A{}".format(self._mol[i].specie, nn[0] + 1, i, nn[1] + 1, i)) outputvar.append("B{}={:.6f}".format(i, bondlength)) outputvar.append("A{}={:.6f}".format(i, angle)) else: nn = self._find_nn_pos_before_site(i) bondlength = self._mol.get_distance(i, nn[0]) angle = self._mol.get_angle(i, nn[0], nn[1]) dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2]) output.append("{} {} B{} {} A{} {} D{}" .format(self._mol[i].specie, nn[0] + 1, i, nn[1] + 1, i, nn[2] + 1, i)) outputvar.append("B{}={:.6f}".format(i, bondlength)) outputvar.append("A{}={:.6f}".format(i, angle)) outputvar.append("D{}={:.6f}".format(i, dih)) return "\n".join(output) + "\n\n" + "\n".join(outputvar)
Returns a z-matrix representation of the molecule.
Below is the the instruction that describes the task: ### Input: Returns a z-matrix representation of the molecule. ### Response: def get_zmatrix(self): """ Returns a z-matrix representation of the molecule. """ output = [] outputvar = [] for i, site in enumerate(self._mol): if i == 0: output.append("{}".format(site.specie)) elif i == 1: nn = self._find_nn_pos_before_site(i) bondlength = self._mol.get_distance(i, nn[0]) output.append("{} {} B{}".format(self._mol[i].specie, nn[0] + 1, i)) outputvar.append("B{}={:.6f}".format(i, bondlength)) elif i == 2: nn = self._find_nn_pos_before_site(i) bondlength = self._mol.get_distance(i, nn[0]) angle = self._mol.get_angle(i, nn[0], nn[1]) output.append("{} {} B{} {} A{}".format(self._mol[i].specie, nn[0] + 1, i, nn[1] + 1, i)) outputvar.append("B{}={:.6f}".format(i, bondlength)) outputvar.append("A{}={:.6f}".format(i, angle)) else: nn = self._find_nn_pos_before_site(i) bondlength = self._mol.get_distance(i, nn[0]) angle = self._mol.get_angle(i, nn[0], nn[1]) dih = self._mol.get_dihedral(i, nn[0], nn[1], nn[2]) output.append("{} {} B{} {} A{} {} D{}" .format(self._mol[i].specie, nn[0] + 1, i, nn[1] + 1, i, nn[2] + 1, i)) outputvar.append("B{}={:.6f}".format(i, bondlength)) outputvar.append("A{}={:.6f}".format(i, angle)) outputvar.append("D{}={:.6f}".format(i, dih)) return "\n".join(output) + "\n\n" + "\n".join(outputvar)
def _get_boots(arr, nboots): """ return array of bootstrap D-stats """ ## hold results (nboots, [dstat, ]) boots = np.zeros((nboots,)) ## iterate to fill boots for bidx in xrange(nboots): ## sample with replacement lidx = np.random.randint(0, arr.shape[0], arr.shape[0]) tarr = arr[lidx] _, _, dst = _prop_dstat(tarr) boots[bidx] = dst ## return bootarr return boots
return array of bootstrap D-stats
Below is the the instruction that describes the task: ### Input: return array of bootstrap D-stats ### Response: def _get_boots(arr, nboots): """ return array of bootstrap D-stats """ ## hold results (nboots, [dstat, ]) boots = np.zeros((nboots,)) ## iterate to fill boots for bidx in xrange(nboots): ## sample with replacement lidx = np.random.randint(0, arr.shape[0], arr.shape[0]) tarr = arr[lidx] _, _, dst = _prop_dstat(tarr) boots[bidx] = dst ## return bootarr return boots
def get_recent_comments(number=5, template='zinnia/tags/comments_recent.html'): """ Return the most recent comments. """ # Using map(smart_text... fix bug related to issue #8554 entry_published_pks = map(smart_text, Entry.published.values_list('id', flat=True)) content_type = ContentType.objects.get_for_model(Entry) comments = get_comment_model().objects.filter( Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL), content_type=content_type, object_pk__in=entry_published_pks, is_public=True).order_by('-pk')[:number] comments = comments.prefetch_related('content_object') return {'template': template, 'comments': comments}
Return the most recent comments.
Below is the the instruction that describes the task: ### Input: Return the most recent comments. ### Response: def get_recent_comments(number=5, template='zinnia/tags/comments_recent.html'): """ Return the most recent comments. """ # Using map(smart_text... fix bug related to issue #8554 entry_published_pks = map(smart_text, Entry.published.values_list('id', flat=True)) content_type = ContentType.objects.get_for_model(Entry) comments = get_comment_model().objects.filter( Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL), content_type=content_type, object_pk__in=entry_published_pks, is_public=True).order_by('-pk')[:number] comments = comments.prefetch_related('content_object') return {'template': template, 'comments': comments}
def move_to_start(self, column_label): """Move a column to the first in order.""" self._columns.move_to_end(column_label, last=False) return self
Move a column to the first in order.
Below is the the instruction that describes the task: ### Input: Move a column to the first in order. ### Response: def move_to_start(self, column_label): """Move a column to the first in order.""" self._columns.move_to_end(column_label, last=False) return self
def format_time(self, sec): """ Pretty-formats a given time in a readable manner @sec: #int or #float seconds -> #str formatted time """ # µsec if sec < 0.001: return "{}{}".format( colorize(round(sec*1000000, 2), "purple"), bold("µs")) # ms elif sec < 1.0: return "{}{}".format( colorize(round(sec*1000, 2), "purple"), bold("ms")) # s elif sec < 60.0: return "{}{}".format( colorize(round(sec, 2), "purple"), bold("s")) else: floored = floor(sec/60) return "{}{} {}{}".format( colorize(floored, "purple"), bold("m"), colorize(floor(sec-(floored*60)), "purple"), bold("s"))
Pretty-formats a given time in a readable manner @sec: #int or #float seconds -> #str formatted time
Below is the the instruction that describes the task: ### Input: Pretty-formats a given time in a readable manner @sec: #int or #float seconds -> #str formatted time ### Response: def format_time(self, sec): """ Pretty-formats a given time in a readable manner @sec: #int or #float seconds -> #str formatted time """ # µsec if sec < 0.001: return "{}{}".format( colorize(round(sec*1000000, 2), "purple"), bold("µs")) # ms elif sec < 1.0: return "{}{}".format( colorize(round(sec*1000, 2), "purple"), bold("ms")) # s elif sec < 60.0: return "{}{}".format( colorize(round(sec, 2), "purple"), bold("s")) else: floored = floor(sec/60) return "{}{} {}{}".format( colorize(floored, "purple"), bold("m"), colorize(floor(sec-(floored*60)), "purple"), bold("s"))
def expire(self, keyid, expiration_time='1y', passphrase=None, expire_subkeys=True): """Changes GnuPG key expiration by passing in new time period (from now) through subprocess's stdin >>> import gnupg >>> gpg = gnupg.GPG(homedir="doctests") >>> key_input = gpg.gen_key_input() >>> key = gpg.gen_key(key_input) >>> gpg.expire(key.fingerprint, '2w', 'good passphrase') :param str keyid: key shortID, longID, email_address or fingerprint :param str expiration_time: 0 or number of days (d), or weeks (*w) , or months (*m) or years (*y) for when to expire the key, from today. :param str passphrase: passphrase used when creating the key, leave None otherwise :param bool expire_subkeys: to indicate whether the subkeys will also change the expiration time by the same period -- default is True :returns: The result giving status of the change in expiration... the new expiration date can be obtained by .list_keys() """ passphrase = passphrase.encode(self._encoding) if passphrase else passphrase try: sub_keys_number = len(self.list_sigs(keyid)[0]['subkeys']) if expire_subkeys else 0 except IndexError: sub_keys_number = 0 expiration_input = KeyExpirationInterface(expiration_time, passphrase).gpg_interactive_input(sub_keys_number) args = ["--command-fd 0", "--edit-key %s" % keyid] p = self._open_subprocess(args) p.stdin.write(b(expiration_input)) result = self._result_map['expire'](self) p.stdin.write(b(expiration_input)) self._collect_output(p, result, stdin=p.stdin) return result
Changes GnuPG key expiration by passing in new time period (from now) through subprocess's stdin >>> import gnupg >>> gpg = gnupg.GPG(homedir="doctests") >>> key_input = gpg.gen_key_input() >>> key = gpg.gen_key(key_input) >>> gpg.expire(key.fingerprint, '2w', 'good passphrase') :param str keyid: key shortID, longID, email_address or fingerprint :param str expiration_time: 0 or number of days (d), or weeks (*w) , or months (*m) or years (*y) for when to expire the key, from today. :param str passphrase: passphrase used when creating the key, leave None otherwise :param bool expire_subkeys: to indicate whether the subkeys will also change the expiration time by the same period -- default is True :returns: The result giving status of the change in expiration... the new expiration date can be obtained by .list_keys()
Below is the the instruction that describes the task: ### Input: Changes GnuPG key expiration by passing in new time period (from now) through subprocess's stdin >>> import gnupg >>> gpg = gnupg.GPG(homedir="doctests") >>> key_input = gpg.gen_key_input() >>> key = gpg.gen_key(key_input) >>> gpg.expire(key.fingerprint, '2w', 'good passphrase') :param str keyid: key shortID, longID, email_address or fingerprint :param str expiration_time: 0 or number of days (d), or weeks (*w) , or months (*m) or years (*y) for when to expire the key, from today. :param str passphrase: passphrase used when creating the key, leave None otherwise :param bool expire_subkeys: to indicate whether the subkeys will also change the expiration time by the same period -- default is True :returns: The result giving status of the change in expiration... the new expiration date can be obtained by .list_keys() ### Response: def expire(self, keyid, expiration_time='1y', passphrase=None, expire_subkeys=True): """Changes GnuPG key expiration by passing in new time period (from now) through subprocess's stdin >>> import gnupg >>> gpg = gnupg.GPG(homedir="doctests") >>> key_input = gpg.gen_key_input() >>> key = gpg.gen_key(key_input) >>> gpg.expire(key.fingerprint, '2w', 'good passphrase') :param str keyid: key shortID, longID, email_address or fingerprint :param str expiration_time: 0 or number of days (d), or weeks (*w) , or months (*m) or years (*y) for when to expire the key, from today. :param str passphrase: passphrase used when creating the key, leave None otherwise :param bool expire_subkeys: to indicate whether the subkeys will also change the expiration time by the same period -- default is True :returns: The result giving status of the change in expiration... 
the new expiration date can be obtained by .list_keys() """ passphrase = passphrase.encode(self._encoding) if passphrase else passphrase try: sub_keys_number = len(self.list_sigs(keyid)[0]['subkeys']) if expire_subkeys else 0 except IndexError: sub_keys_number = 0 expiration_input = KeyExpirationInterface(expiration_time, passphrase).gpg_interactive_input(sub_keys_number) args = ["--command-fd 0", "--edit-key %s" % keyid] p = self._open_subprocess(args) p.stdin.write(b(expiration_input)) result = self._result_map['expire'](self) p.stdin.write(b(expiration_input)) self._collect_output(p, result, stdin=p.stdin) return result
def _write_internal(self, iterator, assets): """ Internal implementation of write. `iterator` should be an iterator yielding pairs of (asset, ctable). """ total_rows = 0 first_row = {} last_row = {} calendar_offset = {} # Maps column name -> output carray. columns = { k: carray(array([], dtype=uint32_dtype)) for k in US_EQUITY_PRICING_BCOLZ_COLUMNS } earliest_date = None sessions = self._calendar.sessions_in_range( self._start_session, self._end_session ) if assets is not None: @apply def iterator(iterator=iterator, assets=set(assets)): for asset_id, table in iterator: if asset_id not in assets: raise ValueError('unknown asset id %r' % asset_id) yield asset_id, table for asset_id, table in iterator: nrows = len(table) for column_name in columns: if column_name == 'id': # We know what the content of this column is, so don't # bother reading it. columns['id'].append( full((nrows,), asset_id, dtype='uint32'), ) continue columns[column_name].append(table[column_name]) if earliest_date is None: earliest_date = table["day"][0] else: earliest_date = min(earliest_date, table["day"][0]) # Bcolz doesn't support ints as keys in `attrs`, so convert # assets to strings for use as attr keys. asset_key = str(asset_id) # Calculate the index into the array of the first and last row # for this asset. This allows us to efficiently load single # assets when querying the data back out of the table. 
first_row[asset_key] = total_rows last_row[asset_key] = total_rows + nrows - 1 total_rows += nrows table_day_to_session = compose( self._calendar.minute_to_session_label, partial(Timestamp, unit='s', tz='UTC'), ) asset_first_day = table_day_to_session(table['day'][0]) asset_last_day = table_day_to_session(table['day'][-1]) asset_sessions = sessions[ sessions.slice_indexer(asset_first_day, asset_last_day) ] assert len(table) == len(asset_sessions), ( 'Got {} rows for daily bars table with first day={}, last ' 'day={}, expected {} rows.\n' 'Missing sessions: {}\n' 'Extra sessions: {}'.format( len(table), asset_first_day.date(), asset_last_day.date(), len(asset_sessions), asset_sessions.difference( to_datetime( np.array(table['day']), unit='s', utc=True, ) ).tolist(), to_datetime( np.array(table['day']), unit='s', utc=True, ).difference(asset_sessions).tolist(), ) ) # Calculate the number of trading days between the first date # in the stored data and the first date of **this** asset. This # offset used for output alignment by the reader. calendar_offset[asset_key] = sessions.get_loc(asset_first_day) # This writes the table to disk. full_table = ctable( columns=[ columns[colname] for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS ], names=US_EQUITY_PRICING_BCOLZ_COLUMNS, rootdir=self._filename, mode='w', ) full_table.attrs['first_trading_day'] = ( earliest_date if earliest_date is not None else iNaT ) full_table.attrs['first_row'] = first_row full_table.attrs['last_row'] = last_row full_table.attrs['calendar_offset'] = calendar_offset full_table.attrs['calendar_name'] = self._calendar.name full_table.attrs['start_session_ns'] = self._start_session.value full_table.attrs['end_session_ns'] = self._end_session.value full_table.flush() return full_table
Internal implementation of write. `iterator` should be an iterator yielding pairs of (asset, ctable).
Below is the the instruction that describes the task: ### Input: Internal implementation of write. `iterator` should be an iterator yielding pairs of (asset, ctable). ### Response: def _write_internal(self, iterator, assets): """ Internal implementation of write. `iterator` should be an iterator yielding pairs of (asset, ctable). """ total_rows = 0 first_row = {} last_row = {} calendar_offset = {} # Maps column name -> output carray. columns = { k: carray(array([], dtype=uint32_dtype)) for k in US_EQUITY_PRICING_BCOLZ_COLUMNS } earliest_date = None sessions = self._calendar.sessions_in_range( self._start_session, self._end_session ) if assets is not None: @apply def iterator(iterator=iterator, assets=set(assets)): for asset_id, table in iterator: if asset_id not in assets: raise ValueError('unknown asset id %r' % asset_id) yield asset_id, table for asset_id, table in iterator: nrows = len(table) for column_name in columns: if column_name == 'id': # We know what the content of this column is, so don't # bother reading it. columns['id'].append( full((nrows,), asset_id, dtype='uint32'), ) continue columns[column_name].append(table[column_name]) if earliest_date is None: earliest_date = table["day"][0] else: earliest_date = min(earliest_date, table["day"][0]) # Bcolz doesn't support ints as keys in `attrs`, so convert # assets to strings for use as attr keys. asset_key = str(asset_id) # Calculate the index into the array of the first and last row # for this asset. This allows us to efficiently load single # assets when querying the data back out of the table. 
first_row[asset_key] = total_rows last_row[asset_key] = total_rows + nrows - 1 total_rows += nrows table_day_to_session = compose( self._calendar.minute_to_session_label, partial(Timestamp, unit='s', tz='UTC'), ) asset_first_day = table_day_to_session(table['day'][0]) asset_last_day = table_day_to_session(table['day'][-1]) asset_sessions = sessions[ sessions.slice_indexer(asset_first_day, asset_last_day) ] assert len(table) == len(asset_sessions), ( 'Got {} rows for daily bars table with first day={}, last ' 'day={}, expected {} rows.\n' 'Missing sessions: {}\n' 'Extra sessions: {}'.format( len(table), asset_first_day.date(), asset_last_day.date(), len(asset_sessions), asset_sessions.difference( to_datetime( np.array(table['day']), unit='s', utc=True, ) ).tolist(), to_datetime( np.array(table['day']), unit='s', utc=True, ).difference(asset_sessions).tolist(), ) ) # Calculate the number of trading days between the first date # in the stored data and the first date of **this** asset. This # offset used for output alignment by the reader. calendar_offset[asset_key] = sessions.get_loc(asset_first_day) # This writes the table to disk. full_table = ctable( columns=[ columns[colname] for colname in US_EQUITY_PRICING_BCOLZ_COLUMNS ], names=US_EQUITY_PRICING_BCOLZ_COLUMNS, rootdir=self._filename, mode='w', ) full_table.attrs['first_trading_day'] = ( earliest_date if earliest_date is not None else iNaT ) full_table.attrs['first_row'] = first_row full_table.attrs['last_row'] = last_row full_table.attrs['calendar_offset'] = calendar_offset full_table.attrs['calendar_name'] = self._calendar.name full_table.attrs['start_session_ns'] = self._start_session.value full_table.attrs['end_session_ns'] = self._end_session.value full_table.flush() return full_table
def get(package_str, classname): '''Retrieve from the internal cache a class instance. All arguments are case-insensitive''' if (package_str in _dynamo_cache) and (classname in _dynamo_cache[package_str]): return _dynamo_cache[package_str][classname] return None
Retrieve from the internal cache a class instance. All arguments are case-insensitive
Below is the the instruction that describes the task: ### Input: Retrieve from the internal cache a class instance. All arguments are case-insensitive ### Response: def get(package_str, classname): '''Retrieve from the internal cache a class instance. All arguments are case-insensitive''' if (package_str in _dynamo_cache) and (classname in _dynamo_cache[package_str]): return _dynamo_cache[package_str][classname] return None
def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 
1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s)
Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature.
Below is the the instruction that describes the task: ### Input: Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. ### Response: def from_der(der): """ Decodes a Signature that was DER-encoded. Args: der (bytes or str): The DER encoding to be decoded. Returns: Signature: The deserialized signature. """ d = get_bytes(der) # d must conform to (from btcd): # [0 ] 0x30 - ASN.1 identifier for sequence # [1 ] <1-byte> - total remaining length # [2 ] 0x02 - ASN.1 identifier to specify an integer follows # [3 ] <1-byte> - length of R # [4.] <bytes> - R # [..] 0x02 - ASN.1 identifier to specify an integer follows # [..] <1-byte> - length of S # [..] <bytes> - S # 6 bytes + R (min. 1 byte) + S (min. 1 byte) if len(d) < 8: raise ValueError("DER signature string is too short.") # 6 bytes + R (max. 33 bytes) + S (max. 33 bytes) if len(d) > 72: raise ValueError("DER signature string is too long.") if d[0] != 0x30: raise ValueError("DER signature does not start with 0x30.") if d[1] != len(d[2:]): raise ValueError("DER signature length incorrect.") total_length = d[1] if d[2] != 0x02: raise ValueError("DER signature no 1st int marker.") if d[3] <= 0 or d[3] > (total_length - 7): raise ValueError("DER signature incorrect R length.") # Grab R, check for errors rlen = d[3] s_magic_index = 4 + rlen rb = d[4:s_magic_index] if rb[0] & 0x80 != 0: raise ValueError("DER signature R is negative.") if len(rb) > 1 and rb[0] == 0 and rb[1] & 0x80 != 0x80: raise ValueError("DER signature R is excessively padded.") r = int.from_bytes(rb, 'big') # Grab S, check for errors if d[s_magic_index] != 0x02: raise ValueError("DER signature no 2nd int marker.") slen_index = s_magic_index + 1 slen = d[slen_index] if slen <= 0 or slen > len(d) - (slen_index + 1): raise ValueError("DER signature incorrect S length.") sb = d[slen_index + 1:] if sb[0] & 0x80 != 0: raise ValueError("DER signature S is negative.") 
if len(sb) > 1 and sb[0] == 0 and sb[1] & 0x80 != 0x80: raise ValueError("DER signature S is excessively padded.") s = int.from_bytes(sb, 'big') if r < 1 or r >= bitcoin_curve.n: raise ValueError("DER signature R is not between 1 and N - 1.") if s < 1 or s >= bitcoin_curve.n: raise ValueError("DER signature S is not between 1 and N - 1.") return Signature(r, s)
def departments_name_show(self, name, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/departments#get-department-by-name" api_path = "/api/v2/departments/name/{name}" api_path = api_path.format(name=name) return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/chat/departments#get-department-by-name
Below is the the instruction that describes the task: ### Input: https://developer.zendesk.com/rest_api/docs/chat/departments#get-department-by-name ### Response: def departments_name_show(self, name, **kwargs): "https://developer.zendesk.com/rest_api/docs/chat/departments#get-department-by-name" api_path = "/api/v2/departments/name/{name}" api_path = api_path.format(name=name) return self.call(api_path, **kwargs)
def schedule_entities_reindex(entities): """ :param entities: as returned by :func:`get_entities_for_reindex` """ entities = [(e[0], e[1], e[2], dict(e[3])) for e in entities] return index_update.apply_async(kwargs={"index": "default", "items": entities})
:param entities: as returned by :func:`get_entities_for_reindex`
Below is the the instruction that describes the task: ### Input: :param entities: as returned by :func:`get_entities_for_reindex` ### Response: def schedule_entities_reindex(entities): """ :param entities: as returned by :func:`get_entities_for_reindex` """ entities = [(e[0], e[1], e[2], dict(e[3])) for e in entities] return index_update.apply_async(kwargs={"index": "default", "items": entities})
def generate_combined_fasta(self, genome_list, genome_dir): '''Generate a combined fasta using the genbank files. Args genome_list (list) genome_dir (string) ''' fasta = [] for genome in genome_list: full_path = genome_dir + genome handle = open(full_path, "rU") print 'making combined fasta for', genome try: seq_record = SeqIO.read(handle, 'genbank') org_accession = seq_record.name except AssertionError,e: print str(e), genome for feature in seq_record.features: if feature.type == 'CDS': try: prot_accession = feature.qualifiers['protein_id'][0] prot_translation = feature.qualifiers['translation'][0] newfast = '>' + org_accession + ',' + prot_accession + \ '\n' + prot_translation + '\n' #if newfast not in fasta: fasta.append(newfast) except AttributeError,e: print "organism %s, protein %s did not have \ the right attributes" % (org_accession, prot_accession) print str(e) except KeyError,e: print "organism %s, protein %s did not have \ the right key" % (org_accession, prot_accession) print str(e) handle.close() print "%s proteins were added" % len(fasta) set_fasta = set(fasta) print "%s unique proteins were added -- dropping redundant ones" % len(set_fasta) faastring = "".join(set_fasta) write_fasta = open('combined_fasta', 'w') write_fasta.write(faastring) write_fasta.close() return set_fasta
Generate a combined fasta using the genbank files. Args genome_list (list) genome_dir (string)
Below is the the instruction that describes the task: ### Input: Generate a combined fasta using the genbank files. Args genome_list (list) genome_dir (string) ### Response: def generate_combined_fasta(self, genome_list, genome_dir): '''Generate a combined fasta using the genbank files. Args genome_list (list) genome_dir (string) ''' fasta = [] for genome in genome_list: full_path = genome_dir + genome handle = open(full_path, "rU") print 'making combined fasta for', genome try: seq_record = SeqIO.read(handle, 'genbank') org_accession = seq_record.name except AssertionError,e: print str(e), genome for feature in seq_record.features: if feature.type == 'CDS': try: prot_accession = feature.qualifiers['protein_id'][0] prot_translation = feature.qualifiers['translation'][0] newfast = '>' + org_accession + ',' + prot_accession + \ '\n' + prot_translation + '\n' #if newfast not in fasta: fasta.append(newfast) except AttributeError,e: print "organism %s, protein %s did not have \ the right attributes" % (org_accession, prot_accession) print str(e) except KeyError,e: print "organism %s, protein %s did not have \ the right key" % (org_accession, prot_accession) print str(e) handle.close() print "%s proteins were added" % len(fasta) set_fasta = set(fasta) print "%s unique proteins were added -- dropping redundant ones" % len(set_fasta) faastring = "".join(set_fasta) write_fasta = open('combined_fasta', 'w') write_fasta.write(faastring) write_fasta.close() return set_fasta
def gen_design(stimtime_files, scan_duration, TR, style='FSL', temp_res=0.01, hrf_para={'response_delay': 6, 'undershoot_delay': 12, 'response_dispersion': 0.9, 'undershoot_dispersion': 0.9, 'undershoot_scale': 0.035}): """ Generate design matrix based on a list of names of stimulus timing files. The function will read each file, and generate a numpy array of size [time_points \\* condition], where time_points equals duration / TR, and condition is the size of stimtime_filenames. Each column is the hypothetical fMRI response based on the stimulus timing in the corresponding file of stimtime_files. This function uses generate_stimfunction and double_gamma_hrf of brainiak.utils.fmrisim. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. The contents in the files will be interpretated based on the style parameter. Details are explained under the style parameter. scan_duration: float or a list (or a 1D numpy array) of numbers. Total duration of each fMRI scan, in unit of seconds. If there are multiple runs, the duration should be a list (or 1-d numpy array) of numbers. If it is a list, then each number in the list represents the duration of the corresponding scan in the stimtime_files. If only a number is provided, it is assumed that there is only one fMRI scan lasting for scan_duration. TR: float. The sampling period of fMRI, in unit of seconds. style: string, default: 'FSL' Acceptable inputs: 'FSL', 'AFNI' The formating style of the stimtime_files. 'FSL' style has one line for each event of the same condition. Each line contains three numbers. The first number is the onset of the event relative to the onset of the first scan, in units of seconds. (Multiple scans should be treated as a concatenated long scan for the purpose of calculating onsets. However, the design matrix from one scan won't leak into the next). 
The second number is the duration of the event, in unit of seconds. The third number is the amplitude modulation (or weight) of the response. It is acceptable to not provide the weight, or not provide both duration and weight. In such cases, these parameters will default to 1.0. This code will accept timing files with only 1 or 2 columns for convenience but please note that the FSL package does not allow this 'AFNI' style has one line for each scan (run). Each line has a few triplets in the format of stim_onsets*weight:duration (or simpler, see below), separated by spaces. For example, 3.2\\*2.0:1.5 means that one event starts at 3.2s, modulated by weight of 2.0 and lasts for 1.5s. If some run does not include a single event of a condition (stimulus type), then you can put \\*, or a negative number, or a very large number in that line. Either duration or weight can be neglected. In such cases, they will default to 1.0. For example, 3.0, 3.0\\*1.0, 3.0:1.0 and 3.0\\*1.0:1.0 all means an event starting at 3.0s, lasting for 1.0s, with amplitude modulation of 1.0. temp_res: float, default: 0.01 Temporal resolution of fMRI, in second. hrf_para: dictionary The parameters of the double-Gamma hemodynamic response function. To set different parameters, supply a dictionary with the same set of keys as the default, and replace the corresponding values with the new values. Returns ------- design: 2D numpy array design matrix. 
Each time row represents one TR (fMRI sampling time point) and each column represents one experiment condition, in the order in stimtime_files """ if np.ndim(scan_duration) == 0: scan_duration = [scan_duration] scan_duration = np.array(scan_duration) assert np.all(scan_duration > TR), \ 'scan duration should be longer than a TR' if type(stimtime_files) is str: stimtime_files = [stimtime_files] assert TR > 0, 'TR should be positive' assert style == 'FSL' or style == 'AFNI', 'style can only be FSL or AFNI' n_C = len(stimtime_files) # number of conditions n_S = np.size(scan_duration) # number of scans if n_S > 1: design = [np.empty([int(np.round(duration / TR)), n_C]) for duration in scan_duration] else: design = [np.empty([int(np.round(scan_duration / TR)), n_C])] scan_onoff = np.insert(np.cumsum(scan_duration), 0, 0) if style == 'FSL': design_info = _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff) elif style == 'AFNI': design_info = _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff) response_delay = hrf_para['response_delay'] undershoot_delay = hrf_para['undershoot_delay'] response_disp = hrf_para['response_dispersion'] undershoot_disp = hrf_para['undershoot_dispersion'] undershoot_scale = hrf_para['undershoot_scale'] # generate design matrix for i_s in range(n_S): for i_c in range(n_C): if len(design_info[i_s][i_c]['onset']) > 0: stimfunction = generate_stimfunction( onsets=design_info[i_s][i_c]['onset'], event_durations=design_info[i_s][i_c]['duration'], total_time=scan_duration[i_s], weights=design_info[i_s][i_c]['weight'], temporal_resolution=1.0/temp_res) hrf = _double_gamma_hrf(response_delay=response_delay, undershoot_delay=undershoot_delay, response_dispersion=response_disp, undershoot_dispersion=undershoot_disp, undershoot_scale=undershoot_scale, temporal_resolution=1.0/temp_res) design[i_s][:, i_c] = convolve_hrf( stimfunction, TR, hrf_type=hrf, scale_function=False, temporal_resolution=1.0 / temp_res).transpose() * temp_res else: 
design[i_s][:, i_c] = 0.0 # We multiply the resulting design matrix with # the temporal resolution to normalize it. # We do not use the internal normalization # in double_gamma_hrf because it does not guarantee # normalizing with the same constant. return np.concatenate(design, axis=0)
Generate design matrix based on a list of names of stimulus timing files. The function will read each file, and generate a numpy array of size [time_points \\* condition], where time_points equals duration / TR, and condition is the size of stimtime_filenames. Each column is the hypothetical fMRI response based on the stimulus timing in the corresponding file of stimtime_files. This function uses generate_stimfunction and double_gamma_hrf of brainiak.utils.fmrisim. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. The contents in the files will be interpretated based on the style parameter. Details are explained under the style parameter. scan_duration: float or a list (or a 1D numpy array) of numbers. Total duration of each fMRI scan, in unit of seconds. If there are multiple runs, the duration should be a list (or 1-d numpy array) of numbers. If it is a list, then each number in the list represents the duration of the corresponding scan in the stimtime_files. If only a number is provided, it is assumed that there is only one fMRI scan lasting for scan_duration. TR: float. The sampling period of fMRI, in unit of seconds. style: string, default: 'FSL' Acceptable inputs: 'FSL', 'AFNI' The formating style of the stimtime_files. 'FSL' style has one line for each event of the same condition. Each line contains three numbers. The first number is the onset of the event relative to the onset of the first scan, in units of seconds. (Multiple scans should be treated as a concatenated long scan for the purpose of calculating onsets. However, the design matrix from one scan won't leak into the next). The second number is the duration of the event, in unit of seconds. The third number is the amplitude modulation (or weight) of the response. It is acceptable to not provide the weight, or not provide both duration and weight. 
In such cases, these parameters will default to 1.0. This code will accept timing files with only 1 or 2 columns for convenience but please note that the FSL package does not allow this 'AFNI' style has one line for each scan (run). Each line has a few triplets in the format of stim_onsets*weight:duration (or simpler, see below), separated by spaces. For example, 3.2\\*2.0:1.5 means that one event starts at 3.2s, modulated by weight of 2.0 and lasts for 1.5s. If some run does not include a single event of a condition (stimulus type), then you can put \\*, or a negative number, or a very large number in that line. Either duration or weight can be neglected. In such cases, they will default to 1.0. For example, 3.0, 3.0\\*1.0, 3.0:1.0 and 3.0\\*1.0:1.0 all means an event starting at 3.0s, lasting for 1.0s, with amplitude modulation of 1.0. temp_res: float, default: 0.01 Temporal resolution of fMRI, in second. hrf_para: dictionary The parameters of the double-Gamma hemodynamic response function. To set different parameters, supply a dictionary with the same set of keys as the default, and replace the corresponding values with the new values. Returns ------- design: 2D numpy array design matrix. Each time row represents one TR (fMRI sampling time point) and each column represents one experiment condition, in the order in stimtime_files
Below is the the instruction that describes the task: ### Input: Generate design matrix based on a list of names of stimulus timing files. The function will read each file, and generate a numpy array of size [time_points \\* condition], where time_points equals duration / TR, and condition is the size of stimtime_filenames. Each column is the hypothetical fMRI response based on the stimulus timing in the corresponding file of stimtime_files. This function uses generate_stimfunction and double_gamma_hrf of brainiak.utils.fmrisim. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. The contents in the files will be interpretated based on the style parameter. Details are explained under the style parameter. scan_duration: float or a list (or a 1D numpy array) of numbers. Total duration of each fMRI scan, in unit of seconds. If there are multiple runs, the duration should be a list (or 1-d numpy array) of numbers. If it is a list, then each number in the list represents the duration of the corresponding scan in the stimtime_files. If only a number is provided, it is assumed that there is only one fMRI scan lasting for scan_duration. TR: float. The sampling period of fMRI, in unit of seconds. style: string, default: 'FSL' Acceptable inputs: 'FSL', 'AFNI' The formating style of the stimtime_files. 'FSL' style has one line for each event of the same condition. Each line contains three numbers. The first number is the onset of the event relative to the onset of the first scan, in units of seconds. (Multiple scans should be treated as a concatenated long scan for the purpose of calculating onsets. However, the design matrix from one scan won't leak into the next). The second number is the duration of the event, in unit of seconds. The third number is the amplitude modulation (or weight) of the response. 
It is acceptable to not provide the weight, or not provide both duration and weight. In such cases, these parameters will default to 1.0. This code will accept timing files with only 1 or 2 columns for convenience but please note that the FSL package does not allow this 'AFNI' style has one line for each scan (run). Each line has a few triplets in the format of stim_onsets*weight:duration (or simpler, see below), separated by spaces. For example, 3.2\\*2.0:1.5 means that one event starts at 3.2s, modulated by weight of 2.0 and lasts for 1.5s. If some run does not include a single event of a condition (stimulus type), then you can put \\*, or a negative number, or a very large number in that line. Either duration or weight can be neglected. In such cases, they will default to 1.0. For example, 3.0, 3.0\\*1.0, 3.0:1.0 and 3.0\\*1.0:1.0 all means an event starting at 3.0s, lasting for 1.0s, with amplitude modulation of 1.0. temp_res: float, default: 0.01 Temporal resolution of fMRI, in second. hrf_para: dictionary The parameters of the double-Gamma hemodynamic response function. To set different parameters, supply a dictionary with the same set of keys as the default, and replace the corresponding values with the new values. Returns ------- design: 2D numpy array design matrix. Each time row represents one TR (fMRI sampling time point) and each column represents one experiment condition, in the order in stimtime_files ### Response: def gen_design(stimtime_files, scan_duration, TR, style='FSL', temp_res=0.01, hrf_para={'response_delay': 6, 'undershoot_delay': 12, 'response_dispersion': 0.9, 'undershoot_dispersion': 0.9, 'undershoot_scale': 0.035}): """ Generate design matrix based on a list of names of stimulus timing files. The function will read each file, and generate a numpy array of size [time_points \\* condition], where time_points equals duration / TR, and condition is the size of stimtime_filenames. 
Each column is the hypothetical fMRI response based on the stimulus timing in the corresponding file of stimtime_files. This function uses generate_stimfunction and double_gamma_hrf of brainiak.utils.fmrisim. Parameters ---------- stimtime_files: a string or a list of string. Each string is the name of the file storing the stimulus timing information of one task condition. The contents in the files will be interpretated based on the style parameter. Details are explained under the style parameter. scan_duration: float or a list (or a 1D numpy array) of numbers. Total duration of each fMRI scan, in unit of seconds. If there are multiple runs, the duration should be a list (or 1-d numpy array) of numbers. If it is a list, then each number in the list represents the duration of the corresponding scan in the stimtime_files. If only a number is provided, it is assumed that there is only one fMRI scan lasting for scan_duration. TR: float. The sampling period of fMRI, in unit of seconds. style: string, default: 'FSL' Acceptable inputs: 'FSL', 'AFNI' The formating style of the stimtime_files. 'FSL' style has one line for each event of the same condition. Each line contains three numbers. The first number is the onset of the event relative to the onset of the first scan, in units of seconds. (Multiple scans should be treated as a concatenated long scan for the purpose of calculating onsets. However, the design matrix from one scan won't leak into the next). The second number is the duration of the event, in unit of seconds. The third number is the amplitude modulation (or weight) of the response. It is acceptable to not provide the weight, or not provide both duration and weight. In such cases, these parameters will default to 1.0. This code will accept timing files with only 1 or 2 columns for convenience but please note that the FSL package does not allow this 'AFNI' style has one line for each scan (run). 
Each line has a few triplets in the format of stim_onsets*weight:duration (or simpler, see below), separated by spaces. For example, 3.2\\*2.0:1.5 means that one event starts at 3.2s, modulated by weight of 2.0 and lasts for 1.5s. If some run does not include a single event of a condition (stimulus type), then you can put \\*, or a negative number, or a very large number in that line. Either duration or weight can be neglected. In such cases, they will default to 1.0. For example, 3.0, 3.0\\*1.0, 3.0:1.0 and 3.0\\*1.0:1.0 all means an event starting at 3.0s, lasting for 1.0s, with amplitude modulation of 1.0. temp_res: float, default: 0.01 Temporal resolution of fMRI, in second. hrf_para: dictionary The parameters of the double-Gamma hemodynamic response function. To set different parameters, supply a dictionary with the same set of keys as the default, and replace the corresponding values with the new values. Returns ------- design: 2D numpy array design matrix. Each time row represents one TR (fMRI sampling time point) and each column represents one experiment condition, in the order in stimtime_files """ if np.ndim(scan_duration) == 0: scan_duration = [scan_duration] scan_duration = np.array(scan_duration) assert np.all(scan_duration > TR), \ 'scan duration should be longer than a TR' if type(stimtime_files) is str: stimtime_files = [stimtime_files] assert TR > 0, 'TR should be positive' assert style == 'FSL' or style == 'AFNI', 'style can only be FSL or AFNI' n_C = len(stimtime_files) # number of conditions n_S = np.size(scan_duration) # number of scans if n_S > 1: design = [np.empty([int(np.round(duration / TR)), n_C]) for duration in scan_duration] else: design = [np.empty([int(np.round(scan_duration / TR)), n_C])] scan_onoff = np.insert(np.cumsum(scan_duration), 0, 0) if style == 'FSL': design_info = _read_stimtime_FSL(stimtime_files, n_C, n_S, scan_onoff) elif style == 'AFNI': design_info = _read_stimtime_AFNI(stimtime_files, n_C, n_S, scan_onoff) 
response_delay = hrf_para['response_delay'] undershoot_delay = hrf_para['undershoot_delay'] response_disp = hrf_para['response_dispersion'] undershoot_disp = hrf_para['undershoot_dispersion'] undershoot_scale = hrf_para['undershoot_scale'] # generate design matrix for i_s in range(n_S): for i_c in range(n_C): if len(design_info[i_s][i_c]['onset']) > 0: stimfunction = generate_stimfunction( onsets=design_info[i_s][i_c]['onset'], event_durations=design_info[i_s][i_c]['duration'], total_time=scan_duration[i_s], weights=design_info[i_s][i_c]['weight'], temporal_resolution=1.0/temp_res) hrf = _double_gamma_hrf(response_delay=response_delay, undershoot_delay=undershoot_delay, response_dispersion=response_disp, undershoot_dispersion=undershoot_disp, undershoot_scale=undershoot_scale, temporal_resolution=1.0/temp_res) design[i_s][:, i_c] = convolve_hrf( stimfunction, TR, hrf_type=hrf, scale_function=False, temporal_resolution=1.0 / temp_res).transpose() * temp_res else: design[i_s][:, i_c] = 0.0 # We multiply the resulting design matrix with # the temporal resolution to normalize it. # We do not use the internal normalization # in double_gamma_hrf because it does not guarantee # normalizing with the same constant. return np.concatenate(design, axis=0)
def start(self): """ Starts running the timer. If the timer is currently running, then this method will do nothing. :sa stop, reset """ if self._timer.isActive(): return self._starttime = datetime.datetime.now() self._timer.start()
Starts running the timer. If the timer is currently running, then this method will do nothing. :sa stop, reset
Below is the the instruction that describes the task: ### Input: Starts running the timer. If the timer is currently running, then this method will do nothing. :sa stop, reset ### Response: def start(self): """ Starts running the timer. If the timer is currently running, then this method will do nothing. :sa stop, reset """ if self._timer.isActive(): return self._starttime = datetime.datetime.now() self._timer.start()
def lon_lat(value): """ :param value: a pair of coordinates :returns: a tuple (longitude, latitude) >>> lon_lat('12 14') (12.0, 14.0) """ lon, lat = value.split() return longitude(lon), latitude(lat)
:param value: a pair of coordinates :returns: a tuple (longitude, latitude) >>> lon_lat('12 14') (12.0, 14.0)
Below is the the instruction that describes the task: ### Input: :param value: a pair of coordinates :returns: a tuple (longitude, latitude) >>> lon_lat('12 14') (12.0, 14.0) ### Response: def lon_lat(value): """ :param value: a pair of coordinates :returns: a tuple (longitude, latitude) >>> lon_lat('12 14') (12.0, 14.0) """ lon, lat = value.split() return longitude(lon), latitude(lat)
def more_like_this(self, query, fields, columns=None, start=0, rows=30): """ Retrieves "more like this" results for a passed query document query - query for a document on which to base similar documents fields - fields on which to base similarity estimation (either comma delimited string or a list) columns - columns to return (list of strings) start - start number for first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30) """ if isinstance(fields, basestring): mlt_fields = fields else: mlt_fields = ",".join(fields) if columns is None: columns = ["*", "score"] fields = {'q' : query, 'json.nl': 'map', 'mlt.fl': mlt_fields, 'fl': ",".join(columns), 'start': str(start), 'rows': str(rows), 'wt': "json"} if len(self.endpoints) > 1: fields["shards"] = self._get_shards() assert self.default_endpoint in self.endpoints request_url = _get_url(self.endpoints[self.default_endpoint], "mlt") results = self._send_solr_query(request_url, fields) if not results: return None assert "responseHeader" in results # Check for response status if not results.get("responseHeader").get("status") == 0: logger.error("Server error while retrieving results: %s", results) return None assert "response" in results result_obj = self._parse_response(results) return result_obj
Retrieves "more like this" results for a passed query document query - query for a document on which to base similar documents fields - fields on which to base similarity estimation (either comma delimited string or a list) columns - columns to return (list of strings) start - start number for first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30)
Below is the the instruction that describes the task: ### Input: Retrieves "more like this" results for a passed query document query - query for a document on which to base similar documents fields - fields on which to base similarity estimation (either comma delimited string or a list) columns - columns to return (list of strings) start - start number for first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30) ### Response: def more_like_this(self, query, fields, columns=None, start=0, rows=30): """ Retrieves "more like this" results for a passed query document query - query for a document on which to base similar documents fields - fields on which to base similarity estimation (either comma delimited string or a list) columns - columns to return (list of strings) start - start number for first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30) """ if isinstance(fields, basestring): mlt_fields = fields else: mlt_fields = ",".join(fields) if columns is None: columns = ["*", "score"] fields = {'q' : query, 'json.nl': 'map', 'mlt.fl': mlt_fields, 'fl': ",".join(columns), 'start': str(start), 'rows': str(rows), 'wt': "json"} if len(self.endpoints) > 1: fields["shards"] = self._get_shards() assert self.default_endpoint in self.endpoints request_url = _get_url(self.endpoints[self.default_endpoint], "mlt") results = self._send_solr_query(request_url, fields) if not results: return None assert "responseHeader" in results # Check for response status if not results.get("responseHeader").get("status") == 0: logger.error("Server error while retrieving results: %s", results) return None assert "response" in results result_obj = self._parse_response(results) return result_obj
def get_netspeak_map(): """ Retrieve mapping from chat/text abbreviations and acronyms like LMK => Let Me Know """ dfs = pd.read_html('https://www.webopedia.com/quick_ref/textmessageabbreviations.asp') df = dfs[0].drop(index=0) df.columns = ['abbrev', 'definition'] csv_path = os.path.join(DATA_PATH, 'netspeak.csv') logger.info('Saving netspeak dictionary (word mapping) to {}'.format(csv_path)) df.to_csv(csv_path) return df
Retrieve mapping from chat/text abbreviations and acronyms like LMK => Let Me Know
Below is the the instruction that describes the task: ### Input: Retrieve mapping from chat/text abbreviations and acronyms like LMK => Let Me Know ### Response: def get_netspeak_map(): """ Retrieve mapping from chat/text abbreviations and acronyms like LMK => Let Me Know """ dfs = pd.read_html('https://www.webopedia.com/quick_ref/textmessageabbreviations.asp') df = dfs[0].drop(index=0) df.columns = ['abbrev', 'definition'] csv_path = os.path.join(DATA_PATH, 'netspeak.csv') logger.info('Saving netspeak dictionary (word mapping) to {}'.format(csv_path)) df.to_csv(csv_path) return df
def _update_rr_ce_entry(self, rec): # type: (dr.DirectoryRecord) -> int ''' An internal method to update the Rock Ridge CE entry for the given record. Parameters: rec - The record to update the Rock Ridge CE entry for (if it exists). Returns: The number of additional bytes needed for this Rock Ridge CE entry. ''' if rec.rock_ridge is not None and rec.rock_ridge.dr_entries.ce_record is not None: celen = rec.rock_ridge.dr_entries.ce_record.len_cont_area added_block, block, offset = self.pvd.add_rr_ce_entry(celen) rec.rock_ridge.update_ce_block(block) rec.rock_ridge.dr_entries.ce_record.update_offset(offset) if added_block: return self.pvd.logical_block_size() return 0
An internal method to update the Rock Ridge CE entry for the given record. Parameters: rec - The record to update the Rock Ridge CE entry for (if it exists). Returns: The number of additional bytes needed for this Rock Ridge CE entry.
Below is the the instruction that describes the task: ### Input: An internal method to update the Rock Ridge CE entry for the given record. Parameters: rec - The record to update the Rock Ridge CE entry for (if it exists). Returns: The number of additional bytes needed for this Rock Ridge CE entry. ### Response: def _update_rr_ce_entry(self, rec): # type: (dr.DirectoryRecord) -> int ''' An internal method to update the Rock Ridge CE entry for the given record. Parameters: rec - The record to update the Rock Ridge CE entry for (if it exists). Returns: The number of additional bytes needed for this Rock Ridge CE entry. ''' if rec.rock_ridge is not None and rec.rock_ridge.dr_entries.ce_record is not None: celen = rec.rock_ridge.dr_entries.ce_record.len_cont_area added_block, block, offset = self.pvd.add_rr_ce_entry(celen) rec.rock_ridge.update_ce_block(block) rec.rock_ridge.dr_entries.ce_record.update_offset(offset) if added_block: return self.pvd.logical_block_size() return 0
def build_header(self, title): """Generate the header for the Markdown file.""" header = ['---', 'title: ' + title, 'author(s): ' + self.user, 'tags: ', 'created_at: ' + str(self.date_created), 'updated_at: ' + str(self.date_updated), 'tldr: ', 'thumbnail: ', '---'] self.out = header + self.out
Generate the header for the Markdown file.
Below is the the instruction that describes the task: ### Input: Generate the header for the Markdown file. ### Response: def build_header(self, title): """Generate the header for the Markdown file.""" header = ['---', 'title: ' + title, 'author(s): ' + self.user, 'tags: ', 'created_at: ' + str(self.date_created), 'updated_at: ' + str(self.date_updated), 'tldr: ', 'thumbnail: ', '---'] self.out = header + self.out
def _set_keepalive_interval(self, v, load=False): """ Setter method for keepalive_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/state/keepalive_interval (decimal64) If this variable is read-only (config: false) in the source YANG file, then _set_keepalive_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_keepalive_interval() directly. YANG Description: Time interval in seconds between transmission of keepalive messages to the neighbor. Typically set to 1/3 the hold-time. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedPrecisionDecimalType(precision=2), default=Decimal(30), is_leaf=True, yang_name="keepalive-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="decimal64", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """keepalive_interval must be of a type compatible with decimal64""", "defined-type": "decimal64", "generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), default=Decimal(30), is_leaf=True, yang_name="keepalive-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)""", } ) self.__keepalive_interval = t if hasattr(self, "_set"): self._set()
Setter method for keepalive_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/state/keepalive_interval (decimal64) If this variable is read-only (config: false) in the source YANG file, then _set_keepalive_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_keepalive_interval() directly. YANG Description: Time interval in seconds between transmission of keepalive messages to the neighbor. Typically set to 1/3 the hold-time.
Below is the the instruction that describes the task: ### Input: Setter method for keepalive_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/state/keepalive_interval (decimal64) If this variable is read-only (config: false) in the source YANG file, then _set_keepalive_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_keepalive_interval() directly. YANG Description: Time interval in seconds between transmission of keepalive messages to the neighbor. Typically set to 1/3 the hold-time. ### Response: def _set_keepalive_interval(self, v, load=False): """ Setter method for keepalive_interval, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/timers/state/keepalive_interval (decimal64) If this variable is read-only (config: false) in the source YANG file, then _set_keepalive_interval is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_keepalive_interval() directly. YANG Description: Time interval in seconds between transmission of keepalive messages to the neighbor. Typically set to 1/3 the hold-time. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedPrecisionDecimalType(precision=2), default=Decimal(30), is_leaf=True, yang_name="keepalive-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="decimal64", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """keepalive_interval must be of a type compatible with decimal64""", "defined-type": "decimal64", "generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), default=Decimal(30), is_leaf=True, yang_name="keepalive-interval", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=False)""", } ) self.__keepalive_interval = t if hasattr(self, "_set"): self._set()
def posterior(self, x): """Posterior function. Returns ``P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy`` This will used the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed. """ if self._post is None: return self._posterior(x) x = np.array(x, ndmin=1) return self._post_interp(x)
Posterior function. Returns ``P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy`` This will used the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed.
Below is the the instruction that describes the task: ### Input: Posterior function. Returns ``P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy`` This will used the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed. ### Response: def posterior(self, x): """Posterior function. Returns ``P(x) = \int L(x,y|z') L(y) dy / \int L(x,y|z') L(y) dx dy`` This will used the cached '~fermipy.castro.Interpolator' object if possible, and construct it if needed. """ if self._post is None: return self._posterior(x) x = np.array(x, ndmin=1) return self._post_interp(x)
def intersection(self, other: 'FileLineSet') -> 'FileLineSet': """ Returns a set of file lines that contains the intersection of the lines within this set and a given set. """ assert isinstance(other, FileLineSet) set_self = set(self) set_other = set(other) set_union = set_self & set_other return FileLineSet.from_list(list(set_union))
Returns a set of file lines that contains the intersection of the lines within this set and a given set.
Below is the the instruction that describes the task: ### Input: Returns a set of file lines that contains the intersection of the lines within this set and a given set. ### Response: def intersection(self, other: 'FileLineSet') -> 'FileLineSet': """ Returns a set of file lines that contains the intersection of the lines within this set and a given set. """ assert isinstance(other, FileLineSet) set_self = set(self) set_other = set(other) set_union = set_self & set_other return FileLineSet.from_list(list(set_union))
def all_but_axis(i, axis, num_axes): """ Return a slice covering all combinations with coordinate i along axis. (Effectively the hyperplane perpendicular to axis at i.) """ the_slice = () for j in range(num_axes): if j == axis: the_slice = the_slice + (i,) else: the_slice = the_slice + (slice(None),) return the_slice
Return a slice covering all combinations with coordinate i along axis. (Effectively the hyperplane perpendicular to axis at i.)
Below is the the instruction that describes the task: ### Input: Return a slice covering all combinations with coordinate i along axis. (Effectively the hyperplane perpendicular to axis at i.) ### Response: def all_but_axis(i, axis, num_axes): """ Return a slice covering all combinations with coordinate i along axis. (Effectively the hyperplane perpendicular to axis at i.) """ the_slice = () for j in range(num_axes): if j == axis: the_slice = the_slice + (i,) else: the_slice = the_slice + (slice(None),) return the_slice
def VariantDir(self, variant_dir, src_dir, duplicate=1): """Link the supplied variant directory to the source directory for purposes of building files.""" if not isinstance(src_dir, SCons.Node.Node): src_dir = self.Dir(src_dir) if not isinstance(variant_dir, SCons.Node.Node): variant_dir = self.Dir(variant_dir) if src_dir.is_under(variant_dir): raise SCons.Errors.UserError("Source directory cannot be under variant directory.") if variant_dir.srcdir: if variant_dir.srcdir == src_dir: return # We already did this. raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir)) variant_dir.link(src_dir, duplicate)
Link the supplied variant directory to the source directory for purposes of building files.
Below is the the instruction that describes the task: ### Input: Link the supplied variant directory to the source directory for purposes of building files. ### Response: def VariantDir(self, variant_dir, src_dir, duplicate=1): """Link the supplied variant directory to the source directory for purposes of building files.""" if not isinstance(src_dir, SCons.Node.Node): src_dir = self.Dir(src_dir) if not isinstance(variant_dir, SCons.Node.Node): variant_dir = self.Dir(variant_dir) if src_dir.is_under(variant_dir): raise SCons.Errors.UserError("Source directory cannot be under variant directory.") if variant_dir.srcdir: if variant_dir.srcdir == src_dir: return # We already did this. raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir)) variant_dir.link(src_dir, duplicate)
def add(self, email_address, name): """Adds an administrator to an account.""" body = { "EmailAddress": email_address, "Name": name} response = self._post("/admins.json", json.dumps(body)) return json_to_py(response)
Adds an administrator to an account.
Below is the the instruction that describes the task: ### Input: Adds an administrator to an account. ### Response: def add(self, email_address, name): """Adds an administrator to an account.""" body = { "EmailAddress": email_address, "Name": name} response = self._post("/admins.json", json.dumps(body)) return json_to_py(response)
def get_accept_list(self, request): """ Given the incoming request, return a tokenised list of media type strings. """ header = request.META.get('HTTP_ACCEPT', '*/*') return [token.strip() for token in header.split(',')]
Given the incoming request, return a tokenised list of media type strings.
Below is the the instruction that describes the task: ### Input: Given the incoming request, return a tokenised list of media type strings. ### Response: def get_accept_list(self, request): """ Given the incoming request, return a tokenised list of media type strings. """ header = request.META.get('HTTP_ACCEPT', '*/*') return [token.strip() for token in header.split(',')]
def set_data(self, frames): """ Prepare the input of model """ data_frames = [] for frame in frames: #frame H x W x C frame = frame.swapaxes(0, 1) # swap width and height to form format W x H x C if len(frame.shape) < 3: frame = np.array([frame]).swapaxes(0, 2).swapaxes(0, 1) # Add grayscale channel data_frames.append(frame) frames_n = len(data_frames) data_frames = np.array(data_frames) # T x W x H x C data_frames = np.rollaxis(data_frames, 3) # C x T x W x H data_frames = data_frames.swapaxes(2, 3) # C x T x H x W = NCDHW self.data = data_frames self.length = frames_n
Prepare the input of model
Below is the the instruction that describes the task: ### Input: Prepare the input of model ### Response: def set_data(self, frames): """ Prepare the input of model """ data_frames = [] for frame in frames: #frame H x W x C frame = frame.swapaxes(0, 1) # swap width and height to form format W x H x C if len(frame.shape) < 3: frame = np.array([frame]).swapaxes(0, 2).swapaxes(0, 1) # Add grayscale channel data_frames.append(frame) frames_n = len(data_frames) data_frames = np.array(data_frames) # T x W x H x C data_frames = np.rollaxis(data_frames, 3) # C x T x W x H data_frames = data_frames.swapaxes(2, 3) # C x T x H x W = NCDHW self.data = data_frames self.length = frames_n
def getLocalTime(utc_dt, tz): """Return local timezone time """ import pytz local_tz = pytz.timezone(tz) local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz) return local_dt
Return local timezone time
Below is the the instruction that describes the task: ### Input: Return local timezone time ### Response: def getLocalTime(utc_dt, tz): """Return local timezone time """ import pytz local_tz = pytz.timezone(tz) local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz) return local_dt
def htmlCreateFileParserCtxt(filename, encoding): """Create a parser context for a file content. Automatic support for ZLIB/Compress compressed document is provided by default if found at compile-time. """ ret = libxml2mod.htmlCreateFileParserCtxt(filename, encoding) if ret is None:raise parserError('htmlCreateFileParserCtxt() failed') return parserCtxt(_obj=ret)
Create a parser context for a file content. Automatic support for ZLIB/Compress compressed document is provided by default if found at compile-time.
Below is the the instruction that describes the task: ### Input: Create a parser context for a file content. Automatic support for ZLIB/Compress compressed document is provided by default if found at compile-time. ### Response: def htmlCreateFileParserCtxt(filename, encoding): """Create a parser context for a file content. Automatic support for ZLIB/Compress compressed document is provided by default if found at compile-time. """ ret = libxml2mod.htmlCreateFileParserCtxt(filename, encoding) if ret is None:raise parserError('htmlCreateFileParserCtxt() failed') return parserCtxt(_obj=ret)
def publish_scene_velocity(self, scene_id, velocity): """publish a changed scene velovity""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_velocity(self.sequence_number, scene_id, velocity)) return self.sequence_number
publish a changed scene velovity
Below is the the instruction that describes the task: ### Input: publish a changed scene velovity ### Response: def publish_scene_velocity(self, scene_id, velocity): """publish a changed scene velovity""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_velocity(self.sequence_number, scene_id, velocity)) return self.sequence_number
def forModule(cls, name): """ Return an instance of this class representing the module of the given name. If the given module name is "__main__", it will be translated to the actual file name of the top-level script without the .py or .pyc extension. This method assumes that the module with the specified name has already been loaded. """ module = sys.modules[name] filePath = os.path.abspath(module.__file__) filePath = filePath.split(os.path.sep) filePath[-1], extension = os.path.splitext(filePath[-1]) if not extension in ('.py', '.pyc'): raise Exception('The name of a user script/module must end in .py or .pyc.') if name == '__main__': log.debug("Discovering real name of module") # User script/module was invoked as the main program if module.__package__: # Invoked as a module via python -m foo.bar log.debug("Script was invoked as a module") name = [filePath.pop()] for package in reversed(module.__package__.split('.')): dirPathTail = filePath.pop() assert dirPathTail == package name.append(dirPathTail) name = '.'.join(reversed(name)) dirPath = os.path.sep.join(filePath) else: # Invoked as a script via python foo/bar.py name = filePath.pop() dirPath = os.path.sep.join(filePath) cls._check_conflict(dirPath, name) else: # User module was imported. Determine the directory containing the top-level package if filePath[-1] == '__init__': # module is a subpackage filePath.pop() for package in reversed(name.split('.')): dirPathTail = filePath.pop() assert dirPathTail == package dirPath = os.path.sep.join(filePath) log.debug("Module dir is %s", dirPath) if not os.path.isdir(dirPath): raise Exception('Bad directory path %s for module %s. Note that hot-deployment does not support .egg-link files yet, or scripts located in the root directory.' % (dirPath, name)) fromVirtualEnv = inVirtualEnv() and dirPath.startswith(sys.prefix) return cls(dirPath=dirPath, name=name, fromVirtualEnv=fromVirtualEnv)
Return an instance of this class representing the module of the given name. If the given module name is "__main__", it will be translated to the actual file name of the top-level script without the .py or .pyc extension. This method assumes that the module with the specified name has already been loaded.
Below is the the instruction that describes the task: ### Input: Return an instance of this class representing the module of the given name. If the given module name is "__main__", it will be translated to the actual file name of the top-level script without the .py or .pyc extension. This method assumes that the module with the specified name has already been loaded. ### Response: def forModule(cls, name): """ Return an instance of this class representing the module of the given name. If the given module name is "__main__", it will be translated to the actual file name of the top-level script without the .py or .pyc extension. This method assumes that the module with the specified name has already been loaded. """ module = sys.modules[name] filePath = os.path.abspath(module.__file__) filePath = filePath.split(os.path.sep) filePath[-1], extension = os.path.splitext(filePath[-1]) if not extension in ('.py', '.pyc'): raise Exception('The name of a user script/module must end in .py or .pyc.') if name == '__main__': log.debug("Discovering real name of module") # User script/module was invoked as the main program if module.__package__: # Invoked as a module via python -m foo.bar log.debug("Script was invoked as a module") name = [filePath.pop()] for package in reversed(module.__package__.split('.')): dirPathTail = filePath.pop() assert dirPathTail == package name.append(dirPathTail) name = '.'.join(reversed(name)) dirPath = os.path.sep.join(filePath) else: # Invoked as a script via python foo/bar.py name = filePath.pop() dirPath = os.path.sep.join(filePath) cls._check_conflict(dirPath, name) else: # User module was imported. 
Determine the directory containing the top-level package if filePath[-1] == '__init__': # module is a subpackage filePath.pop() for package in reversed(name.split('.')): dirPathTail = filePath.pop() assert dirPathTail == package dirPath = os.path.sep.join(filePath) log.debug("Module dir is %s", dirPath) if not os.path.isdir(dirPath): raise Exception('Bad directory path %s for module %s. Note that hot-deployment does not support .egg-link files yet, or scripts located in the root directory.' % (dirPath, name)) fromVirtualEnv = inVirtualEnv() and dirPath.startswith(sys.prefix) return cls(dirPath=dirPath, name=name, fromVirtualEnv=fromVirtualEnv)
def segwit_addr_decode(addr, hrp=bech32_prefix): """ Decode a segwit address. Returns (version, hash_bin) on success Returns (None, None) on error """ hrpgot, data = bech32_decode(addr) if hrpgot != hrp: return (None, None) decoded = convertbits(data[1:], 5, 8, False) if decoded is None or len(decoded) < 2 or len(decoded) > 40: return (None, None) if data[0] > 16: return (None, None) if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32: return (None, None) return (data[0], ''.join([chr(x) for x in decoded]))
Decode a segwit address. Returns (version, hash_bin) on success Returns (None, None) on error
Below is the the instruction that describes the task: ### Input: Decode a segwit address. Returns (version, hash_bin) on success Returns (None, None) on error ### Response: def segwit_addr_decode(addr, hrp=bech32_prefix): """ Decode a segwit address. Returns (version, hash_bin) on success Returns (None, None) on error """ hrpgot, data = bech32_decode(addr) if hrpgot != hrp: return (None, None) decoded = convertbits(data[1:], 5, 8, False) if decoded is None or len(decoded) < 2 or len(decoded) > 40: return (None, None) if data[0] > 16: return (None, None) if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32: return (None, None) return (data[0], ''.join([chr(x) for x in decoded]))
def parse_workflow_call_body_declarations(self, i): """ Have not seen this used, so expects to return "[]". :param i: :return: """ declaration_array = [] if isinstance(i, wdl_parser.Terminal): declaration_array = [i.source_string] elif isinstance(i, wdl_parser.Ast): raise NotImplementedError elif isinstance(i, wdl_parser.AstList): for ast in i: declaration_array.append(self.parse_task_declaration(ast)) # have not seen this used so raise to check if declaration_array: raise NotImplementedError return declaration_array
Have not seen this used, so expects to return "[]". :param i: :return:
Below is the the instruction that describes the task: ### Input: Have not seen this used, so expects to return "[]". :param i: :return: ### Response: def parse_workflow_call_body_declarations(self, i): """ Have not seen this used, so expects to return "[]". :param i: :return: """ declaration_array = [] if isinstance(i, wdl_parser.Terminal): declaration_array = [i.source_string] elif isinstance(i, wdl_parser.Ast): raise NotImplementedError elif isinstance(i, wdl_parser.AstList): for ast in i: declaration_array.append(self.parse_task_declaration(ast)) # have not seen this used so raise to check if declaration_array: raise NotImplementedError return declaration_array
def fromTuple(self, locationTuple): """ Read the coordinates from a tuple. :: >>> t = (('pop', 1), ('snap', -100)) >>> l = Location() >>> l.fromTuple(t) >>> print(l) <Location pop:1, snap:-100 > """ for key, value in locationTuple: try: self[key] = float(value) except TypeError: self[key] = tuple([float(v) for v in value])
Read the coordinates from a tuple. :: >>> t = (('pop', 1), ('snap', -100)) >>> l = Location() >>> l.fromTuple(t) >>> print(l) <Location pop:1, snap:-100 >
Below is the the instruction that describes the task: ### Input: Read the coordinates from a tuple. :: >>> t = (('pop', 1), ('snap', -100)) >>> l = Location() >>> l.fromTuple(t) >>> print(l) <Location pop:1, snap:-100 > ### Response: def fromTuple(self, locationTuple): """ Read the coordinates from a tuple. :: >>> t = (('pop', 1), ('snap', -100)) >>> l = Location() >>> l.fromTuple(t) >>> print(l) <Location pop:1, snap:-100 > """ for key, value in locationTuple: try: self[key] = float(value) except TypeError: self[key] = tuple([float(v) for v in value])
def __make_hash(cls, innermsg, token, seqnum): """return the hash for this innermsg, token, seqnum return digest bytes """ hobj = hmacNew(token, digestmod=hashfunc) hobj.update(innermsg) hobj.update(cls.__byte_packer(seqnum)) return hobj.digest()
return the hash for this innermsg, token, seqnum return digest bytes
Below is the the instruction that describes the task: ### Input: return the hash for this innermsg, token, seqnum return digest bytes ### Response: def __make_hash(cls, innermsg, token, seqnum): """return the hash for this innermsg, token, seqnum return digest bytes """ hobj = hmacNew(token, digestmod=hashfunc) hobj.update(innermsg) hobj.update(cls.__byte_packer(seqnum)) return hobj.digest()
def toxml(self): """ Exports this object into a LEMS XML object """ chxmlstr = '' for event_connection in self.event_connections: chxmlstr += event_connection.toxml() for for_each in self.for_eachs: chxmlstr += for_each.toxml() return '<ForEach instances="{0}" as="{1}">{2}</ForEach>'.format(self.instances, self.as_, chxmlstr)
Exports this object into a LEMS XML object
Below is the the instruction that describes the task: ### Input: Exports this object into a LEMS XML object ### Response: def toxml(self): """ Exports this object into a LEMS XML object """ chxmlstr = '' for event_connection in self.event_connections: chxmlstr += event_connection.toxml() for for_each in self.for_eachs: chxmlstr += for_each.toxml() return '<ForEach instances="{0}" as="{1}">{2}</ForEach>'.format(self.instances, self.as_, chxmlstr)
def concatenate(self, other, *args): """ Concatenate two or more ColorVisuals objects into a single object. Parameters ----------- other : ColorVisuals Object to append *args: ColorVisuals objects Returns ----------- result: ColorVisuals object containing information from current object and others in the order it was passed. """ # avoid a circular import from . import objects result = objects.concatenate(self, other, *args) return result
Concatenate two or more ColorVisuals objects into a single object. Parameters ----------- other : ColorVisuals Object to append *args: ColorVisuals objects Returns ----------- result: ColorVisuals object containing information from current object and others in the order it was passed.
Below is the the instruction that describes the task: ### Input: Concatenate two or more ColorVisuals objects into a single object. Parameters ----------- other : ColorVisuals Object to append *args: ColorVisuals objects Returns ----------- result: ColorVisuals object containing information from current object and others in the order it was passed. ### Response: def concatenate(self, other, *args): """ Concatenate two or more ColorVisuals objects into a single object. Parameters ----------- other : ColorVisuals Object to append *args: ColorVisuals objects Returns ----------- result: ColorVisuals object containing information from current object and others in the order it was passed. """ # avoid a circular import from . import objects result = objects.concatenate(self, other, *args) return result
def value_from_ast_untyped( value_node: ValueNode, variables: Dict[str, Any] = None ) -> Any: """Produce a Python value given a GraphQL Value AST. Unlike `value_from_ast()`, no type is provided. The resulting Python value will reflect the provided GraphQL value AST. | GraphQL Value | JSON Value | Python Value | | -------------------- | ---------- | ------------ | | Input Object | Object | dict | | List | Array | list | | Boolean | Boolean | bool | | String / Enum | String | str | | Int / Float | Number | int / float | | Null | null | None | """ func = _value_from_kind_functions.get(value_node.kind) if func: return func(value_node, variables) # Not reachable. All possible value nodes have been considered. raise TypeError( # pragma: no cover f"Unexpected value node: '{inspect(value_node)}'." )
Produce a Python value given a GraphQL Value AST. Unlike `value_from_ast()`, no type is provided. The resulting Python value will reflect the provided GraphQL value AST. | GraphQL Value | JSON Value | Python Value | | -------------------- | ---------- | ------------ | | Input Object | Object | dict | | List | Array | list | | Boolean | Boolean | bool | | String / Enum | String | str | | Int / Float | Number | int / float | | Null | null | None |
Below is the the instruction that describes the task: ### Input: Produce a Python value given a GraphQL Value AST. Unlike `value_from_ast()`, no type is provided. The resulting Python value will reflect the provided GraphQL value AST. | GraphQL Value | JSON Value | Python Value | | -------------------- | ---------- | ------------ | | Input Object | Object | dict | | List | Array | list | | Boolean | Boolean | bool | | String / Enum | String | str | | Int / Float | Number | int / float | | Null | null | None | ### Response: def value_from_ast_untyped( value_node: ValueNode, variables: Dict[str, Any] = None ) -> Any: """Produce a Python value given a GraphQL Value AST. Unlike `value_from_ast()`, no type is provided. The resulting Python value will reflect the provided GraphQL value AST. | GraphQL Value | JSON Value | Python Value | | -------------------- | ---------- | ------------ | | Input Object | Object | dict | | List | Array | list | | Boolean | Boolean | bool | | String / Enum | String | str | | Int / Float | Number | int / float | | Null | null | None | """ func = _value_from_kind_functions.get(value_node.kind) if func: return func(value_node, variables) # Not reachable. All possible value nodes have been considered. raise TypeError( # pragma: no cover f"Unexpected value node: '{inspect(value_node)}'." )
def create(cls, path_name=None, name=None, project_id=None, log_modified_at=None, crawlable=True): """Initialize an instance and save it to db.""" result = cls(path_name, name, project_id, log_modified_at, crawlable) db.session.add(result) db.session.commit() crawl_result(result, True) return result
Initialize an instance and save it to db.
Below is the instruction that describes the task:
### Input:
Initialize an instance and save it to db.
### Response:
def create(cls, path_name=None, name=None, project_id=None,
               log_modified_at=None, crawlable=True):
        """Initialize an instance and save it to db."""
        result = cls(path_name, name, project_id, log_modified_at, crawlable)

        db.session.add(result)
        db.session.commit()

        crawl_result(result, True)

        return result
def main(): """ Slowly writes to stdout, without emitting a newline so any output buffering (or input for next pipeline command) can be detected. """ now = datetime.datetime.now try: while True: sys.stdout.write(str(now()) + ' ') time.sleep(1) except KeyboardInterrupt: pass except IOError as exc: if exc.errno != errno.EPIPE: raise
Slowly writes to stdout, without emitting a newline so any output buffering (or input for next pipeline command) can be detected.
Below is the the instruction that describes the task: ### Input: Slowly writes to stdout, without emitting a newline so any output buffering (or input for next pipeline command) can be detected. ### Response: def main(): """ Slowly writes to stdout, without emitting a newline so any output buffering (or input for next pipeline command) can be detected. """ now = datetime.datetime.now try: while True: sys.stdout.write(str(now()) + ' ') time.sleep(1) except KeyboardInterrupt: pass except IOError as exc: if exc.errno != errno.EPIPE: raise
def _encode(data): """Encode the given data using base-64 :param data: :return: base-64 encoded string """ if not isinstance(data, bytes_types): data = six.b(str(data)) return base64.b64encode(data).decode("utf-8")
Encode the given data using base-64 :param data: :return: base-64 encoded string
Below is the instruction that describes the task:
### Input:
Encode the given data using base-64
    :param data:
    :return: base-64 encoded string
### Response:
def _encode(data):
    """Encode the given data using base-64
    :param data:
    :return: base-64 encoded string
    """
    if not isinstance(data, bytes_types):
        data = six.b(str(data))
    return base64.b64encode(data).decode("utf-8")
def get_facts(self): """Return a set of facts from the devices.""" # default values. vendor = u'Cisco' uptime = -1 serial_number, fqdn, os_version, hostname, domain_name = ('',) * 5 # obtain output from device show_ver = self.device.send_command('show version') show_hosts = self.device.send_command('show hosts') show_int_status = self.device.send_command('show interface status') show_hostname = self.device.send_command('show hostname') # uptime/serial_number/IOS version for line in show_ver.splitlines(): if ' uptime is ' in line: _, uptime_str = line.split(' uptime is ') uptime = self.parse_uptime(uptime_str) if 'Processor Board ID' in line: _, serial_number = line.split("Processor Board ID ") serial_number = serial_number.strip() if 'system: ' in line: line = line.strip() os_version = line.split()[2] os_version = os_version.strip() if 'cisco' in line and 'Chassis' in line: _, model = line.split()[:2] model = model.strip() hostname = show_hostname.strip() # Determine domain_name and fqdn for line in show_hosts.splitlines(): if 'Default domain' in line: _, domain_name = re.split(r".*Default domain.*is ", line) domain_name = domain_name.strip() break if hostname.count(".") >= 2: fqdn = hostname elif domain_name: fqdn = '{}.{}'.format(hostname, domain_name) # interface_list filter interface_list = [] show_int_status = show_int_status.strip() for line in show_int_status.splitlines(): if line.startswith(' ') or line.startswith('-') or line.startswith('Port '): continue interface = line.split()[0] interface_list.append(interface) return { 'uptime': int(uptime), 'vendor': vendor, 'os_version': py23_compat.text_type(os_version), 'serial_number': py23_compat.text_type(serial_number), 'model': py23_compat.text_type(model), 'hostname': py23_compat.text_type(hostname), 'fqdn': fqdn, 'interface_list': interface_list }
Return a set of facts from the devices.
Below is the the instruction that describes the task: ### Input: Return a set of facts from the devices. ### Response: def get_facts(self): """Return a set of facts from the devices.""" # default values. vendor = u'Cisco' uptime = -1 serial_number, fqdn, os_version, hostname, domain_name = ('',) * 5 # obtain output from device show_ver = self.device.send_command('show version') show_hosts = self.device.send_command('show hosts') show_int_status = self.device.send_command('show interface status') show_hostname = self.device.send_command('show hostname') # uptime/serial_number/IOS version for line in show_ver.splitlines(): if ' uptime is ' in line: _, uptime_str = line.split(' uptime is ') uptime = self.parse_uptime(uptime_str) if 'Processor Board ID' in line: _, serial_number = line.split("Processor Board ID ") serial_number = serial_number.strip() if 'system: ' in line: line = line.strip() os_version = line.split()[2] os_version = os_version.strip() if 'cisco' in line and 'Chassis' in line: _, model = line.split()[:2] model = model.strip() hostname = show_hostname.strip() # Determine domain_name and fqdn for line in show_hosts.splitlines(): if 'Default domain' in line: _, domain_name = re.split(r".*Default domain.*is ", line) domain_name = domain_name.strip() break if hostname.count(".") >= 2: fqdn = hostname elif domain_name: fqdn = '{}.{}'.format(hostname, domain_name) # interface_list filter interface_list = [] show_int_status = show_int_status.strip() for line in show_int_status.splitlines(): if line.startswith(' ') or line.startswith('-') or line.startswith('Port '): continue interface = line.split()[0] interface_list.append(interface) return { 'uptime': int(uptime), 'vendor': vendor, 'os_version': py23_compat.text_type(os_version), 'serial_number': py23_compat.text_type(serial_number), 'model': py23_compat.text_type(model), 'hostname': py23_compat.text_type(hostname), 'fqdn': fqdn, 'interface_list': interface_list }
def resolve(self, path, strict): """Make the path absolute, resolving any symlinks.""" if self.filesystem.is_windows_fs: return self._resolve_windows(path, strict) return self._resolve_posix(path, strict)
Make the path absolute, resolving any symlinks.
Below is the instruction that describes the task:
### Input:
Make the path absolute, resolving any symlinks.
### Response:
def resolve(self, path, strict):
        """Make the path absolute, resolving any symlinks."""
        if self.filesystem.is_windows_fs:
            return self._resolve_windows(path, strict)
        return self._resolve_posix(path, strict)
def build_kernel(self): """Build the KNN kernel. Build a k nearest neighbors kernel, optionally with alpha decay. If `precomputed` is not `None`, the appropriate steps in the kernel building process are skipped. Must return a symmetric matrix Returns ------- K : kernel matrix, shape=[n_samples, n_samples] symmetric matrix with ones down the diagonal with no non-negative entries. Raises ------ ValueError: if `precomputed` is not an acceptable value """ if self.precomputed == "affinity": # already done # TODO: should we check that precomputed matrices look okay? # e.g. check the diagonal K = self.data_nu elif self.precomputed == "adjacency": # need to set diagonal to one to make it an affinity matrix K = self.data_nu if sparse.issparse(K) and \ not (isinstance(K, sparse.dok_matrix) or isinstance(K, sparse.lil_matrix)): K = K.tolil() K = set_diagonal(K, 1) else: tasklogger.log_start("affinities") if sparse.issparse(self.data_nu): self.data_nu = self.data_nu.toarray() if self.precomputed == "distance": pdx = self.data_nu elif self.precomputed is None: pdx = pdist(self.data_nu, metric=self.distance) if np.any(pdx == 0): pdx = squareform(pdx) duplicate_ids = np.array( [i for i in np.argwhere(pdx == 0) if i[1] > i[0]]) duplicate_names = ", ".join(["{} and {}".format(i[0], i[1]) for i in duplicate_ids]) warnings.warn( "Detected zero distance between samples {}. " "Consider removing duplicates to avoid errors in " "downstream processing.".format(duplicate_names), RuntimeWarning) else: pdx = squareform(pdx) else: raise ValueError( "precomputed='{}' not recognized. 
" "Choose from ['affinity', 'adjacency', 'distance', " "None]".format(self.precomputed)) if self.bandwidth is None: knn_dist = np.partition( pdx, self.knn + 1, axis=1)[:, :self.knn + 1] bandwidth = np.max(knn_dist, axis=1) elif callable(self.bandwidth): bandwidth = self.bandwidth(pdx) else: bandwidth = self.bandwidth bandwidth = bandwidth * self.bandwidth_scale pdx = (pdx.T / bandwidth).T K = np.exp(-1 * np.power(pdx, self.decay)) # handle nan K = np.where(np.isnan(K), 1, K) tasklogger.log_complete("affinities") # truncate if sparse.issparse(K): if not (isinstance(K, sparse.csr_matrix) or isinstance(K, sparse.csc_matrix) or isinstance(K, sparse.bsr_matrix)): K = K.tocsr() K.data[K.data < self.thresh] = 0 K = K.tocoo() K.eliminate_zeros() K = K.tocsr() else: K[K < self.thresh] = 0 return K
Build the KNN kernel. Build a k nearest neighbors kernel, optionally with alpha decay. If `precomputed` is not `None`, the appropriate steps in the kernel building process are skipped. Must return a symmetric matrix Returns ------- K : kernel matrix, shape=[n_samples, n_samples] symmetric matrix with ones down the diagonal with no non-negative entries. Raises ------ ValueError: if `precomputed` is not an acceptable value
Below is the the instruction that describes the task: ### Input: Build the KNN kernel. Build a k nearest neighbors kernel, optionally with alpha decay. If `precomputed` is not `None`, the appropriate steps in the kernel building process are skipped. Must return a symmetric matrix Returns ------- K : kernel matrix, shape=[n_samples, n_samples] symmetric matrix with ones down the diagonal with no non-negative entries. Raises ------ ValueError: if `precomputed` is not an acceptable value ### Response: def build_kernel(self): """Build the KNN kernel. Build a k nearest neighbors kernel, optionally with alpha decay. If `precomputed` is not `None`, the appropriate steps in the kernel building process are skipped. Must return a symmetric matrix Returns ------- K : kernel matrix, shape=[n_samples, n_samples] symmetric matrix with ones down the diagonal with no non-negative entries. Raises ------ ValueError: if `precomputed` is not an acceptable value """ if self.precomputed == "affinity": # already done # TODO: should we check that precomputed matrices look okay? # e.g. check the diagonal K = self.data_nu elif self.precomputed == "adjacency": # need to set diagonal to one to make it an affinity matrix K = self.data_nu if sparse.issparse(K) and \ not (isinstance(K, sparse.dok_matrix) or isinstance(K, sparse.lil_matrix)): K = K.tolil() K = set_diagonal(K, 1) else: tasklogger.log_start("affinities") if sparse.issparse(self.data_nu): self.data_nu = self.data_nu.toarray() if self.precomputed == "distance": pdx = self.data_nu elif self.precomputed is None: pdx = pdist(self.data_nu, metric=self.distance) if np.any(pdx == 0): pdx = squareform(pdx) duplicate_ids = np.array( [i for i in np.argwhere(pdx == 0) if i[1] > i[0]]) duplicate_names = ", ".join(["{} and {}".format(i[0], i[1]) for i in duplicate_ids]) warnings.warn( "Detected zero distance between samples {}. 
" "Consider removing duplicates to avoid errors in " "downstream processing.".format(duplicate_names), RuntimeWarning) else: pdx = squareform(pdx) else: raise ValueError( "precomputed='{}' not recognized. " "Choose from ['affinity', 'adjacency', 'distance', " "None]".format(self.precomputed)) if self.bandwidth is None: knn_dist = np.partition( pdx, self.knn + 1, axis=1)[:, :self.knn + 1] bandwidth = np.max(knn_dist, axis=1) elif callable(self.bandwidth): bandwidth = self.bandwidth(pdx) else: bandwidth = self.bandwidth bandwidth = bandwidth * self.bandwidth_scale pdx = (pdx.T / bandwidth).T K = np.exp(-1 * np.power(pdx, self.decay)) # handle nan K = np.where(np.isnan(K), 1, K) tasklogger.log_complete("affinities") # truncate if sparse.issparse(K): if not (isinstance(K, sparse.csr_matrix) or isinstance(K, sparse.csc_matrix) or isinstance(K, sparse.bsr_matrix)): K = K.tocsr() K.data[K.data < self.thresh] = 0 K = K.tocoo() K.eliminate_zeros() K = K.tocsr() else: K[K < self.thresh] = 0 return K
def vxi_command_query(library, session, mode, command): """Sends the device a miscellaneous command or query and/or retrieves the response to a previous query. Corresponds to viVxiCommandQuery function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param mode: Specifies whether to issue a command and/or retrieve a response. (Constants.VXI_CMD*, .VXI_RESP*) :param command: The miscellaneous command to send. :return: The response retrieved from the device, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode` """ response = ViUInt32() ret = library.viVxiCommandQuery(session, mode, command, byref(response)) return response.value, ret
Sends the device a miscellaneous command or query and/or retrieves the response to a previous query. Corresponds to viVxiCommandQuery function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param mode: Specifies whether to issue a command and/or retrieve a response. (Constants.VXI_CMD*, .VXI_RESP*) :param command: The miscellaneous command to send. :return: The response retrieved from the device, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode`
Below is the the instruction that describes the task: ### Input: Sends the device a miscellaneous command or query and/or retrieves the response to a previous query. Corresponds to viVxiCommandQuery function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param mode: Specifies whether to issue a command and/or retrieve a response. (Constants.VXI_CMD*, .VXI_RESP*) :param command: The miscellaneous command to send. :return: The response retrieved from the device, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode` ### Response: def vxi_command_query(library, session, mode, command): """Sends the device a miscellaneous command or query and/or retrieves the response to a previous query. Corresponds to viVxiCommandQuery function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param mode: Specifies whether to issue a command and/or retrieve a response. (Constants.VXI_CMD*, .VXI_RESP*) :param command: The miscellaneous command to send. :return: The response retrieved from the device, return value of the library call. :rtype: int, :class:`pyvisa.constants.StatusCode` """ response = ViUInt32() ret = library.viVxiCommandQuery(session, mode, command, byref(response)) return response.value, ret
def abort(code, error=None, message=None): """ Abort with suitable error response Args: code (int): status code error (str): error symbol or flask.Response message (str): error message """ if error is None: flask_abort(code) elif isinstance(error, Response): error.status_code = code flask_abort(code, response=error) else: body = { "status": code, "error": error, "message": message } flask_abort(code, response=export(body, code))
Abort with suitable error response Args: code (int): status code error (str): error symbol or flask.Response message (str): error message
Below is the the instruction that describes the task: ### Input: Abort with suitable error response Args: code (int): status code error (str): error symbol or flask.Response message (str): error message ### Response: def abort(code, error=None, message=None): """ Abort with suitable error response Args: code (int): status code error (str): error symbol or flask.Response message (str): error message """ if error is None: flask_abort(code) elif isinstance(error, Response): error.status_code = code flask_abort(code, response=error) else: body = { "status": code, "error": error, "message": message } flask_abort(code, response=export(body, code))
def markdown_cell(markdown): r""" Args: markdown (str): Returns: str: json formatted ipython notebook markdown cell CommandLine: python -m ibeis.templates.generate_notebook --exec-markdown_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> markdown = '# Title' >>> result = markdown_cell(markdown) >>> print(result) """ import utool as ut markdown_header = ut.codeblock( ''' { "cell_type": "markdown", "metadata": {}, "source": [ ''' ) markdown_footer = ut.codeblock( ''' ] } ''' ) return (markdown_header + '\n' + ut.indent(repr_single_for_md(markdown), ' ' * 2) + '\n' + markdown_footer)
r""" Args: markdown (str): Returns: str: json formatted ipython notebook markdown cell CommandLine: python -m ibeis.templates.generate_notebook --exec-markdown_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> markdown = '# Title' >>> result = markdown_cell(markdown) >>> print(result)
Below is the the instruction that describes the task: ### Input: r""" Args: markdown (str): Returns: str: json formatted ipython notebook markdown cell CommandLine: python -m ibeis.templates.generate_notebook --exec-markdown_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> markdown = '# Title' >>> result = markdown_cell(markdown) >>> print(result) ### Response: def markdown_cell(markdown): r""" Args: markdown (str): Returns: str: json formatted ipython notebook markdown cell CommandLine: python -m ibeis.templates.generate_notebook --exec-markdown_cell Example: >>> # DISABLE_DOCTEST >>> from ibeis.templates.generate_notebook import * # NOQA >>> markdown = '# Title' >>> result = markdown_cell(markdown) >>> print(result) """ import utool as ut markdown_header = ut.codeblock( ''' { "cell_type": "markdown", "metadata": {}, "source": [ ''' ) markdown_footer = ut.codeblock( ''' ] } ''' ) return (markdown_header + '\n' + ut.indent(repr_single_for_md(markdown), ' ' * 2) + '\n' + markdown_footer)
def _default_cleanup(self): """Default cleanup after :meth:`run`. For in-place builds, moves the built shared library into the source directory. """ if not self.inplace: return shutil.move( os.path.join(self.build_lib, "bezier", "lib"), os.path.join("src", "bezier"), )
Default cleanup after :meth:`run`. For in-place builds, moves the built shared library into the source directory.
Below is the the instruction that describes the task: ### Input: Default cleanup after :meth:`run`. For in-place builds, moves the built shared library into the source directory. ### Response: def _default_cleanup(self): """Default cleanup after :meth:`run`. For in-place builds, moves the built shared library into the source directory. """ if not self.inplace: return shutil.move( os.path.join(self.build_lib, "bezier", "lib"), os.path.join("src", "bezier"), )
def Parser(grammar, **actions): r"""Make a parsing function from a peglet grammar, defining the grammar's semantic actions with keyword arguments. The parsing function maps a string to a results tuple or raises Unparsable. (It can optionally take a rule name to start from, by default the first in the grammar.) It doesn't necessarily match the whole input, just a prefix. >>> nums = Parser(r"nums = num ,\s* nums | num num = (\d+) int", int=int) >>> nums('42, 137, and 0 are magic numbers') (42, 137) >>> nums('The magic numbers are 42, 137, and 0') Traceback (most recent call last): Unparsable: ('nums', '', 'The magic numbers are 42, 137, and 0') """ parts = re.split(' ('+_identifier+') += ', ' '+re.sub(r'\s', ' ', grammar)) if len(parts) == 1 or parts[0].strip(): raise BadGrammar("Missing left hand side", parts[0]) if len(set(parts[1::2])) != len(parts[1::2]): raise BadGrammar("Multiply-defined rule(s)", grammar) rules = dict((lhs, [alt.split() for alt in (' '+rhs+' ').split(' | ')]) for lhs, rhs in zip(parts[1::2], parts[2::2])) return lambda text, rule=parts[1]: _parse(rules, actions, rule, text)
r"""Make a parsing function from a peglet grammar, defining the grammar's semantic actions with keyword arguments. The parsing function maps a string to a results tuple or raises Unparsable. (It can optionally take a rule name to start from, by default the first in the grammar.) It doesn't necessarily match the whole input, just a prefix. >>> nums = Parser(r"nums = num ,\s* nums | num num = (\d+) int", int=int) >>> nums('42, 137, and 0 are magic numbers') (42, 137) >>> nums('The magic numbers are 42, 137, and 0') Traceback (most recent call last): Unparsable: ('nums', '', 'The magic numbers are 42, 137, and 0')
Below is the the instruction that describes the task: ### Input: r"""Make a parsing function from a peglet grammar, defining the grammar's semantic actions with keyword arguments. The parsing function maps a string to a results tuple or raises Unparsable. (It can optionally take a rule name to start from, by default the first in the grammar.) It doesn't necessarily match the whole input, just a prefix. >>> nums = Parser(r"nums = num ,\s* nums | num num = (\d+) int", int=int) >>> nums('42, 137, and 0 are magic numbers') (42, 137) >>> nums('The magic numbers are 42, 137, and 0') Traceback (most recent call last): Unparsable: ('nums', '', 'The magic numbers are 42, 137, and 0') ### Response: def Parser(grammar, **actions): r"""Make a parsing function from a peglet grammar, defining the grammar's semantic actions with keyword arguments. The parsing function maps a string to a results tuple or raises Unparsable. (It can optionally take a rule name to start from, by default the first in the grammar.) It doesn't necessarily match the whole input, just a prefix. >>> nums = Parser(r"nums = num ,\s* nums | num num = (\d+) int", int=int) >>> nums('42, 137, and 0 are magic numbers') (42, 137) >>> nums('The magic numbers are 42, 137, and 0') Traceback (most recent call last): Unparsable: ('nums', '', 'The magic numbers are 42, 137, and 0') """ parts = re.split(' ('+_identifier+') += ', ' '+re.sub(r'\s', ' ', grammar)) if len(parts) == 1 or parts[0].strip(): raise BadGrammar("Missing left hand side", parts[0]) if len(set(parts[1::2])) != len(parts[1::2]): raise BadGrammar("Multiply-defined rule(s)", grammar) rules = dict((lhs, [alt.split() for alt in (' '+rhs+' ').split(' | ')]) for lhs, rhs in zip(parts[1::2], parts[2::2])) return lambda text, rule=parts[1]: _parse(rules, actions, rule, text)
def split_field_path(field_path): """Return the individual parts of a field path that may, apart from the fieldname, have label and subfield parts. Examples: 'start' -> ('start', None, None) 'phone_numbers__PrimaryPhone' -> ('phone_numbers', 'PrimaryPhone', None) 'physical_addresses__Home__street' -> ('physical_addresses', 'Home', 'street') """ if not isinstance(field_path, string_types): raise ValueError("Field path %r must be a string" % field_path) search_parts = field_path.split('__') field = search_parts[0] try: label = search_parts[1] except IndexError: label = None try: subfield = search_parts[2] except IndexError: subfield = None return field, label, subfield
Return the individual parts of a field path that may, apart from the fieldname, have label and subfield parts. Examples: 'start' -> ('start', None, None) 'phone_numbers__PrimaryPhone' -> ('phone_numbers', 'PrimaryPhone', None) 'physical_addresses__Home__street' -> ('physical_addresses', 'Home', 'street')
Below is the the instruction that describes the task: ### Input: Return the individual parts of a field path that may, apart from the fieldname, have label and subfield parts. Examples: 'start' -> ('start', None, None) 'phone_numbers__PrimaryPhone' -> ('phone_numbers', 'PrimaryPhone', None) 'physical_addresses__Home__street' -> ('physical_addresses', 'Home', 'street') ### Response: def split_field_path(field_path): """Return the individual parts of a field path that may, apart from the fieldname, have label and subfield parts. Examples: 'start' -> ('start', None, None) 'phone_numbers__PrimaryPhone' -> ('phone_numbers', 'PrimaryPhone', None) 'physical_addresses__Home__street' -> ('physical_addresses', 'Home', 'street') """ if not isinstance(field_path, string_types): raise ValueError("Field path %r must be a string" % field_path) search_parts = field_path.split('__') field = search_parts[0] try: label = search_parts[1] except IndexError: label = None try: subfield = search_parts[2] except IndexError: subfield = None return field, label, subfield
def _reserve(self, key): """ Reserve a component's binding temporarily. Protects against cycles. """ self.assign(key, RESERVED) try: yield finally: del self._cache[key]
Reserve a component's binding temporarily. Protects against cycles.
Below is the instruction that describes the task:
### Input:
Reserve a component's binding temporarily. Protects against cycles.
### Response:
def _reserve(self, key):
        """
        Reserve a component's binding temporarily. Protects against cycles.
        """
        self.assign(key, RESERVED)
        try:
            yield
        finally:
            del self._cache[key]
def wrap_state_dict(self, typename: str, state) -> Dict[str, Any]: """ Wrap the marshalled state in a dictionary. The returned dictionary has two keys, corresponding to the ``type_key`` and ``state_key`` options. The former holds the type name and the latter holds the marshalled state. :param typename: registered name of the custom type :param state: the marshalled state of the object :return: an object serializable by the serializer """ return {self.type_key: typename, self.state_key: state}
Wrap the marshalled state in a dictionary. The returned dictionary has two keys, corresponding to the ``type_key`` and ``state_key`` options. The former holds the type name and the latter holds the marshalled state. :param typename: registered name of the custom type :param state: the marshalled state of the object :return: an object serializable by the serializer
Below is the the instruction that describes the task: ### Input: Wrap the marshalled state in a dictionary. The returned dictionary has two keys, corresponding to the ``type_key`` and ``state_key`` options. The former holds the type name and the latter holds the marshalled state. :param typename: registered name of the custom type :param state: the marshalled state of the object :return: an object serializable by the serializer ### Response: def wrap_state_dict(self, typename: str, state) -> Dict[str, Any]: """ Wrap the marshalled state in a dictionary. The returned dictionary has two keys, corresponding to the ``type_key`` and ``state_key`` options. The former holds the type name and the latter holds the marshalled state. :param typename: registered name of the custom type :param state: the marshalled state of the object :return: an object serializable by the serializer """ return {self.type_key: typename, self.state_key: state}
def copy_or_move_subcommand(action, vcard_list, target_address_book_list):
    """Copy or move a contact to a different address book.

    :param action: the string "copy" or "move" to indicate what to do
    :type action: str
    :param vcard_list: the contact list from which to select one for the action
    :type vcard_list: list of carddav_object.CarddavObject
    :param target_address_book_list: the list of target address books
    :type target_address_book_list: list(addressbook.AddressBook)
    :returns: None
    :rtype: None
    """
    # get the source vcard, which to copy or move
    source_vcard = choose_vcard_from_list(
        "Select contact to %s" % action.title(), vcard_list)
    if source_vcard is None:
        print("Found no contact")
        sys.exit(1)
    else:
        print("%s contact %s from address book %s" % (
            action.title(), source_vcard, source_vcard.address_book))
    # get target address book
    # A single candidate that is identical to the source book leaves
    # nowhere to copy/move to, so bail out early in that case.
    if len(target_address_book_list) == 1 \
            and target_address_book_list[0] == source_vcard.address_book:
        print("The address book %s already contains the contact %s" % (
            target_address_book_list[0], source_vcard))
        sys.exit(1)
    else:
        # Offer only books other than the one the contact already lives in.
        available_address_books = [abook for abook in target_address_book_list
                                   if abook != source_vcard.address_book]
        selected_target_address_book = choose_address_book_from_list(
            "Select target address book", available_address_books)
        if selected_target_address_book is None:
            print("Error: address book list is empty")
            sys.exit(1)
    # check if a contact already exists in the target address book
    target_vcard = choose_vcard_from_list(
        "Select target contact which to overwrite",
        get_contact_list_by_user_selection(
            [selected_target_address_book], source_vcard.get_full_name(),
            True))
    # If the target contact doesn't exist, move or copy the source contact
    # into the target address book without further questions.
    if target_vcard is None:
        copy_contact(source_vcard, selected_target_address_book,
                     action == "move")
    else:
        if source_vcard == target_vcard:
            # source and target contact are identical
            print("Target contact: %s" % target_vcard)
            if action == "move":
                copy_contact(source_vcard, selected_target_address_book, True)
            else:
                print("The selected contacts are already identical")
        else:
            # source and target contacts are different
            # either overwrite the target one or merge into target contact
            print("The address book %s already contains the contact %s\n\n"
                  "Source\n\n%s\n\nTarget\n\n%s\n\n"
                  "Possible actions:\n"
                  " a: %s anyway\n"
                  " m: Merge from source into target contact\n"
                  " o: Overwrite target contact\n"
                  " q: Quit" % (
                      target_vcard.address_book, source_vcard,
                      source_vcard.print_vcard(), target_vcard.print_vcard(),
                      "Move" if action == "move" else "Copy"))
            # Interactive loop: keep asking until one of the recognized
            # choices is entered.
            while True:
                input_string = input("Your choice: ")
                if input_string.lower() == "a":
                    # Copy/move anyway, keeping the existing target contact.
                    copy_contact(source_vcard, selected_target_address_book,
                                 action == "move")
                    break
                if input_string.lower() == "o":
                    # Overwrite: copy/move first, then remove the old target.
                    copy_contact(source_vcard, selected_target_address_book,
                                 action == "move")
                    target_vcard.delete_vcard_file()
                    break
                if input_string.lower() == "m":
                    merge_existing_contacts(source_vcard, target_vcard,
                                            action == "move")
                    break
                if input_string.lower() in ["", "q"]:
                    print("Canceled")
                    break
Copy or move a contact to a different address book. :action: the string "copy" or "move" to indicate what to do :type action: str :param vcard_list: the contact list from which to select one for the action :type vcard_list: list of carddav_object.CarddavObject :param target_address_book_list: the list of target address books :type target_address_book_list: list(addressbook.AddressBook) :returns: None :rtype: None
Below is the the instruction that describes the task: ### Input: Copy or move a contact to a different address book. :action: the string "copy" or "move" to indicate what to do :type action: str :param vcard_list: the contact list from which to select one for the action :type vcard_list: list of carddav_object.CarddavObject :param target_address_book_list: the list of target address books :type target_address_book_list: list(addressbook.AddressBook) :returns: None :rtype: None ### Response: def copy_or_move_subcommand(action, vcard_list, target_address_book_list): """Copy or move a contact to a different address book. :action: the string "copy" or "move" to indicate what to do :type action: str :param vcard_list: the contact list from which to select one for the action :type vcard_list: list of carddav_object.CarddavObject :param target_address_book_list: the list of target address books :type target_address_book_list: list(addressbook.AddressBook) :returns: None :rtype: None """ # get the source vcard, which to copy or move source_vcard = choose_vcard_from_list( "Select contact to %s" % action.title(), vcard_list) if source_vcard is None: print("Found no contact") sys.exit(1) else: print("%s contact %s from address book %s" % (action.title(), source_vcard, source_vcard.address_book)) # get target address book if len(target_address_book_list) == 1 \ and target_address_book_list[0] == source_vcard.address_book: print("The address book %s already contains the contact %s" % (target_address_book_list[0], source_vcard)) sys.exit(1) else: available_address_books = [abook for abook in target_address_book_list if abook != source_vcard.address_book] selected_target_address_book = choose_address_book_from_list( "Select target address book", available_address_books) if selected_target_address_book is None: print("Error: address book list is empty") sys.exit(1) # check if a contact already exists in the target address book target_vcard = choose_vcard_from_list( "Select target 
contact which to overwrite", get_contact_list_by_user_selection([selected_target_address_book], source_vcard.get_full_name(), True)) # If the target contact doesn't exist, move or copy the source contact into # the target address book without further questions. if target_vcard is None: copy_contact(source_vcard, selected_target_address_book, action == "move") else: if source_vcard == target_vcard: # source and target contact are identical print("Target contact: %s" % target_vcard) if action == "move": copy_contact(source_vcard, selected_target_address_book, True) else: print("The selected contacts are already identical") else: # source and target contacts are different # either overwrite the target one or merge into target contact print("The address book %s already contains the contact %s\n\n" "Source\n\n%s\n\nTarget\n\n%s\n\n" "Possible actions:\n" " a: %s anyway\n" " m: Merge from source into target contact\n" " o: Overwrite target contact\n" " q: Quit" % ( target_vcard.address_book, source_vcard, source_vcard.print_vcard(), target_vcard.print_vcard(), "Move" if action == "move" else "Copy")) while True: input_string = input("Your choice: ") if input_string.lower() == "a": copy_contact(source_vcard, selected_target_address_book, action == "move") break if input_string.lower() == "o": copy_contact(source_vcard, selected_target_address_book, action == "move") target_vcard.delete_vcard_file() break if input_string.lower() == "m": merge_existing_contacts(source_vcard, target_vcard, action == "move") break if input_string.lower() in ["", "q"]: print("Canceled") break
def build_rules(rule_yaml, match_plugins, action_plugins):
    """Convert parsed rule YAML into a list of ruleset objects.

    :param rule_yaml: Dictionary parsed from YAML rule file
    :param match_plugins: Dictionary of match plugins
        (key=config_name, value=plugin object)
    :param action_plugins: Dictionary of action plugins
        (key=config_name, value=plugin object)
    :return: list of rules
    """
    return [
        RuleSet(section,
                match_plugins=match_plugins,
                action_plugins=action_plugins)
        for section in rule_yaml
    ]
Convert parsed rule YAML in to a list of ruleset objects :param rule_yaml: Dictionary parsed from YAML rule file :param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object) :param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object) :return: list of rules
Below is the the instruction that describes the task: ### Input: Convert parsed rule YAML in to a list of ruleset objects :param rule_yaml: Dictionary parsed from YAML rule file :param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object) :param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object) :return: list of rules ### Response: def build_rules(rule_yaml, match_plugins, action_plugins): """ Convert parsed rule YAML in to a list of ruleset objects :param rule_yaml: Dictionary parsed from YAML rule file :param match_plugins: Dictionary of match plugins (key=config_name, value=plugin object) :param action_plugins: Dictionary of action plugins (key=config_name, value=plugin object) :return: list of rules """ rule_sets = [] for yaml_section in rule_yaml: rule_sets.append(RuleSet(yaml_section, match_plugins=match_plugins, action_plugins=action_plugins)) return rule_sets
def real_code(source):
    """Simplify `source` for analysis.

    It replaces:

    * comments with spaces
    * strs with a new str filled with spaces
    * implicit and explicit continuations with spaces
    * tabs and semicolons with spaces

    The resulting code is a lot easier to analyze if we are interested
    only in offsets.
    """
    # First pass: blank out comments and string-literal contents.
    changes = codeanalyze.ChangeCollector(source)
    for start, end in ignored_regions(source):
        if source[start] == '#':
            # Comments are replaced entirely by spaces.
            filler = ' ' * (end - start)
        else:
            # Strings keep their surrounding quotes; the contents become
            # spaces so offsets remain valid.
            filler = '"%s"' % (' ' * (end - start - 2))
        changes.add_change(start, end, filler)
    source = changes.get_changed() or source

    # Second pass: blank newlines that occur inside open brackets
    # (implicit line continuations).
    changes = codeanalyze.ChangeCollector(source)
    depth = 0
    for match in _parens.finditer(source):
        token = match.group()
        if token in '({[':
            depth += 1
        elif token in ')}]':
            depth -= 1
        elif token == '\n' and depth > 0:
            changes.add_change(match.start(), match.start() + 1, ' ')
    source = changes.get_changed() or source

    # Finally handle explicit continuations, tabs and semicolons.
    return source.replace('\\\n', ' ').replace('\t', ' ').replace(';', '\n')
Simplify `source` for analysis It replaces: * comments with spaces * strs with a new str filled with spaces * implicit and explicit continuations with spaces * tabs and semicolons with spaces The resulting code is a lot easier to analyze if we are interested only in offsets.
Below is the the instruction that describes the task: ### Input: Simplify `source` for analysis It replaces: * comments with spaces * strs with a new str filled with spaces * implicit and explicit continuations with spaces * tabs and semicolons with spaces The resulting code is a lot easier to analyze if we are interested only in offsets. ### Response: def real_code(source): """Simplify `source` for analysis It replaces: * comments with spaces * strs with a new str filled with spaces * implicit and explicit continuations with spaces * tabs and semicolons with spaces The resulting code is a lot easier to analyze if we are interested only in offsets. """ collector = codeanalyze.ChangeCollector(source) for start, end in ignored_regions(source): if source[start] == '#': replacement = ' ' * (end - start) else: replacement = '"%s"' % (' ' * (end - start - 2)) collector.add_change(start, end, replacement) source = collector.get_changed() or source collector = codeanalyze.ChangeCollector(source) parens = 0 for match in _parens.finditer(source): i = match.start() c = match.group() if c in '({[': parens += 1 if c in ')}]': parens -= 1 if c == '\n' and parens > 0: collector.add_change(i, i + 1, ' ') source = collector.get_changed() or source return source.replace('\\\n', ' ').replace('\t', ' ').replace(';', '\n')
def from_json_compatible(schema, dct):
    """Build a `schema` instance from a JSON-encodable dict.

    Keys that are not declared in ``schema._fields`` are skipped with a
    warning; every other value is decoded through its field type's
    ``avro_load`` before being passed to the schema constructor.
    """
    kwargs = {}
    for key, raw_value in dct.items():
        field_type = schema._fields.get(key)
        if field_type is not None:
            kwargs[key] = field_type.avro_load(raw_value)
        else:
            warnings.warn("Unexpected field encountered in line for record %s: %r" % (schema.__name__, key))
    return schema(**kwargs)
Load from json-encodable
Below is the the instruction that describes the task: ### Input: Load from json-encodable ### Response: def from_json_compatible(schema, dct): "Load from json-encodable" kwargs = {} for key in dct: field_type = schema._fields.get(key) if field_type is None: warnings.warn("Unexpected field encountered in line for record %s: %r" % (schema.__name__, key)) continue kwargs[key] = field_type.avro_load(dct[key]) return schema(**kwargs)
def p_information_constructor(self, p):
    # Parser production: builds an Information object from a numeric
    # quantity followed by an information-unit token.  The string below is
    # the grammar rule itself (consumed by the parser generator -- the
    # p_* naming and docstring-rule convention match PLY/yacc style) and
    # must not be treated as ordinary documentation.
    'information : NUMBER INFORMATION_UNIT'
    logger.debug('information = number %s, information unit %s', p[1], p[2])
    # p[0] receives the semantic value of the "information" non-terminal;
    # p[1] is the NUMBER token value, p[2] the INFORMATION_UNIT value.
    p[0] = Information.from_quantity_unit(p[1], p[2])
information : NUMBER INFORMATION_UNIT
Below is the the instruction that describes the task: ### Input: information : NUMBER INFORMATION_UNIT ### Response: def p_information_constructor(self, p): 'information : NUMBER INFORMATION_UNIT' logger.debug('information = number %s, information unit %s', p[1], p[2]) p[0] = Information.from_quantity_unit(p[1], p[2])
def _search_uid(self, query): """Search for contacts with a matching uid. :param query: the query to search for :type query: str :yields: all found contacts :rtype: generator(carddav_object.CarddavObject) """ try: # First we treat the argument as a full UID and try to match it # exactly. yield self.contacts[query] except KeyError: # If that failed we look for all contacts whos UID start with the # given query. for uid in self.contacts: if uid.startswith(query): yield self.contacts[uid]
Search for contacts with a matching uid. :param query: the query to search for :type query: str :yields: all found contacts :rtype: generator(carddav_object.CarddavObject)
Below is the the instruction that describes the task: ### Input: Search for contacts with a matching uid. :param query: the query to search for :type query: str :yields: all found contacts :rtype: generator(carddav_object.CarddavObject) ### Response: def _search_uid(self, query): """Search for contacts with a matching uid. :param query: the query to search for :type query: str :yields: all found contacts :rtype: generator(carddav_object.CarddavObject) """ try: # First we treat the argument as a full UID and try to match it # exactly. yield self.contacts[query] except KeyError: # If that failed we look for all contacts whos UID start with the # given query. for uid in self.contacts: if uid.startswith(query): yield self.contacts[uid]
def get_default_config(self):
    """Return the default collector settings."""
    # Collector-specific defaults, layered on top of the base settings.
    defaults = {
        'path': 'rabbitmq',
        'host': 'localhost:55672',
        'user': 'guest',
        'password': 'guest',
        'replace_dot': False,
        'replace_slash': False,
        'queues_ignored': '',
        'cluster': False,
        'scheme': 'http',
        'query_individual_queues': False,
    }
    config = super(RabbitMQCollector, self).get_default_config()
    config.update(defaults)
    return config
Returns the default collector settings
Below is the the instruction that describes the task: ### Input: Returns the default collector settings ### Response: def get_default_config(self): """ Returns the default collector settings """ config = super(RabbitMQCollector, self).get_default_config() config.update({ 'path': 'rabbitmq', 'host': 'localhost:55672', 'user': 'guest', 'password': 'guest', 'replace_dot': False, 'replace_slash': False, 'queues_ignored': '', 'cluster': False, 'scheme': 'http', 'query_individual_queues': False, }) return config
def get_appdir(self, portable_path=None, folder=None, create=False):
    '''Return a per-application data directory with write access.

    When the program is installed (see ``is_installed``), the
    platform-specific user data directory for ``self.app_name`` is used
    (via ``appdirs.user_data_dir``).  Otherwise ``portable_path`` is
    used if given, falling back to the current working directory.

    :param portable_path: base directory to use when the program is not
        installed (portable mode)
    :param folder: sub-folder appended to the base directory; when
        running portable and no folder is given it defaults to
        ``'data^-^'``
    :param create: when True, create the directory (and any missing
        parents) if it does not already exist
    :returns: the resolved (``os.path.realpath``) directory path

    ---Background----
    On Windows Vista and later, read/write access inside Program Files
    is restricted without prompting for UAC.  Keeping working data
    (logging data etc.) in the application data folder lets the program
    run without admin rights.
    '''
    if self.is_installed():
        appdir = appdirs.user_data_dir(self.app_name)
    elif portable_path:
        appdir = portable_path
        if not folder:
            # NOTE(review): unusual default folder name in portable mode;
            # confirm whether 'data^-^' is intentional.
            folder = 'data^-^'
    else:
        appdir = os.getcwd()
    if folder:
        path = os.path.join(appdir, folder)
    else:
        path = appdir
    path = os.path.realpath(path)
    if create:
        os.makedirs(path, exist_ok=1)
    return path
path = uac_bypass(file) This function will only operate when your program is installed check the is_installed function for details Moves working data to another folder. The idea is to get around security and uac on windows vista + returns cwd on linux, windows returns path with write access: C:\\ProgramData\\appname here if a file is passed in it will be appended to the path set create to true to create the file in the programData folder setting overwrite to True will silently overwrite, otherwise a FileExistsError is raised Setting overwrite to get, will get the file path instead of throwing an error ---Background---- If a user is using windows, Read write access is restriced in the Program Files directory without prompting for uac. We create a folder in c:\Program Data\ of the program name and save logging data etc there. This way the program doesnt need admin rights.
Below is the the instruction that describes the task: ### Input: path = uac_bypass(file) This function will only operate when your program is installed check the is_installed function for details Moves working data to another folder. The idea is to get around security and uac on windows vista + returns cwd on linux, windows returns path with write access: C:\\ProgramData\\appname here if a file is passed in it will be appended to the path set create to true to create the file in the programData folder setting overwrite to True will silently overwrite, otherwise a FileExistsError is raised Setting overwrite to get, will get the file path instead of throwing an error ---Background---- If a user is using windows, Read write access is restriced in the Program Files directory without prompting for uac. We create a folder in c:\Program Data\ of the program name and save logging data etc there. This way the program doesnt need admin rights. ### Response: def get_appdir(self, portable_path=None, folder=None, create=False): ''' path = uac_bypass(file) This function will only operate when your program is installed check the is_installed function for details Moves working data to another folder. The idea is to get around security and uac on windows vista + returns cwd on linux, windows returns path with write access: C:\\ProgramData\\appname here if a file is passed in it will be appended to the path set create to true to create the file in the programData folder setting overwrite to True will silently overwrite, otherwise a FileExistsError is raised Setting overwrite to get, will get the file path instead of throwing an error ---Background---- If a user is using windows, Read write access is restriced in the Program Files directory without prompting for uac. We create a folder in c:\Program Data\ of the program name and save logging data etc there. This way the program doesnt need admin rights. 
''' if self.is_installed(): appdir = appdirs.user_data_dir(self.app_name) elif portable_path: appdir = portable_path if not folder: folder = 'data^-^' else: appdir = os.getcwd() if folder: path = os.path.join(appdir, folder) else: path = appdir path = os.path.realpath(path) if create: os.makedirs(path, exist_ok=1) return path
def pre_save(self, instance, add):
    """
    Updates the edtf value from the value of the natural_text_field.
    If there's a valid edtf, then set the derived date field values.

    Returns the (possibly None) EDTF object stored on the instance, or
    None without side effects when no natural-text field is configured.
    """
    # Nothing to do when no natural-text source field is configured, or
    # when the EDTF attribute is not present on this instance.
    if not self.natural_text_field or self.attname not in instance.__dict__:
        return
    # (This value is immediately recomputed below from the natural text.)
    edtf = getattr(instance, self.attname)
    # Update EDTF field based on latest natural text value, if any
    natural_text = getattr(instance, self.natural_text_field)
    if natural_text:
        edtf = text_to_edtf(natural_text)
    else:
        edtf = None
    # TODO If `natural_text_field` becomes cleared the derived EDTF field
    # value should also be cleared, rather than left at original value?
    # TODO Handle case where EDTF field is set to a string directly, not
    # via `natural_text_field` (this is a slightly unexpected use-case, but
    # is a very efficient way to set EDTF values in situations like for API
    # imports so we probably want to continue to support it?)
    if edtf and not isinstance(edtf, EDTFObject):
        edtf = parse_edtf(edtf, fail_silently=True)
    setattr(instance, self.attname, edtf)
    # set or clear related date fields on the instance
    for attr in DATE_ATTRS:
        field_attr = "%s_field" % attr
        g = getattr(self, field_attr, None)
        if g:
            if edtf:
                try:
                    target_field = instance._meta.get_field(g)
                except FieldDoesNotExist:
                    # Configured derived field does not exist on the model;
                    # skip it silently.
                    continue
                value = getattr(edtf, attr)()  # struct_time
                # FloatFields get the struct_time_to_jd conversion
                # (presumably a Julian-date float -- confirm), DateFields
                # get a date; any other field type is unsupported.
                if isinstance(target_field, models.FloatField):
                    value = struct_time_to_jd(value)
                elif isinstance(target_field, models.DateField):
                    value = struct_time_to_date(value)
                else:
                    raise NotImplementedError(
                        u"EDTFField does not support %s as a derived data"
                        u" field, only FloatField or DateField"
                        % type(target_field))
                setattr(instance, g, value)
            else:
                # No EDTF value: clear the derived field.
                setattr(instance, g, None)
    return edtf
Updates the edtf value from the value of the display_field. If there's a valid edtf, then set the date values.
Below is the the instruction that describes the task: ### Input: Updates the edtf value from the value of the display_field. If there's a valid edtf, then set the date values. ### Response: def pre_save(self, instance, add): """ Updates the edtf value from the value of the display_field. If there's a valid edtf, then set the date values. """ if not self.natural_text_field or self.attname not in instance.__dict__: return edtf = getattr(instance, self.attname) # Update EDTF field based on latest natural text value, if any natural_text = getattr(instance, self.natural_text_field) if natural_text: edtf = text_to_edtf(natural_text) else: edtf = None # TODO If `natural_text_field` becomes cleared the derived EDTF field # value should also be cleared, rather than left at original value? # TODO Handle case where EDTF field is set to a string directly, not # via `natural_text_field` (this is a slightly unexpected use-case, but # is a very efficient way to set EDTF values in situations like for API # imports so we probably want to continue to support it?) if edtf and not isinstance(edtf, EDTFObject): edtf = parse_edtf(edtf, fail_silently=True) setattr(instance, self.attname, edtf) # set or clear related date fields on the instance for attr in DATE_ATTRS: field_attr = "%s_field" % attr g = getattr(self, field_attr, None) if g: if edtf: try: target_field = instance._meta.get_field(g) except FieldDoesNotExist: continue value = getattr(edtf, attr)() # struct_time if isinstance(target_field, models.FloatField): value = struct_time_to_jd(value) elif isinstance(target_field, models.DateField): value = struct_time_to_date(value) else: raise NotImplementedError( u"EDTFField does not support %s as a derived data" u" field, only FloatField or DateField" % type(target_field)) setattr(instance, g, value) else: setattr(instance, g, None) return edtf
def get_vars(self):
    """Return a dict of all variables defined in this scope.

    Variables inherited from the parent scope (if any) are included; a
    variable defined both here and in an ancestor scope resolves to this
    scope's value.  The returned dict is a fresh copy, so callers may
    mutate it without affecting the scope.
    """
    if self.parent is None:
        # Root scope: return a copy so callers can't mutate our state.
        return dict(self.variables)
    # Start from the parent's (already copied) variables and let the
    # local definitions shadow them.
    merged = self.parent.get_vars()
    merged.update(self.variables)
    return merged
Returns a complete dict of all variables that are defined in this scope, including the variables of the parent.
Below is the the instruction that describes the task: ### Input: Returns a complete dict of all variables that are defined in this scope, including the variables of the parent. ### Response: def get_vars(self): """ Returns a complete dict of all variables that are defined in this scope, including the variables of the parent. """ if self.parent is None: vars = {} vars.update(self.variables) return vars vars = self.parent.get_vars() vars.update(self.variables) return vars
def integridad_data(self, data_integr=None, key=None):
    """Type-specific data-integrity check.

    Verifies the timezone and frequency of the stored data, on top of
    the base-class check that every dataframe index in the database is
    date-based, unique (no duplicates) and increasing.

    :param data_integr: optional data to check (forwarded to the base class)
    :param key: optional key selecting which dataset to check (forwarded)
    """
    # Only run the extra frequency/timezone asserts on the full default
    # dataset, i.e. when no explicit data or key was given and all the
    # expected keys are present.
    if data_integr is None and key is None and all(k in self.data.keys() for k in KEYS_DATA_DEM):
        # NOTE(review): assert-based validation is skipped under "python -O".
        assert(self.data[KEYS_DATA_DEM[0]].index.freq == FREQ_DAT_DEM
               and self.data[KEYS_DATA_DEM[0]].index.tz == self.TZ)
        if self.data[KEYS_DATA_DEM[1]] is not None:
            # The secondary dataset, when present, must have daily frequency.
            assert(self.data[KEYS_DATA_DEM[1]].index.freq == 'D')
    super(DatosREE, self).integridad_data(data_integr, key)
Definición específica para comprobar timezone y frecuencia de los datos, además de comprobar que el index de cada dataframe de la base de datos sea de fechas, único (sin duplicados) y creciente :param data_integr: :param key:
Below is the the instruction that describes the task: ### Input: Definición específica para comprobar timezone y frecuencia de los datos, además de comprobar que el index de cada dataframe de la base de datos sea de fechas, único (sin duplicados) y creciente :param data_integr: :param key: ### Response: def integridad_data(self, data_integr=None, key=None): """ Definición específica para comprobar timezone y frecuencia de los datos, además de comprobar que el index de cada dataframe de la base de datos sea de fechas, único (sin duplicados) y creciente :param data_integr: :param key: """ if data_integr is None and key is None and all(k in self.data.keys() for k in KEYS_DATA_DEM): assert(self.data[KEYS_DATA_DEM[0]].index.freq == FREQ_DAT_DEM and self.data[KEYS_DATA_DEM[0]].index.tz == self.TZ) if self.data[KEYS_DATA_DEM[1]] is not None: assert(self.data[KEYS_DATA_DEM[1]].index.freq == 'D') super(DatosREE, self).integridad_data(data_integr, key)
def dynamic_load(name):
    """Equivalent of a "from X import Y" statement using dot notation to
    specify what to import and return.

    For example, ``foo.bar.thing`` returns the item "thing" in the
    module "foo.bar".

    :param name: fully dotted path, e.g. ``"foo.bar.thing"``
    :returns: the attribute named by the last dotted component
    :raises ImportError: if the module part cannot be imported
    :raises AttributeError: if the module lacks the named attribute
    """
    import importlib

    module_name, _, item = name.rpartition('.')
    # importlib.import_module is the documented replacement for the
    # awkward __import__(..., fromlist=[item]) idiom: it returns the leaf
    # module ("foo.bar") directly instead of the top-level package.
    module = importlib.import_module(module_name)
    return getattr(module, item)
Equivalent of "from X import Y" statement using dot notation to specify what to import and return. For example, foo.bar.thing returns the item "thing" in the module "foo.bar"
Below is the the instruction that describes the task: ### Input: Equivalent of "from X import Y" statement using dot notation to specify what to import and return. For example, foo.bar.thing returns the item "thing" in the module "foo.bar" ### Response: def dynamic_load(name): """Equivalent of "from X import Y" statement using dot notation to specify what to import and return. For example, foo.bar.thing returns the item "thing" in the module "foo.bar" """ pieces = name.split('.') item = pieces[-1] mod_name = '.'.join(pieces[:-1]) mod = __import__(mod_name, globals(), locals(), [item]) return getattr(mod, item)
def download_source_dists(self, arguments, use_wheels=False):
    """Download missing source distributions.

    :param arguments: The command line arguments to ``pip install ...``
        (a list of strings).
    :param use_wheels: Whether pip and pip-accel are allowed to use
        wheels_ (:data:`False` by default for backwards compatibility
        with callers that use pip-accel as a Python API).
    :raises: Any exceptions raised by pip.
    """
    # Time the whole download step so the final log line can report it.
    timer = Timer()
    logger.info("Downloading missing distribution(s) ..")
    requirement_set = self.get_pip_requirement_set(
        arguments, use_remote_index=True, use_wheels=use_wheels)
    logger.info("Finished downloading distribution(s) in %s.", timer)
    return requirement_set
Download missing source distributions. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). :raises: Any exceptions raised by pip.
Below is the the instruction that describes the task: ### Input: Download missing source distributions. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). :raises: Any exceptions raised by pip. ### Response: def download_source_dists(self, arguments, use_wheels=False): """ Download missing source distributions. :param arguments: The command line arguments to ``pip install ...`` (a list of strings). :param use_wheels: Whether pip and pip-accel are allowed to use wheels_ (:data:`False` by default for backwards compatibility with callers that use pip-accel as a Python API). :raises: Any exceptions raised by pip. """ download_timer = Timer() logger.info("Downloading missing distribution(s) ..") requirements = self.get_pip_requirement_set(arguments, use_remote_index=True, use_wheels=use_wheels) logger.info("Finished downloading distribution(s) in %s.", download_timer) return requirements
def com_google_fonts_check_gasp(ttFont):
    """Is 'gasp' table set to optimize rendering?"""
    # Generator-style check: yields (status, message) tuples.  The status
    # constants (FAIL/WARN/INFO/PASS) come from the surrounding check
    # framework and are not defined in this file.
    if "gasp" not in ttFont.keys():
        yield FAIL, ("Font is missing the 'gasp' table."
                     " Try exporting the font with autohinting enabled.")
    else:
        if not isinstance(ttFont["gasp"].gaspRange, dict):
            yield FAIL, "'gasp' table has no values."
        else:
            failed = False
            # 0xFFFF is the catch-all PPM range that covers all font sizes.
            if 0xFFFF not in ttFont["gasp"].gaspRange:
                yield WARN, ("'gasp' table does not have an entry for all"
                             " font sizes (gaspRange 0xFFFF).")
            else:
                # Human-readable meaning of each gasp behavior flag bit.
                gasp_meaning = {
                    0x01: "- Use gridfitting",
                    0x02: "- Use grayscale rendering",
                    0x04: "- Use gridfitting with ClearType symmetric smoothing",
                    0x08: "- Use smoothing along multiple axes with ClearType®"
                }
                # Build an informational listing of every declared PPM
                # range and the flags it enables.
                table = []
                for key in ttFont["gasp"].gaspRange.keys():
                    value = ttFont["gasp"].gaspRange[key]
                    meaning = []
                    for flag, info in gasp_meaning.items():
                        if value & flag:
                            meaning.append(info)
                    meaning = "\n\t".join(meaning)
                    table.append(f"PPM <= {key}:\n\tflag = 0x{value:02X}\n\t{meaning}")
                table = "\n".join(table)
                yield INFO, ("These are the ppm ranges declared on the"
                             f" gasp table:\n\n{table}\n")
                # Warn about any range other than the catch-all 0xFFFF, and
                # verify the 0xFFFF entry enables all four behaviors (0x0F).
                for key in ttFont["gasp"].gaspRange.keys():
                    if key != 0xFFFF:
                        yield WARN, ("'gasp' table has a gaspRange of {} that"
                                     " may be unneccessary.").format(key)
                        failed = True
                    else:
                        value = ttFont["gasp"].gaspRange[0xFFFF]
                        if value != 0x0F:
                            failed = True
                            yield WARN, (f"gaspRange 0xFFFF value 0x{value:02X}"
                                         " should be set to 0x0F.")
                if not failed:
                    yield PASS, ("'gasp' table is correctly set, with one "
                                 "gaspRange:value of 0xFFFF:0x0F.")
Is 'gasp' table set to optimize rendering?
Below is the the instruction that describes the task: ### Input: Is 'gasp' table set to optimize rendering? ### Response: def com_google_fonts_check_gasp(ttFont): """Is 'gasp' table set to optimize rendering?""" if "gasp" not in ttFont.keys(): yield FAIL, ("Font is missing the 'gasp' table." " Try exporting the font with autohinting enabled.") else: if not isinstance(ttFont["gasp"].gaspRange, dict): yield FAIL, "'gasp' table has no values." else: failed = False if 0xFFFF not in ttFont["gasp"].gaspRange: yield WARN, ("'gasp' table does not have an entry for all" " font sizes (gaspRange 0xFFFF).") else: gasp_meaning = { 0x01: "- Use gridfitting", 0x02: "- Use grayscale rendering", 0x04: "- Use gridfitting with ClearType symmetric smoothing", 0x08: "- Use smoothing along multiple axes with ClearType®" } table = [] for key in ttFont["gasp"].gaspRange.keys(): value = ttFont["gasp"].gaspRange[key] meaning = [] for flag, info in gasp_meaning.items(): if value & flag: meaning.append(info) meaning = "\n\t".join(meaning) table.append(f"PPM <= {key}:\n\tflag = 0x{value:02X}\n\t{meaning}") table = "\n".join(table) yield INFO, ("These are the ppm ranges declared on the" f" gasp table:\n\n{table}\n") for key in ttFont["gasp"].gaspRange.keys(): if key != 0xFFFF: yield WARN, ("'gasp' table has a gaspRange of {} that" " may be unneccessary.").format(key) failed = True else: value = ttFont["gasp"].gaspRange[0xFFFF] if value != 0x0F: failed = True yield WARN, (f"gaspRange 0xFFFF value 0x{value:02X}" " should be set to 0x0F.") if not failed: yield PASS, ("'gasp' table is correctly set, with one " "gaspRange:value of 0xFFFF:0x0F.")
def deserialize(self, raw_jws, key=None, alg=None):
    """Deserialize a JWS token.

    NOTE: Destroys any current status and tries to import the raw
    JWS provided.

    :param raw_jws: a 'raw' JWS token (JSON Encoded or Compact
     notation) string.
    :param key: A (:class:`jwcrypto.jwk.JWK`) verification key (optional).
     If a key is provided a verification step will be attempted after
     the object is successfully deserialized.
    :param alg: The signing algorithm (optional). Usually the algorithm
     is known as it is provided with the JOSE Headers of the token.

    :raises InvalidJWSObject: if the raw object is an invalid JWS token.
    :raises InvalidJWSSignature: if the verification fails.
    """
    self.objects = dict()
    o = dict()
    try:
        try:
            # First try the JSON serializations (General or Flattened).
            djws = json_decode(raw_jws)
            if 'signatures' in djws:
                # General JSON serialization: one entry per signature.
                o['signatures'] = list()
                for s in djws['signatures']:
                    os = self._deserialize_signature(s)
                    o['signatures'].append(os)
                    self._deserialize_b64(o, os.get('protected'))
            else:
                # Flattened JSON serialization: a single signature.
                o = self._deserialize_signature(djws)
                self._deserialize_b64(o, o.get('protected'))
            if 'payload' in djws:
                if o.get('b64', True):
                    o['payload'] = base64url_decode(str(djws['payload']))
                else:
                    # 'b64' header is False: the payload is carried
                    # unencoded, so keep it as-is.
                    o['payload'] = djws['payload']
        except ValueError:
            # Not JSON: fall back to the Compact serialization
            # (three dot-separated base64url segments).
            c = raw_jws.split('.')
            if len(c) != 3:
                raise InvalidJWSObject('Unrecognized representation')
            p = base64url_decode(str(c[0]))
            if len(p) > 0:
                o['protected'] = p.decode('utf-8')
                self._deserialize_b64(o, o['protected'])
            o['payload'] = base64url_decode(str(c[1]))
            o['signature'] = base64url_decode(str(c[2]))
        self.objects = o
    except Exception as e:  # pylint: disable=broad-except
        # Normalize any parsing failure into InvalidJWSObject, keeping
        # the original error text for diagnostics.
        raise InvalidJWSObject('Invalid format', repr(e))

    if key:
        self.verify(key, alg)
Deserialize a JWS token. NOTE: Destroys any current status and tries to import the raw JWS provided. :param raw_jws: a 'raw' JWS token (JSON Encoded or Compact notation) string. :param key: A (:class:`jwcrypto.jwk.JWK`) verification key (optional). If a key is provided a verification step will be attempted after the object is successfully deserialized. :param alg: The signing algorithm (optional). Usually the algorithm is known as it is provided with the JOSE Headers of the token. :raises InvalidJWSObject: if the raw object is an invalid JWS token. :raises InvalidJWSSignature: if the verification fails.
Below is the the instruction that describes the task: ### Input: Deserialize a JWS token. NOTE: Destroys any current status and tries to import the raw JWS provided. :param raw_jws: a 'raw' JWS token (JSON Encoded or Compact notation) string. :param key: A (:class:`jwcrypto.jwk.JWK`) verification key (optional). If a key is provided a verification step will be attempted after the object is successfully deserialized. :param alg: The signing algorithm (optional). usually the algorithm is known as it is provided with the JOSE Headers of the token. :raises InvalidJWSObject: if the raw object is an invaid JWS token. :raises InvalidJWSSignature: if the verification fails. ### Response: def deserialize(self, raw_jws, key=None, alg=None): """Deserialize a JWS token. NOTE: Destroys any current status and tries to import the raw JWS provided. :param raw_jws: a 'raw' JWS token (JSON Encoded or Compact notation) string. :param key: A (:class:`jwcrypto.jwk.JWK`) verification key (optional). If a key is provided a verification step will be attempted after the object is successfully deserialized. :param alg: The signing algorithm (optional). usually the algorithm is known as it is provided with the JOSE Headers of the token. :raises InvalidJWSObject: if the raw object is an invaid JWS token. :raises InvalidJWSSignature: if the verification fails. 
""" self.objects = dict() o = dict() try: try: djws = json_decode(raw_jws) if 'signatures' in djws: o['signatures'] = list() for s in djws['signatures']: os = self._deserialize_signature(s) o['signatures'].append(os) self._deserialize_b64(o, os.get('protected')) else: o = self._deserialize_signature(djws) self._deserialize_b64(o, o.get('protected')) if 'payload' in djws: if o.get('b64', True): o['payload'] = base64url_decode(str(djws['payload'])) else: o['payload'] = djws['payload'] except ValueError: c = raw_jws.split('.') if len(c) != 3: raise InvalidJWSObject('Unrecognized representation') p = base64url_decode(str(c[0])) if len(p) > 0: o['protected'] = p.decode('utf-8') self._deserialize_b64(o, o['protected']) o['payload'] = base64url_decode(str(c[1])) o['signature'] = base64url_decode(str(c[2])) self.objects = o except Exception as e: # pylint: disable=broad-except raise InvalidJWSObject('Invalid format', repr(e)) if key: self.verify(key, alg)
def run(arguments: typing.List[str] = None):
    """Run the cauldron command line tool and exit with its status code.

    :param arguments: optional argument list forwarded to the invoke
        parser; ``None`` lets the parser use its own defaults.
    """
    initialize()

    # Imported lazily so initialization runs before these modules load.
    from cauldron.invoke import parser
    from cauldron.invoke import invoker

    parsed = parser.parse(arguments)
    status_code = invoker.run(parsed.get('command'), parsed)
    sys.exit(status_code)
Executes the cauldron command
Below is the the instruction that describes the task: ### Input: Executes the cauldron command ### Response: def run(arguments: typing.List[str] = None): """Executes the cauldron command""" initialize() from cauldron.invoke import parser from cauldron.invoke import invoker args = parser.parse(arguments) exit_code = invoker.run(args.get('command'), args) sys.exit(exit_code)
def support_in_progress_warcs():
    '''
    Monkey-patch pywb.warc.pathresolvers.PrefixResolver to include warcs
    still being written to (warcs having ".open" suffix). This way if a
    cdx entry references foo.warc.gz, pywb will try both foo.warc.gz and
    foo.warc.gz.open.
    '''
    original_call = pywb.warc.pathresolvers.PrefixResolver.__call__

    def call_with_open_variants(self, filename, cdx=None):
        # For every candidate path the stock resolver yields, also offer
        # the ".open" in-progress variant.
        candidates = []
        for warc_path in original_call(self, filename, cdx):
            candidates.extend((warc_path, '%s.open' % warc_path))
        return candidates

    pywb.warc.pathresolvers.PrefixResolver.__call__ = call_with_open_variants
Monkey-patch pywb.warc.pathresolvers.PrefixResolver to include warcs still being written to (warcs having ".open" suffix). This way if a cdx entry references foo.warc.gz, pywb will try both foo.warc.gz and foo.warc.gz.open.
Below is the the instruction that describes the task: ### Input: Monkey-patch pywb.warc.pathresolvers.PrefixResolver to include warcs still being written to (warcs having ".open" suffix). This way if a cdx entry references foo.warc.gz, pywb will try both foo.warc.gz and foo.warc.gz.open. ### Response: def support_in_progress_warcs(): ''' Monkey-patch pywb.warc.pathresolvers.PrefixResolver to include warcs still being written to (warcs having ".open" suffix). This way if a cdx entry references foo.warc.gz, pywb will try both foo.warc.gz and foo.warc.gz.open. ''' _orig_prefix_resolver_call = pywb.warc.pathresolvers.PrefixResolver.__call__ def _prefix_resolver_call(self, filename, cdx=None): raw_results = _orig_prefix_resolver_call(self, filename, cdx) results = [] for warc_path in raw_results: results.append(warc_path) results.append('%s.open' % warc_path) return results pywb.warc.pathresolvers.PrefixResolver.__call__ = _prefix_resolver_call
def main(argv=None):
    """Parse passed in cooked single HTML.

    Reads a collated (cooked) single-HTML file, reconstitutes the binder
    model from it, and optionally dumps the model tree and/or writes the
    model out as an epub. When an input epub is supplied, its
    ``resources/`` entries are copied into the output epub.

    :param argv: optional argument list (defaults to ``sys.argv[1:]``)
    :returns: exit code ``0`` on success
    :raises ValueError: when ``-i`` is given without a usable ``-o`` target
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('collated_html', type=argparse.FileType('r'),
                        help='Path to the collated html'
                             ' file (use - for stdin)')
    parser.add_argument('-d', '--dump-tree', action='store_true',
                        help='Print out parsed model tree.')
    parser.add_argument('-o', '--output', type=argparse.FileType('w+'),
                        help='Write out epub of parsed tree.')
    parser.add_argument('-i', '--input', type=argparse.FileType('r'),
                        help='Read and copy resources/ for output epub.')
    args = parser.parse_args(argv)

    if args.input:
        # Copying resources requires a real, seekable output file.
        # Fail early with a clear message instead of crashing later with
        # an AttributeError (no -o given) or writing a zip to stdout.
        if args.output is None:
            raise ValueError('An output file (-o) is required '
                             'when reading resources')
        if args.output == sys.stdout:
            raise ValueError('Cannot output to stdout if reading resources')

    from cnxepub.collation import reconstitute
    binder = reconstitute(args.collated_html)

    if args.dump_tree:
        print(pformat(cnxepub.model_to_tree(binder)),
              file=sys.stdout)
    if args.output:
        cnxepub.adapters.make_epub(binder, args.output)

    if args.input:
        # Append the resources/ entries of the input epub onto the epub
        # that was just written.
        args.output.seek(0)
        zout = ZipFile(args.output, 'a', ZIP_DEFLATED)
        zin = ZipFile(args.input, 'r')
        for res in zin.namelist():
            if res.startswith('resources'):
                zres = zin.open(res)
                zi = zin.getinfo(res)
                zout.writestr(zi, zres.read(), ZIP_DEFLATED)
        zout.close()

    # TODO Check for documents that have no identifier.
    # These should likely be composite-documents
    # or the metadata got wiped out.
    # docs = [x for x in cnxepub.flatten_to(binder, only_documents_filter)
    #         if x.ident_hash is None]

    return 0
Parse passed in cooked single HTML.
Below is the the instruction that describes the task: ### Input: Parse passed in cooked single HTML. ### Response: def main(argv=None): """Parse passed in cooked single HTML.""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('collated_html', type=argparse.FileType('r'), help='Path to the collated html' ' file (use - for stdin)') parser.add_argument('-d', '--dump-tree', action='store_true', help='Print out parsed model tree.') parser.add_argument('-o', '--output', type=argparse.FileType('w+'), help='Write out epub of parsed tree.') parser.add_argument('-i', '--input', type=argparse.FileType('r'), help='Read and copy resources/ for output epub.') args = parser.parse_args(argv) if args.input and args.output == sys.stdout: raise ValueError('Cannot output to stdout if reading resources') from cnxepub.collation import reconstitute binder = reconstitute(args.collated_html) if args.dump_tree: print(pformat(cnxepub.model_to_tree(binder)), file=sys.stdout) if args.output: cnxepub.adapters.make_epub(binder, args.output) if args.input: args.output.seek(0) zout = ZipFile(args.output, 'a', ZIP_DEFLATED) zin = ZipFile(args.input, 'r') for res in zin.namelist(): if res.startswith('resources'): zres = zin.open(res) zi = zin.getinfo(res) zout.writestr(zi, zres.read(), ZIP_DEFLATED) zout.close() # TODO Check for documents that have no identifier. # These should likely be composite-documents # or the the metadata got wiped out. # docs = [x for x in cnxepub.flatten_to(binder, only_documents_filter) # if x.ident_hash is None] return 0
def leaf_list(cls, name, parent=None, interleave=None):
    """Create and return a ``_list_`` node representing a leaf-list."""
    result = cls("_list_", parent, interleave=interleave)
    result.attr["name"] = name
    # Leaf-lists carry no keys and default to an unbounded 0..n cardinality.
    for attribute, value in (("keys", None), ("minEl", "0"),
                             ("maxEl", None), ("occur", 3)):
        setattr(result, attribute, value)
    return result
Create _list_ node for a leaf-list.
Below is the the instruction that describes the task: ### Input: Create _list_ node for a leaf-list. ### Response: def leaf_list(cls, name, parent=None, interleave=None): """Create _list_ node for a leaf-list.""" node = cls("_list_", parent, interleave=interleave) node.attr["name"] = name node.keys = None node.minEl = "0" node.maxEl = None node.occur = 3 return node
def require(*reqs):
    '''Require tasks or files at runtime.

    Accepts task objects or strings.  A string is resolved through the
    GENERATES registry when possible; otherwise it must already exist as
    a file on disk.
    '''
    for requirement in reqs:
        if type(requirement) is str:
            if requirement not in GENERATES:
                # Unknown generator: only acceptable when the named file
                # already exists on disk.
                if not os.path.exists(requirement):
                    abort(LOCALE['abort_bad_file'].format(requirement))
                return
            # Known generator: swap the name for its task object.
            requirement = GENERATES[requirement]
        if requirement.valid is None:
            # Task has not been run yet; tasks that take arguments
            # cannot be implicitly required.
            if len(requirement.args):
                abort(LOCALE['abort_bad_args'], requirement,
                      len(requirement.args))
            requirement()
        if requirement.valid is False:
            abort(LOCALE['abort_bad_task'], requirement)
Require tasks or files at runtime.
Below is the the instruction that describes the task: ### Input: Require tasks or files at runtime. ### Response: def require(*reqs): '''Require tasks or files at runtime.''' for req in reqs: if type(req) is str: # does not exist and unknown generator if not os.path.exists(req) and req not in GENERATES: abort(LOCALE['abort_bad_file'].format(req)) # exists but unknown generator if req not in GENERATES: return # exists and known generator if req in GENERATES: req = GENERATES[req] if req.valid is None: if len(req.args): abort(LOCALE['abort_bad_args'], req, len(req.args)) req() if req.valid is False: abort(LOCALE['abort_bad_task'], req)
def load_all_distributions(self):
    """Replace the :attr:`distributions` attribute with all scipy distributions.

    A distribution is identified by exposing a ``fit`` method; the
    attribute is set to a fresh list of the matching attribute names
    found in :mod:`scipy.stats`.
    """
    # Use getattr/hasattr instead of building strings for eval(): it is
    # safer and also tolerates module attributes that raise on access.
    self.distributions = [
        name for name in dir(scipy.stats)
        if hasattr(getattr(scipy.stats, name, None), "fit")
    ]
Replace the :attr:`distributions` attribute with all scipy distributions
Below is the the instruction that describes the task: ### Input: Replace the :attr:`distributions` attribute with all scipy distributions ### Response: def load_all_distributions(self): """Replace the :attr:`distributions` attribute with all scipy distributions""" distributions = [] for this in dir(scipy.stats): if "fit" in eval("dir(scipy.stats." + this +")"): distributions.append(this) self.distributions = distributions[:]