code
stringlengths
75
104k
code_sememe
stringlengths
47
309k
token_type
stringlengths
215
214k
code_dependency
stringlengths
75
155k
def granules(self): """Return list of SentinelGranule objects.""" for element in self._product_metadata.iter("Product_Info"): product_organisation = element.find("Product_Organisation") if self.product_format == 'SAFE': return [ SentinelGranule(_id.find("Granules"), self) for _id in product_organisation.findall("Granule_List") ] elif self.product_format == 'SAFE_COMPACT': return [ SentinelGranuleCompact(_id.find("Granule"), self) for _id in product_organisation.findall("Granule_List") ] else: raise Exception( "PRODUCT_FORMAT not recognized in metadata file, found: '" + str(self.safe_format) + "' accepted are 'SAFE' and 'SAFE_COMPACT'" )
def function[granules, parameter[self]]: constant[Return list of SentinelGranule objects.] for taget[name[element]] in starred[call[name[self]._product_metadata.iter, parameter[constant[Product_Info]]]] begin[:] variable[product_organisation] assign[=] call[name[element].find, parameter[constant[Product_Organisation]]] if compare[name[self].product_format equal[==] constant[SAFE]] begin[:] return[<ast.ListComp object at 0x7da1b255af20>]
keyword[def] identifier[granules] ( identifier[self] ): literal[string] keyword[for] identifier[element] keyword[in] identifier[self] . identifier[_product_metadata] . identifier[iter] ( literal[string] ): identifier[product_organisation] = identifier[element] . identifier[find] ( literal[string] ) keyword[if] identifier[self] . identifier[product_format] == literal[string] : keyword[return] [ identifier[SentinelGranule] ( identifier[_id] . identifier[find] ( literal[string] ), identifier[self] ) keyword[for] identifier[_id] keyword[in] identifier[product_organisation] . identifier[findall] ( literal[string] ) ] keyword[elif] identifier[self] . identifier[product_format] == literal[string] : keyword[return] [ identifier[SentinelGranuleCompact] ( identifier[_id] . identifier[find] ( literal[string] ), identifier[self] ) keyword[for] identifier[_id] keyword[in] identifier[product_organisation] . identifier[findall] ( literal[string] ) ] keyword[else] : keyword[raise] identifier[Exception] ( literal[string] + identifier[str] ( identifier[self] . identifier[safe_format] )+ literal[string] )
def granules(self): """Return list of SentinelGranule objects.""" for element in self._product_metadata.iter('Product_Info'): product_organisation = element.find('Product_Organisation') # depends on [control=['for'], data=['element']] if self.product_format == 'SAFE': return [SentinelGranule(_id.find('Granules'), self) for _id in product_organisation.findall('Granule_List')] # depends on [control=['if'], data=[]] elif self.product_format == 'SAFE_COMPACT': return [SentinelGranuleCompact(_id.find('Granule'), self) for _id in product_organisation.findall('Granule_List')] # depends on [control=['if'], data=[]] else: raise Exception("PRODUCT_FORMAT not recognized in metadata file, found: '" + str(self.safe_format) + "' accepted are 'SAFE' and 'SAFE_COMPACT'")
def get_image_by_kind(self, kind): """ returns a image of a specific kind """ for ss in self.images: if ss.kind == kind: return ss return None
def function[get_image_by_kind, parameter[self, kind]]: constant[ returns a image of a specific kind ] for taget[name[ss]] in starred[name[self].images] begin[:] if compare[name[ss].kind equal[==] name[kind]] begin[:] return[name[ss]] return[constant[None]]
keyword[def] identifier[get_image_by_kind] ( identifier[self] , identifier[kind] ): literal[string] keyword[for] identifier[ss] keyword[in] identifier[self] . identifier[images] : keyword[if] identifier[ss] . identifier[kind] == identifier[kind] : keyword[return] identifier[ss] keyword[return] keyword[None]
def get_image_by_kind(self, kind): """ returns a image of a specific kind """ for ss in self.images: if ss.kind == kind: return ss # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ss']] return None
def extract_features(self, data_frame, pre=''): """ This method extracts all the features available to the Tremor Processor class. :param data_frame: the data frame :type data_frame: pandas.DataFrame :return: amplitude_by_fft, frequency_by_fft, amplitude_by_welch, frequency_by_fft, bradykinesia_amplitude_by_fft, \ bradykinesia_frequency_by_fft, bradykinesia_amplitude_by_welch, bradykinesia_frequency_by_welch, \ magnitude_approximate_entropy, magnitude_autocorrelation_lag_8, magnitude_autocorrelation_lag_9, \ magnitude_partial_autocorrelation_lag_3, magnitude_partial_autocorrelation_lag_5, \ magnitude_partial_autocorrelation_lag_6, magnitude_minimum, magnitude_mean, \ magnitude_ratio_value_number_to_time_series_length, magnitude_change_quantiles, magnitude_number_peaks, \ magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept, \ magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue, \ magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept, \ magnitude_spkt_welch_density_coeff_2, magnitude_spkt_welch_density_coeff_5, \ magnitude_spkt_welch_density_coeff_8, magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints, \ magnitude_abs_energy, magnitude_fft_aggregated_centroid, magnitude_fft_aggregated_centroid, \ magnitude_fft_coefficient_abs_coeff_44, magnitude_fft_coefficient_abs_coeff_63, \ magnitude_fft_coefficient_abs_coeff_0, magnitude_fft_coefficient_real_coeff_0, \ magnitude_fft_coefficient_real_coeff_23, magnitude_sum_values :rtype: list """ try: magnitude_partial_autocorrelation = self.partial_autocorrelation(data_frame.mag_sum_acc) magnitude_agg_linear = self.agg_linear_trend(data_frame.mag_sum_acc) magnitude_spkt_welch_density = self.spkt_welch_density(data_frame.mag_sum_acc) magnitude_fft_coefficient = self.fft_coefficient(data_frame.mag_sum_acc) return {pre+'amplitude_by_fft': self.amplitude(data_frame)[0], pre+'frequency_by_fft': self.amplitude(data_frame)[1], pre+'amplitude_by_welch': self.amplitude(data_frame, 'welch')[0], pre+'frequency_by_welch': self.amplitude(data_frame, 'welch')[1], pre+'bradykinesia_amplitude_by_fft': self.bradykinesia(data_frame)[0], pre+'bradykinesia_frequency_by_fft': self.bradykinesia(data_frame)[1], pre+'bradykinesia_amplitude_by_welch': self.bradykinesia(data_frame, 'welch')[0], pre+'bradykinesia_frequency_by_welch': self.bradykinesia(data_frame, 'welch')[1], pre+'magnitude_approximate_entropy': self.approximate_entropy(data_frame.mag_sum_acc), pre+'magnitude_autocorrelation_lag_8': self.autocorrelation(data_frame.mag_sum_acc, 8), pre+'magnitude_autocorrelation_lag_9': self.autocorrelation(data_frame.mag_sum_acc, 9), pre+'magnitude_partial_autocorrelation_lag_3': magnitude_partial_autocorrelation[0][1], pre+'magnitude_partial_autocorrelation_lag_5': magnitude_partial_autocorrelation[1][1], pre+'magnitude_partial_autocorrelation_lag_6': magnitude_partial_autocorrelation[2][1], pre+'magnitude_minimum': self.minimum(data_frame.mag_sum_acc), pre+'magnitude_mean': self.mean(data_frame.mag_sum_acc), pre+'magnitude_ratio_value_number_to_time_series_length': self.ratio_value_number_to_time_series_length(data_frame.mag_sum_acc), pre+'magnitude_change_quantiles': self.change_quantiles(data_frame.mag_sum_acc), pre+'magnitude_number_peaks': self.number_peaks(data_frame.mag_sum_acc), pre+'magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept': magnitude_agg_linear[0][1], pre+'magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue': magnitude_agg_linear[1][1], pre+'magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept': magnitude_agg_linear[2][1], pre+'magnitude_spkt_welch_density_coeff_2': magnitude_spkt_welch_density[0][1], pre+'magnitude_spkt_welch_density_coeff_5': magnitude_spkt_welch_density[1][1], pre+'magnitude_spkt_welch_density_coeff_8': magnitude_spkt_welch_density[2][1], pre+'magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints': self.percentage_of_reoccurring_datapoints_to_all_datapoints(data_frame.mag_sum_acc), pre+'magnitude_abs_energy': self.abs_energy(data_frame.mag_sum_acc), pre+'magnitude_fft_aggregated_centroid': self.fft_aggregated(data_frame.mag_sum_acc)[0][1], pre+'magnitude_fft_coefficient_abs_coeff_44': magnitude_fft_coefficient[0][1], pre+'magnitude_fft_coefficient_abs_coeff_63': magnitude_fft_coefficient[1][1], pre+'magnitude_fft_coefficient_abs_coeff_0': magnitude_fft_coefficient[2][1], pre+'magnitude_fft_coefficient_real_coeff_0': magnitude_fft_coefficient[3][1], pre+'magnitude_fft_coefficient_real_coeff_23': magnitude_fft_coefficient[4][1], pre+'magnitude_sum_values': self.sum_values(data_frame.mag_sum_acc)} except: logging.error("Error on TremorProcessor process, extract features: %s", sys.exc_info()[0])
def function[extract_features, parameter[self, data_frame, pre]]: constant[ This method extracts all the features available to the Tremor Processor class. :param data_frame: the data frame :type data_frame: pandas.DataFrame :return: amplitude_by_fft, frequency_by_fft, amplitude_by_welch, frequency_by_fft, bradykinesia_amplitude_by_fft, bradykinesia_frequency_by_fft, bradykinesia_amplitude_by_welch, bradykinesia_frequency_by_welch, magnitude_approximate_entropy, magnitude_autocorrelation_lag_8, magnitude_autocorrelation_lag_9, magnitude_partial_autocorrelation_lag_3, magnitude_partial_autocorrelation_lag_5, magnitude_partial_autocorrelation_lag_6, magnitude_minimum, magnitude_mean, magnitude_ratio_value_number_to_time_series_length, magnitude_change_quantiles, magnitude_number_peaks, magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept, magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue, magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept, magnitude_spkt_welch_density_coeff_2, magnitude_spkt_welch_density_coeff_5, magnitude_spkt_welch_density_coeff_8, magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints, magnitude_abs_energy, magnitude_fft_aggregated_centroid, magnitude_fft_aggregated_centroid, magnitude_fft_coefficient_abs_coeff_44, magnitude_fft_coefficient_abs_coeff_63, magnitude_fft_coefficient_abs_coeff_0, magnitude_fft_coefficient_real_coeff_0, magnitude_fft_coefficient_real_coeff_23, magnitude_sum_values :rtype: list ] <ast.Try object at 0x7da18dc99ab0>
keyword[def] identifier[extract_features] ( identifier[self] , identifier[data_frame] , identifier[pre] = literal[string] ): literal[string] keyword[try] : identifier[magnitude_partial_autocorrelation] = identifier[self] . identifier[partial_autocorrelation] ( identifier[data_frame] . identifier[mag_sum_acc] ) identifier[magnitude_agg_linear] = identifier[self] . identifier[agg_linear_trend] ( identifier[data_frame] . identifier[mag_sum_acc] ) identifier[magnitude_spkt_welch_density] = identifier[self] . identifier[spkt_welch_density] ( identifier[data_frame] . identifier[mag_sum_acc] ) identifier[magnitude_fft_coefficient] = identifier[self] . identifier[fft_coefficient] ( identifier[data_frame] . identifier[mag_sum_acc] ) keyword[return] { identifier[pre] + literal[string] : identifier[self] . identifier[amplitude] ( identifier[data_frame] )[ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[amplitude] ( identifier[data_frame] )[ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[amplitude] ( identifier[data_frame] , literal[string] )[ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[amplitude] ( identifier[data_frame] , literal[string] )[ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[bradykinesia] ( identifier[data_frame] )[ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[bradykinesia] ( identifier[data_frame] )[ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[bradykinesia] ( identifier[data_frame] , literal[string] )[ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[bradykinesia] ( identifier[data_frame] , literal[string] )[ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[approximate_entropy] ( identifier[data_frame] . identifier[mag_sum_acc] ), identifier[pre] + literal[string] : identifier[self] . identifier[autocorrelation] ( identifier[data_frame] . identifier[mag_sum_acc] , literal[int] ), identifier[pre] + literal[string] : identifier[self] . identifier[autocorrelation] ( identifier[data_frame] . identifier[mag_sum_acc] , literal[int] ), identifier[pre] + literal[string] : identifier[magnitude_partial_autocorrelation] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_partial_autocorrelation] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_partial_autocorrelation] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[minimum] ( identifier[data_frame] . identifier[mag_sum_acc] ), identifier[pre] + literal[string] : identifier[self] . identifier[mean] ( identifier[data_frame] . identifier[mag_sum_acc] ), identifier[pre] + literal[string] : identifier[self] . identifier[ratio_value_number_to_time_series_length] ( identifier[data_frame] . identifier[mag_sum_acc] ), identifier[pre] + literal[string] : identifier[self] . identifier[change_quantiles] ( identifier[data_frame] . identifier[mag_sum_acc] ), identifier[pre] + literal[string] : identifier[self] . identifier[number_peaks] ( identifier[data_frame] . identifier[mag_sum_acc] ), identifier[pre] + literal[string] : identifier[magnitude_agg_linear] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_agg_linear] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_agg_linear] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_spkt_welch_density] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_spkt_welch_density] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_spkt_welch_density] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[percentage_of_reoccurring_datapoints_to_all_datapoints] ( identifier[data_frame] . identifier[mag_sum_acc] ), identifier[pre] + literal[string] : identifier[self] . identifier[abs_energy] ( identifier[data_frame] . identifier[mag_sum_acc] ), identifier[pre] + literal[string] : identifier[self] . identifier[fft_aggregated] ( identifier[data_frame] . identifier[mag_sum_acc] )[ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_fft_coefficient] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_fft_coefficient] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_fft_coefficient] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_fft_coefficient] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[magnitude_fft_coefficient] [ literal[int] ][ literal[int] ], identifier[pre] + literal[string] : identifier[self] . identifier[sum_values] ( identifier[data_frame] . identifier[mag_sum_acc] )} keyword[except] : identifier[logging] . identifier[error] ( literal[string] , identifier[sys] . identifier[exc_info] ()[ literal[int] ])
def extract_features(self, data_frame, pre=''): """ This method extracts all the features available to the Tremor Processor class. :param data_frame: the data frame :type data_frame: pandas.DataFrame :return: amplitude_by_fft, frequency_by_fft, amplitude_by_welch, frequency_by_fft, bradykinesia_amplitude_by_fft, bradykinesia_frequency_by_fft, bradykinesia_amplitude_by_welch, bradykinesia_frequency_by_welch, magnitude_approximate_entropy, magnitude_autocorrelation_lag_8, magnitude_autocorrelation_lag_9, magnitude_partial_autocorrelation_lag_3, magnitude_partial_autocorrelation_lag_5, magnitude_partial_autocorrelation_lag_6, magnitude_minimum, magnitude_mean, magnitude_ratio_value_number_to_time_series_length, magnitude_change_quantiles, magnitude_number_peaks, magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept, magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue, magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept, magnitude_spkt_welch_density_coeff_2, magnitude_spkt_welch_density_coeff_5, magnitude_spkt_welch_density_coeff_8, magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints, magnitude_abs_energy, magnitude_fft_aggregated_centroid, magnitude_fft_aggregated_centroid, magnitude_fft_coefficient_abs_coeff_44, magnitude_fft_coefficient_abs_coeff_63, magnitude_fft_coefficient_abs_coeff_0, magnitude_fft_coefficient_real_coeff_0, magnitude_fft_coefficient_real_coeff_23, magnitude_sum_values :rtype: list """ try: magnitude_partial_autocorrelation = self.partial_autocorrelation(data_frame.mag_sum_acc) magnitude_agg_linear = self.agg_linear_trend(data_frame.mag_sum_acc) magnitude_spkt_welch_density = self.spkt_welch_density(data_frame.mag_sum_acc) magnitude_fft_coefficient = self.fft_coefficient(data_frame.mag_sum_acc) return {pre + 'amplitude_by_fft': self.amplitude(data_frame)[0], pre + 'frequency_by_fft': self.amplitude(data_frame)[1], pre + 'amplitude_by_welch': self.amplitude(data_frame, 'welch')[0], pre + 'frequency_by_welch': self.amplitude(data_frame, 'welch')[1], pre + 'bradykinesia_amplitude_by_fft': self.bradykinesia(data_frame)[0], pre + 'bradykinesia_frequency_by_fft': self.bradykinesia(data_frame)[1], pre + 'bradykinesia_amplitude_by_welch': self.bradykinesia(data_frame, 'welch')[0], pre + 'bradykinesia_frequency_by_welch': self.bradykinesia(data_frame, 'welch')[1], pre + 'magnitude_approximate_entropy': self.approximate_entropy(data_frame.mag_sum_acc), pre + 'magnitude_autocorrelation_lag_8': self.autocorrelation(data_frame.mag_sum_acc, 8), pre + 'magnitude_autocorrelation_lag_9': self.autocorrelation(data_frame.mag_sum_acc, 9), pre + 'magnitude_partial_autocorrelation_lag_3': magnitude_partial_autocorrelation[0][1], pre + 'magnitude_partial_autocorrelation_lag_5': magnitude_partial_autocorrelation[1][1], pre + 'magnitude_partial_autocorrelation_lag_6': magnitude_partial_autocorrelation[2][1], pre + 'magnitude_minimum': self.minimum(data_frame.mag_sum_acc), pre + 'magnitude_mean': self.mean(data_frame.mag_sum_acc), pre + 'magnitude_ratio_value_number_to_time_series_length': self.ratio_value_number_to_time_series_length(data_frame.mag_sum_acc), pre + 'magnitude_change_quantiles': self.change_quantiles(data_frame.mag_sum_acc), pre + 'magnitude_number_peaks': self.number_peaks(data_frame.mag_sum_acc), pre + 'magnitude_agg_linear_trend_min_chunk_len_5_attr_intercept': magnitude_agg_linear[0][1], pre + 'magnitude_agg_linear_trend_var_chunk_len_10_attr_rvalue': magnitude_agg_linear[1][1], pre + 'magnitude_agg_linear_trend_min_chunk_len_10_attr_intercept': magnitude_agg_linear[2][1], pre + 'magnitude_spkt_welch_density_coeff_2': magnitude_spkt_welch_density[0][1], pre + 'magnitude_spkt_welch_density_coeff_5': magnitude_spkt_welch_density[1][1], pre + 'magnitude_spkt_welch_density_coeff_8': magnitude_spkt_welch_density[2][1], pre + 'magnitude_percentage_of_reoccurring_datapoints_to_all_datapoints': self.percentage_of_reoccurring_datapoints_to_all_datapoints(data_frame.mag_sum_acc), pre + 'magnitude_abs_energy': self.abs_energy(data_frame.mag_sum_acc), pre + 'magnitude_fft_aggregated_centroid': self.fft_aggregated(data_frame.mag_sum_acc)[0][1], pre + 'magnitude_fft_coefficient_abs_coeff_44': magnitude_fft_coefficient[0][1], pre + 'magnitude_fft_coefficient_abs_coeff_63': magnitude_fft_coefficient[1][1], pre + 'magnitude_fft_coefficient_abs_coeff_0': magnitude_fft_coefficient[2][1], pre + 'magnitude_fft_coefficient_real_coeff_0': magnitude_fft_coefficient[3][1], pre + 'magnitude_fft_coefficient_real_coeff_23': magnitude_fft_coefficient[4][1], pre + 'magnitude_sum_values': self.sum_values(data_frame.mag_sum_acc)} # depends on [control=['try'], data=[]] except: logging.error('Error on TremorProcessor process, extract features: %s', sys.exc_info()[0]) # depends on [control=['except'], data=[]]
def create(self): """Create this instance. See https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1#google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance .. note:: Uses the ``project`` and ``instance_id`` on the current :class:`Instance` in addition to the ``display_name``. To change them before creating, reset the values via .. code:: python instance.display_name = 'New display name' instance.instance_id = 'i-changed-my-mind' before calling :meth:`create`. :rtype: :class:`google.api_core.operation.Operation` :returns: an operation instance :raises Conflict: if the instance already exists """ api = self._client.instance_admin_api instance_pb = admin_v1_pb2.Instance( name=self.name, config=self.configuration_name, display_name=self.display_name, node_count=self.node_count, ) metadata = _metadata_with_prefix(self.name) future = api.create_instance( parent=self._client.project_name, instance_id=self.instance_id, instance=instance_pb, metadata=metadata, ) return future
def function[create, parameter[self]]: constant[Create this instance. See https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1#google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance .. note:: Uses the ``project`` and ``instance_id`` on the current :class:`Instance` in addition to the ``display_name``. To change them before creating, reset the values via .. code:: python instance.display_name = 'New display name' instance.instance_id = 'i-changed-my-mind' before calling :meth:`create`. :rtype: :class:`google.api_core.operation.Operation` :returns: an operation instance :raises Conflict: if the instance already exists ] variable[api] assign[=] name[self]._client.instance_admin_api variable[instance_pb] assign[=] call[name[admin_v1_pb2].Instance, parameter[]] variable[metadata] assign[=] call[name[_metadata_with_prefix], parameter[name[self].name]] variable[future] assign[=] call[name[api].create_instance, parameter[]] return[name[future]]
keyword[def] identifier[create] ( identifier[self] ): literal[string] identifier[api] = identifier[self] . identifier[_client] . identifier[instance_admin_api] identifier[instance_pb] = identifier[admin_v1_pb2] . identifier[Instance] ( identifier[name] = identifier[self] . identifier[name] , identifier[config] = identifier[self] . identifier[configuration_name] , identifier[display_name] = identifier[self] . identifier[display_name] , identifier[node_count] = identifier[self] . identifier[node_count] , ) identifier[metadata] = identifier[_metadata_with_prefix] ( identifier[self] . identifier[name] ) identifier[future] = identifier[api] . identifier[create_instance] ( identifier[parent] = identifier[self] . identifier[_client] . identifier[project_name] , identifier[instance_id] = identifier[self] . identifier[instance_id] , identifier[instance] = identifier[instance_pb] , identifier[metadata] = identifier[metadata] , ) keyword[return] identifier[future]
def create(self): """Create this instance. See https://cloud.google.com/spanner/reference/rpc/google.spanner.admin.instance.v1#google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance .. note:: Uses the ``project`` and ``instance_id`` on the current :class:`Instance` in addition to the ``display_name``. To change them before creating, reset the values via .. code:: python instance.display_name = 'New display name' instance.instance_id = 'i-changed-my-mind' before calling :meth:`create`. :rtype: :class:`google.api_core.operation.Operation` :returns: an operation instance :raises Conflict: if the instance already exists """ api = self._client.instance_admin_api instance_pb = admin_v1_pb2.Instance(name=self.name, config=self.configuration_name, display_name=self.display_name, node_count=self.node_count) metadata = _metadata_with_prefix(self.name) future = api.create_instance(parent=self._client.project_name, instance_id=self.instance_id, instance=instance_pb, metadata=metadata) return future
def save_object(self, obj): """ Save object to disk as JSON. Generally shouldn't be called directly. """ obj.pre_save(self.jurisdiction.jurisdiction_id) filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-') self.info('save %s %s as %s', obj._type, obj, filename) self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())), cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': '))) self.output_names[obj._type].add(filename) with open(os.path.join(self.datadir, filename), 'w') as f: json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus) # validate after writing, allows for inspection on failure try: obj.validate() except ValueError as ve: if self.strict_validation: raise ve else: self.warning(ve) # after saving and validating, save subordinate objects for obj in obj._related: self.save_object(obj)
def function[save_object, parameter[self, obj]]: constant[ Save object to disk as JSON. Generally shouldn't be called directly. ] call[name[obj].pre_save, parameter[name[self].jurisdiction.jurisdiction_id]] variable[filename] assign[=] call[call[constant[{0}_{1}.json].format, parameter[name[obj]._type, name[obj]._id]].replace, parameter[constant[/], constant[-]]] call[name[self].info, parameter[constant[save %s %s as %s], name[obj]._type, name[obj], name[filename]]] call[name[self].debug, parameter[call[name[json].dumps, parameter[call[name[OrderedDict], parameter[call[name[sorted], parameter[call[call[name[obj].as_dict, parameter[]].items, parameter[]]]]]]]]]] call[call[name[self].output_names][name[obj]._type].add, parameter[name[filename]]] with call[name[open], parameter[call[name[os].path.join, parameter[name[self].datadir, name[filename]]], constant[w]]] begin[:] call[name[json].dump, parameter[call[name[obj].as_dict, parameter[]], name[f]]] <ast.Try object at 0x7da18dc9afb0> for taget[name[obj]] in starred[name[obj]._related] begin[:] call[name[self].save_object, parameter[name[obj]]]
keyword[def] identifier[save_object] ( identifier[self] , identifier[obj] ): literal[string] identifier[obj] . identifier[pre_save] ( identifier[self] . identifier[jurisdiction] . identifier[jurisdiction_id] ) identifier[filename] = literal[string] . identifier[format] ( identifier[obj] . identifier[_type] , identifier[obj] . identifier[_id] ). identifier[replace] ( literal[string] , literal[string] ) identifier[self] . identifier[info] ( literal[string] , identifier[obj] . identifier[_type] , identifier[obj] , identifier[filename] ) identifier[self] . identifier[debug] ( identifier[json] . identifier[dumps] ( identifier[OrderedDict] ( identifier[sorted] ( identifier[obj] . identifier[as_dict] (). identifier[items] ())), identifier[cls] = identifier[utils] . identifier[JSONEncoderPlus] , identifier[indent] = literal[int] , identifier[separators] =( literal[string] , literal[string] ))) identifier[self] . identifier[output_names] [ identifier[obj] . identifier[_type] ]. identifier[add] ( identifier[filename] ) keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[datadir] , identifier[filename] ), literal[string] ) keyword[as] identifier[f] : identifier[json] . identifier[dump] ( identifier[obj] . identifier[as_dict] (), identifier[f] , identifier[cls] = identifier[utils] . identifier[JSONEncoderPlus] ) keyword[try] : identifier[obj] . identifier[validate] () keyword[except] identifier[ValueError] keyword[as] identifier[ve] : keyword[if] identifier[self] . identifier[strict_validation] : keyword[raise] identifier[ve] keyword[else] : identifier[self] . identifier[warning] ( identifier[ve] ) keyword[for] identifier[obj] keyword[in] identifier[obj] . identifier[_related] : identifier[self] . identifier[save_object] ( identifier[obj] )
def save_object(self, obj): """ Save object to disk as JSON. Generally shouldn't be called directly. """ obj.pre_save(self.jurisdiction.jurisdiction_id) filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-') self.info('save %s %s as %s', obj._type, obj, filename) self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())), cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': '))) self.output_names[obj._type].add(filename) with open(os.path.join(self.datadir, filename), 'w') as f: json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus) # depends on [control=['with'], data=['f']] # validate after writing, allows for inspection on failure try: obj.validate() # depends on [control=['try'], data=[]] except ValueError as ve: if self.strict_validation: raise ve # depends on [control=['if'], data=[]] else: self.warning(ve) # depends on [control=['except'], data=['ve']] # after saving and validating, save subordinate objects for obj in obj._related: self.save_object(obj) # depends on [control=['for'], data=['obj']]
def action2button(action, autoraise=True, text_beside_icon=False, parent=None): """Create a QToolButton directly from a QAction object""" if parent is None: parent = action.parent() button = QToolButton(parent) button.setDefaultAction(action) button.setAutoRaise(autoraise) if text_beside_icon: button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon) return button
def function[action2button, parameter[action, autoraise, text_beside_icon, parent]]: constant[Create a QToolButton directly from a QAction object] if compare[name[parent] is constant[None]] begin[:] variable[parent] assign[=] call[name[action].parent, parameter[]] variable[button] assign[=] call[name[QToolButton], parameter[name[parent]]] call[name[button].setDefaultAction, parameter[name[action]]] call[name[button].setAutoRaise, parameter[name[autoraise]]] if name[text_beside_icon] begin[:] call[name[button].setToolButtonStyle, parameter[name[Qt].ToolButtonTextBesideIcon]] return[name[button]]
keyword[def] identifier[action2button] ( identifier[action] , identifier[autoraise] = keyword[True] , identifier[text_beside_icon] = keyword[False] , identifier[parent] = keyword[None] ): literal[string] keyword[if] identifier[parent] keyword[is] keyword[None] : identifier[parent] = identifier[action] . identifier[parent] () identifier[button] = identifier[QToolButton] ( identifier[parent] ) identifier[button] . identifier[setDefaultAction] ( identifier[action] ) identifier[button] . identifier[setAutoRaise] ( identifier[autoraise] ) keyword[if] identifier[text_beside_icon] : identifier[button] . identifier[setToolButtonStyle] ( identifier[Qt] . identifier[ToolButtonTextBesideIcon] ) keyword[return] identifier[button]
def action2button(action, autoraise=True, text_beside_icon=False, parent=None): """Create a QToolButton directly from a QAction object""" if parent is None: parent = action.parent() # depends on [control=['if'], data=['parent']] button = QToolButton(parent) button.setDefaultAction(action) button.setAutoRaise(autoraise) if text_beside_icon: button.setToolButtonStyle(Qt.ToolButtonTextBesideIcon) # depends on [control=['if'], data=[]] return button
def disable(self, everything=False): """ Disable all but possibly not binning, which is needed for FF apps Parameters --------- everything : bool disable binning as well """ self.freeze() if not everything: self.xbin.enable() self.ybin.enable() self.frozen = False
def function[disable, parameter[self, everything]]: constant[ Disable all but possibly not binning, which is needed for FF apps Parameters --------- everything : bool disable binning as well ] call[name[self].freeze, parameter[]] if <ast.UnaryOp object at 0x7da18eb54400> begin[:] call[name[self].xbin.enable, parameter[]] call[name[self].ybin.enable, parameter[]] name[self].frozen assign[=] constant[False]
keyword[def] identifier[disable] ( identifier[self] , identifier[everything] = keyword[False] ): literal[string] identifier[self] . identifier[freeze] () keyword[if] keyword[not] identifier[everything] : identifier[self] . identifier[xbin] . identifier[enable] () identifier[self] . identifier[ybin] . identifier[enable] () identifier[self] . identifier[frozen] = keyword[False]
def disable(self, everything=False): """ Disable all but possibly not binning, which is needed for FF apps Parameters --------- everything : bool disable binning as well """ self.freeze() if not everything: self.xbin.enable() self.ybin.enable() # depends on [control=['if'], data=[]] self.frozen = False
def _set_cfg(self, v, load=False): """ Setter method for cfg, mapped from YANG variable /zoning/defined_configuration/cfg (list) If this variable is read-only (config: false) in the source YANG file, then _set_cfg is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cfg() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("cfg_name",cfg.cfg, yang_name="cfg", rest_name="cfg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name', extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}), is_container='list', yang_name="cfg", rest_name="cfg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """cfg must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("cfg_name",cfg.cfg, yang_name="cfg", rest_name="cfg", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name', extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}), is_container='list', yang_name="cfg", rest_name="cfg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)""", }) self.__cfg = t if hasattr(self, '_set'): self._set()
def function[_set_cfg, parameter[self, v, load]]: constant[ Setter method for cfg, mapped from YANG variable /zoning/defined_configuration/cfg (list) If this variable is read-only (config: false) in the source YANG file, then _set_cfg is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cfg() directly. ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18f00e230> name[self].__cfg assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_cfg] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGListType] ( literal[string] , identifier[cfg] . identifier[cfg] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[is_container] = literal[string] , identifier[user_ordered] = keyword[False] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[yang_keys] = literal[string] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}), identifier[is_container] = literal[string] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__cfg] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_cfg(self, v, load=False): """ Setter method for cfg, mapped from YANG variable /zoning/defined_configuration/cfg (list) If this variable is read-only (config: false) in the source YANG file, then _set_cfg is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cfg() directly. """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=YANGListType('cfg_name', cfg.cfg, yang_name='cfg', rest_name='cfg', parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='cfg-name', extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}), is_container='list', yang_name='cfg', rest_name='cfg', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined CFGs', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_cfg'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'cfg must be of a type compatible with list', 'defined-type': 'list', 'generated-type': 'YANGDynClass(base=YANGListType("cfg_name",cfg.cfg, yang_name="cfg", rest_name="cfg", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'cfg-name\', extensions={u\'tailf-common\': {u\'info\': u\'List of defined CFGs\', u\'cli-no-key-completion\': None, u\'cli-full-no\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-full-command\': None, u\'callpoint\': u\'zone_defined_cfg\'}}), is_container=\'list\', yang_name="cfg", rest_name="cfg", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'List of defined CFGs\', u\'cli-no-key-completion\': None, u\'cli-full-no\': None, u\'cli-suppress-key-abbreviation\': None, u\'cli-full-command\': None, u\'callpoint\': u\'zone_defined_cfg\'}}, namespace=\'urn:brocade.com:mgmt:brocade-zone\', defining_module=\'brocade-zone\', yang_type=\'list\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__cfg = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def to_astropy_column(llwcol, cls, copy=False, dtype=None, use_numpy_dtype=False, **kwargs): """Convert a :class:`~ligo.lw.table.Column` to `astropy.table.Column` Parameters ----------- llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable the LIGO_LW column to convert, or an iterable cls : `~astropy.table.Column` the Astropy `~astropy.table.Column` or subclass to convert to copy : `bool`, optional if `True` copy the input data, otherwise return a reference, default: `False` dtype : `type`, optional the data type to convert to when creating the `~astropy.table.Column` use_numpy_dtype : `bool`, optional convert object type to numpy dtype, default: `False`, only used with ``dtype=None`` **kwargs other keyword arguments are passed to the `~astropy.table.Column` creator Returns ------- column : `~astropy.table.Column` an Astropy version of the given LIGO_LW column """ if dtype is None: # try and find dtype dtype = _get_column_dtype(llwcol) if use_numpy_dtype and numpy.dtype(dtype).type is numpy.object_: # dtype maps to 'object' in numpy, try and resolve real numpy type try: dtype = NUMPY_TYPE_MAP[dtype] except KeyError: # try subclass matches (mainly for ilwdchar) for key in NUMPY_TYPE_MAP: if issubclass(dtype, key): dtype = NUMPY_TYPE_MAP[key] break else: # no subclass matches, raise raise TypeError("no mapping from object type %r to numpy " "type" % dtype) try: return cls(data=llwcol, copy=copy, dtype=dtype, **kwargs) except TypeError: # numpy tries to cast ilwdchar to int via long, which breaks if dtype is numpy.int_ and isinstance(llwcol[0], ilwdchar_types): return cls(data=map(dtype, llwcol), copy=False, dtype=dtype, **kwargs) # any other error, raise raise
def function[to_astropy_column, parameter[llwcol, cls, copy, dtype, use_numpy_dtype]]: constant[Convert a :class:`~ligo.lw.table.Column` to `astropy.table.Column` Parameters ----------- llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable the LIGO_LW column to convert, or an iterable cls : `~astropy.table.Column` the Astropy `~astropy.table.Column` or subclass to convert to copy : `bool`, optional if `True` copy the input data, otherwise return a reference, default: `False` dtype : `type`, optional the data type to convert to when creating the `~astropy.table.Column` use_numpy_dtype : `bool`, optional convert object type to numpy dtype, default: `False`, only used with ``dtype=None`` **kwargs other keyword arguments are passed to the `~astropy.table.Column` creator Returns ------- column : `~astropy.table.Column` an Astropy version of the given LIGO_LW column ] if compare[name[dtype] is constant[None]] begin[:] variable[dtype] assign[=] call[name[_get_column_dtype], parameter[name[llwcol]]] if <ast.BoolOp object at 0x7da204620be0> begin[:] <ast.Try object at 0x7da204623820> <ast.Try object at 0x7da204567250>
keyword[def] identifier[to_astropy_column] ( identifier[llwcol] , identifier[cls] , identifier[copy] = keyword[False] , identifier[dtype] = keyword[None] , identifier[use_numpy_dtype] = keyword[False] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[dtype] keyword[is] keyword[None] : identifier[dtype] = identifier[_get_column_dtype] ( identifier[llwcol] ) keyword[if] identifier[use_numpy_dtype] keyword[and] identifier[numpy] . identifier[dtype] ( identifier[dtype] ). identifier[type] keyword[is] identifier[numpy] . identifier[object_] : keyword[try] : identifier[dtype] = identifier[NUMPY_TYPE_MAP] [ identifier[dtype] ] keyword[except] identifier[KeyError] : keyword[for] identifier[key] keyword[in] identifier[NUMPY_TYPE_MAP] : keyword[if] identifier[issubclass] ( identifier[dtype] , identifier[key] ): identifier[dtype] = identifier[NUMPY_TYPE_MAP] [ identifier[key] ] keyword[break] keyword[else] : keyword[raise] identifier[TypeError] ( literal[string] literal[string] % identifier[dtype] ) keyword[try] : keyword[return] identifier[cls] ( identifier[data] = identifier[llwcol] , identifier[copy] = identifier[copy] , identifier[dtype] = identifier[dtype] ,** identifier[kwargs] ) keyword[except] identifier[TypeError] : keyword[if] identifier[dtype] keyword[is] identifier[numpy] . identifier[int_] keyword[and] identifier[isinstance] ( identifier[llwcol] [ literal[int] ], identifier[ilwdchar_types] ): keyword[return] identifier[cls] ( identifier[data] = identifier[map] ( identifier[dtype] , identifier[llwcol] ), identifier[copy] = keyword[False] , identifier[dtype] = identifier[dtype] ,** identifier[kwargs] ) keyword[raise]
def to_astropy_column(llwcol, cls, copy=False, dtype=None, use_numpy_dtype=False, **kwargs): """Convert a :class:`~ligo.lw.table.Column` to `astropy.table.Column` Parameters ----------- llwcol : :class:`~ligo.lw.table.Column`, `numpy.ndarray`, iterable the LIGO_LW column to convert, or an iterable cls : `~astropy.table.Column` the Astropy `~astropy.table.Column` or subclass to convert to copy : `bool`, optional if `True` copy the input data, otherwise return a reference, default: `False` dtype : `type`, optional the data type to convert to when creating the `~astropy.table.Column` use_numpy_dtype : `bool`, optional convert object type to numpy dtype, default: `False`, only used with ``dtype=None`` **kwargs other keyword arguments are passed to the `~astropy.table.Column` creator Returns ------- column : `~astropy.table.Column` an Astropy version of the given LIGO_LW column """ if dtype is None: # try and find dtype dtype = _get_column_dtype(llwcol) if use_numpy_dtype and numpy.dtype(dtype).type is numpy.object_: # dtype maps to 'object' in numpy, try and resolve real numpy type try: dtype = NUMPY_TYPE_MAP[dtype] # depends on [control=['try'], data=[]] except KeyError: # try subclass matches (mainly for ilwdchar) for key in NUMPY_TYPE_MAP: if issubclass(dtype, key): dtype = NUMPY_TYPE_MAP[key] break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['key']] else: # no subclass matches, raise raise TypeError('no mapping from object type %r to numpy type' % dtype) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['dtype']] try: return cls(data=llwcol, copy=copy, dtype=dtype, **kwargs) # depends on [control=['try'], data=[]] except TypeError: # numpy tries to cast ilwdchar to int via long, which breaks if dtype is numpy.int_ and isinstance(llwcol[0], ilwdchar_types): return cls(data=map(dtype, llwcol), copy=False, dtype=dtype, **kwargs) # depends on [control=['if'], data=[]] # any other error, raise raise # depends on [control=['except'], data=[]]
def kent_mean(dec=None, inc=None, di_block=None): """ Calculates the Kent mean and associated statistical parameters from either a list of declination values and a separate list of inclination values or from a di_block (a nested list a nested list of [dec,inc,1.0]). Returns a dictionary with the Kent mean and statistical parameters. Parameters ---------- dec: list of declinations inc: list of inclinations or di_block: a nested list of [dec,inc,1.0] A di_block can be provided instead of dec, inc lists in which case it will be used. Either dec, inc lists or a di_block need to passed to the function. Returns ---------- kpars : dictionary containing Kent mean and associated statistics. Examples -------- Use lists of declination and inclination to calculate a Kent mean: >>> ipmag.kent_mean(dec=[140,127,142,136],inc=[21,23,19,22]) {'Edec': 280.38683553668795, 'Einc': 64.236598921744289, 'Eta': 0.72982112760919715, 'Zdec': 40.824690028412761, 'Zeta': 6.7896823241008795, 'Zinc': 13.739412321974067, 'dec': 136.30838974272072, 'inc': 21.347784026899987, 'n': 4} Use a di_block to calculate a Kent mean (will give the same output as the example with the lists): >>> ipmag.kent_mean(di_block=[[140,21],[127,23],[142,19],[136,22]]) """ if di_block is None: di_block = make_di_block(dec, inc) return pmag.dokent(di_block, len(di_block)) else: return pmag.dokent(di_block, len(di_block))
def function[kent_mean, parameter[dec, inc, di_block]]: constant[ Calculates the Kent mean and associated statistical parameters from either a list of declination values and a separate list of inclination values or from a di_block (a nested list a nested list of [dec,inc,1.0]). Returns a dictionary with the Kent mean and statistical parameters. Parameters ---------- dec: list of declinations inc: list of inclinations or di_block: a nested list of [dec,inc,1.0] A di_block can be provided instead of dec, inc lists in which case it will be used. Either dec, inc lists or a di_block need to passed to the function. Returns ---------- kpars : dictionary containing Kent mean and associated statistics. Examples -------- Use lists of declination and inclination to calculate a Kent mean: >>> ipmag.kent_mean(dec=[140,127,142,136],inc=[21,23,19,22]) {'Edec': 280.38683553668795, 'Einc': 64.236598921744289, 'Eta': 0.72982112760919715, 'Zdec': 40.824690028412761, 'Zeta': 6.7896823241008795, 'Zinc': 13.739412321974067, 'dec': 136.30838974272072, 'inc': 21.347784026899987, 'n': 4} Use a di_block to calculate a Kent mean (will give the same output as the example with the lists): >>> ipmag.kent_mean(di_block=[[140,21],[127,23],[142,19],[136,22]]) ] if compare[name[di_block] is constant[None]] begin[:] variable[di_block] assign[=] call[name[make_di_block], parameter[name[dec], name[inc]]] return[call[name[pmag].dokent, parameter[name[di_block], call[name[len], parameter[name[di_block]]]]]]
keyword[def] identifier[kent_mean] ( identifier[dec] = keyword[None] , identifier[inc] = keyword[None] , identifier[di_block] = keyword[None] ): literal[string] keyword[if] identifier[di_block] keyword[is] keyword[None] : identifier[di_block] = identifier[make_di_block] ( identifier[dec] , identifier[inc] ) keyword[return] identifier[pmag] . identifier[dokent] ( identifier[di_block] , identifier[len] ( identifier[di_block] )) keyword[else] : keyword[return] identifier[pmag] . identifier[dokent] ( identifier[di_block] , identifier[len] ( identifier[di_block] ))
def kent_mean(dec=None, inc=None, di_block=None): """ Calculates the Kent mean and associated statistical parameters from either a list of declination values and a separate list of inclination values or from a di_block (a nested list a nested list of [dec,inc,1.0]). Returns a dictionary with the Kent mean and statistical parameters. Parameters ---------- dec: list of declinations inc: list of inclinations or di_block: a nested list of [dec,inc,1.0] A di_block can be provided instead of dec, inc lists in which case it will be used. Either dec, inc lists or a di_block need to passed to the function. Returns ---------- kpars : dictionary containing Kent mean and associated statistics. Examples -------- Use lists of declination and inclination to calculate a Kent mean: >>> ipmag.kent_mean(dec=[140,127,142,136],inc=[21,23,19,22]) {'Edec': 280.38683553668795, 'Einc': 64.236598921744289, 'Eta': 0.72982112760919715, 'Zdec': 40.824690028412761, 'Zeta': 6.7896823241008795, 'Zinc': 13.739412321974067, 'dec': 136.30838974272072, 'inc': 21.347784026899987, 'n': 4} Use a di_block to calculate a Kent mean (will give the same output as the example with the lists): >>> ipmag.kent_mean(di_block=[[140,21],[127,23],[142,19],[136,22]]) """ if di_block is None: di_block = make_di_block(dec, inc) return pmag.dokent(di_block, len(di_block)) # depends on [control=['if'], data=['di_block']] else: return pmag.dokent(di_block, len(di_block))
def inject_config(self, config, from_args): """ :param config: :type config: list :param from_args: :type from_args: dict """ # First get required values from labelStore runtime = self._get_runtime() whitelist = self._get_whitelist() #Run introspection on the libraries to retrieve list of libraries to link found_libraries = self._run_introspection(runtime, whitelist, verbose=True) container_path_set=set() for library in found_libraries: #disallow duplicate library targets cpath = self.__get_container_path(library) if cpath in container_path_set: continue container_path_set.add(cpath) config.append('--volume={0}:{1}'.format(library, cpath)) config.extend(['-e', 'LD_LIBRARY_PATH={0}'.format(_container_lib_location)]) config.extend(['-e', 'LIBGL_DRIVERS_PATH={0}'.format(_container_lib_location)])
def function[inject_config, parameter[self, config, from_args]]: constant[ :param config: :type config: list :param from_args: :type from_args: dict ] variable[runtime] assign[=] call[name[self]._get_runtime, parameter[]] variable[whitelist] assign[=] call[name[self]._get_whitelist, parameter[]] variable[found_libraries] assign[=] call[name[self]._run_introspection, parameter[name[runtime], name[whitelist]]] variable[container_path_set] assign[=] call[name[set], parameter[]] for taget[name[library]] in starred[name[found_libraries]] begin[:] variable[cpath] assign[=] call[name[self].__get_container_path, parameter[name[library]]] if compare[name[cpath] in name[container_path_set]] begin[:] continue call[name[container_path_set].add, parameter[name[cpath]]] call[name[config].append, parameter[call[constant[--volume={0}:{1}].format, parameter[name[library], name[cpath]]]]] call[name[config].extend, parameter[list[[<ast.Constant object at 0x7da18bc73e50>, <ast.Call object at 0x7da18bc72710>]]]] call[name[config].extend, parameter[list[[<ast.Constant object at 0x7da18bc70760>, <ast.Call object at 0x7da18bc72c20>]]]]
keyword[def] identifier[inject_config] ( identifier[self] , identifier[config] , identifier[from_args] ): literal[string] identifier[runtime] = identifier[self] . identifier[_get_runtime] () identifier[whitelist] = identifier[self] . identifier[_get_whitelist] () identifier[found_libraries] = identifier[self] . identifier[_run_introspection] ( identifier[runtime] , identifier[whitelist] , identifier[verbose] = keyword[True] ) identifier[container_path_set] = identifier[set] () keyword[for] identifier[library] keyword[in] identifier[found_libraries] : identifier[cpath] = identifier[self] . identifier[__get_container_path] ( identifier[library] ) keyword[if] identifier[cpath] keyword[in] identifier[container_path_set] : keyword[continue] identifier[container_path_set] . identifier[add] ( identifier[cpath] ) identifier[config] . identifier[append] ( literal[string] . identifier[format] ( identifier[library] , identifier[cpath] )) identifier[config] . identifier[extend] ([ literal[string] , literal[string] . identifier[format] ( identifier[_container_lib_location] )]) identifier[config] . identifier[extend] ([ literal[string] , literal[string] . identifier[format] ( identifier[_container_lib_location] )])
def inject_config(self, config, from_args): """ :param config: :type config: list :param from_args: :type from_args: dict """ # First get required values from labelStore runtime = self._get_runtime() whitelist = self._get_whitelist() #Run introspection on the libraries to retrieve list of libraries to link found_libraries = self._run_introspection(runtime, whitelist, verbose=True) container_path_set = set() for library in found_libraries: #disallow duplicate library targets cpath = self.__get_container_path(library) if cpath in container_path_set: continue # depends on [control=['if'], data=[]] container_path_set.add(cpath) config.append('--volume={0}:{1}'.format(library, cpath)) # depends on [control=['for'], data=['library']] config.extend(['-e', 'LD_LIBRARY_PATH={0}'.format(_container_lib_location)]) config.extend(['-e', 'LIBGL_DRIVERS_PATH={0}'.format(_container_lib_location)])
def get_nagios_unit_name(relation_name='nrpe-external-master'): """ Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to """ host_context = get_nagios_hostcontext(relation_name) if host_context: unit = "%s:%s" % (host_context, local_unit()) else: unit = local_unit() return unit
def function[get_nagios_unit_name, parameter[relation_name]]: constant[ Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to ] variable[host_context] assign[=] call[name[get_nagios_hostcontext], parameter[name[relation_name]]] if name[host_context] begin[:] variable[unit] assign[=] binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bc73b80>, <ast.Call object at 0x7da18bc73280>]]] return[name[unit]]
keyword[def] identifier[get_nagios_unit_name] ( identifier[relation_name] = literal[string] ): literal[string] identifier[host_context] = identifier[get_nagios_hostcontext] ( identifier[relation_name] ) keyword[if] identifier[host_context] : identifier[unit] = literal[string] %( identifier[host_context] , identifier[local_unit] ()) keyword[else] : identifier[unit] = identifier[local_unit] () keyword[return] identifier[unit]
def get_nagios_unit_name(relation_name='nrpe-external-master'): """ Return the nagios unit name prepended with host_context if needed :param str relation_name: Name of relation nrpe sub joined to """ host_context = get_nagios_hostcontext(relation_name) if host_context: unit = '%s:%s' % (host_context, local_unit()) # depends on [control=['if'], data=[]] else: unit = local_unit() return unit
def get_headline(self, name): """Get stored messages for a service. Args: name (string): The name of the service to get messages from. Returns: ServiceMessage: the headline or None if no headline has been set """ return self._loop.run_coroutine(self._client.get_headline(name))
def function[get_headline, parameter[self, name]]: constant[Get stored messages for a service. Args: name (string): The name of the service to get messages from. Returns: ServiceMessage: the headline or None if no headline has been set ] return[call[name[self]._loop.run_coroutine, parameter[call[name[self]._client.get_headline, parameter[name[name]]]]]]
keyword[def] identifier[get_headline] ( identifier[self] , identifier[name] ): literal[string] keyword[return] identifier[self] . identifier[_loop] . identifier[run_coroutine] ( identifier[self] . identifier[_client] . identifier[get_headline] ( identifier[name] ))
def get_headline(self, name): """Get stored messages for a service. Args: name (string): The name of the service to get messages from. Returns: ServiceMessage: the headline or None if no headline has been set """ return self._loop.run_coroutine(self._client.get_headline(name))
def with_revision(self, label, number): """ Returns a Tag with a given revision """ t = self.clone() t.revision = Revision(label, number) return t
def function[with_revision, parameter[self, label, number]]: constant[ Returns a Tag with a given revision ] variable[t] assign[=] call[name[self].clone, parameter[]] name[t].revision assign[=] call[name[Revision], parameter[name[label], name[number]]] return[name[t]]
keyword[def] identifier[with_revision] ( identifier[self] , identifier[label] , identifier[number] ): literal[string] identifier[t] = identifier[self] . identifier[clone] () identifier[t] . identifier[revision] = identifier[Revision] ( identifier[label] , identifier[number] ) keyword[return] identifier[t]
def with_revision(self, label, number): """ Returns a Tag with a given revision """ t = self.clone() t.revision = Revision(label, number) return t
def parse_content_type(header): """Parse the "Content-Type" header.""" typ = subtyp = None; options = {} typ, pos = expect_re(re_token, header, 0) _, pos = expect_lit('/', header, pos) subtyp, pos = expect_re(re_token, header, pos) ctype = header[:pos] if subtyp else '' while pos < len(header): _, pos = accept_ws(header, pos) _, pos = expect_lit(';', header, pos) _, pos = accept_ws(header, pos) name, pos = expect_re(re_token, header, pos) _, pos = expect_lit('=', header, pos) char = lookahead(header, pos) if char == '"': value, pos = expect_re(re_qstring, header, pos) value = re_qpair.sub('\\1', value) elif char: value, pos = expect_re(re_token, header, pos) if name and value is not None: options[name] = value return ctype, options
def function[parse_content_type, parameter[header]]: constant[Parse the "Content-Type" header.] variable[typ] assign[=] constant[None] variable[options] assign[=] dictionary[[], []] <ast.Tuple object at 0x7da18ede6680> assign[=] call[name[expect_re], parameter[name[re_token], name[header], constant[0]]] <ast.Tuple object at 0x7da18ede6e90> assign[=] call[name[expect_lit], parameter[constant[/], name[header], name[pos]]] <ast.Tuple object at 0x7da18ede6b60> assign[=] call[name[expect_re], parameter[name[re_token], name[header], name[pos]]] variable[ctype] assign[=] <ast.IfExp object at 0x7da1b031ea70> while compare[name[pos] less[<] call[name[len], parameter[name[header]]]] begin[:] <ast.Tuple object at 0x7da1b031e260> assign[=] call[name[accept_ws], parameter[name[header], name[pos]]] <ast.Tuple object at 0x7da1b031f160> assign[=] call[name[expect_lit], parameter[constant[;], name[header], name[pos]]] <ast.Tuple object at 0x7da1b031d210> assign[=] call[name[accept_ws], parameter[name[header], name[pos]]] <ast.Tuple object at 0x7da1b031ca00> assign[=] call[name[expect_re], parameter[name[re_token], name[header], name[pos]]] <ast.Tuple object at 0x7da1b031e5c0> assign[=] call[name[expect_lit], parameter[constant[=], name[header], name[pos]]] variable[char] assign[=] call[name[lookahead], parameter[name[header], name[pos]]] if compare[name[char] equal[==] constant["]] begin[:] <ast.Tuple object at 0x7da1b031c400> assign[=] call[name[expect_re], parameter[name[re_qstring], name[header], name[pos]]] variable[value] assign[=] call[name[re_qpair].sub, parameter[constant[\1], name[value]]] if <ast.BoolOp object at 0x7da1b031c2b0> begin[:] call[name[options]][name[name]] assign[=] name[value] return[tuple[[<ast.Name object at 0x7da1b031ed10>, <ast.Name object at 0x7da1b031fe80>]]]
keyword[def] identifier[parse_content_type] ( identifier[header] ): literal[string] identifier[typ] = identifier[subtyp] = keyword[None] ; identifier[options] ={} identifier[typ] , identifier[pos] = identifier[expect_re] ( identifier[re_token] , identifier[header] , literal[int] ) identifier[_] , identifier[pos] = identifier[expect_lit] ( literal[string] , identifier[header] , identifier[pos] ) identifier[subtyp] , identifier[pos] = identifier[expect_re] ( identifier[re_token] , identifier[header] , identifier[pos] ) identifier[ctype] = identifier[header] [: identifier[pos] ] keyword[if] identifier[subtyp] keyword[else] literal[string] keyword[while] identifier[pos] < identifier[len] ( identifier[header] ): identifier[_] , identifier[pos] = identifier[accept_ws] ( identifier[header] , identifier[pos] ) identifier[_] , identifier[pos] = identifier[expect_lit] ( literal[string] , identifier[header] , identifier[pos] ) identifier[_] , identifier[pos] = identifier[accept_ws] ( identifier[header] , identifier[pos] ) identifier[name] , identifier[pos] = identifier[expect_re] ( identifier[re_token] , identifier[header] , identifier[pos] ) identifier[_] , identifier[pos] = identifier[expect_lit] ( literal[string] , identifier[header] , identifier[pos] ) identifier[char] = identifier[lookahead] ( identifier[header] , identifier[pos] ) keyword[if] identifier[char] == literal[string] : identifier[value] , identifier[pos] = identifier[expect_re] ( identifier[re_qstring] , identifier[header] , identifier[pos] ) identifier[value] = identifier[re_qpair] . identifier[sub] ( literal[string] , identifier[value] ) keyword[elif] identifier[char] : identifier[value] , identifier[pos] = identifier[expect_re] ( identifier[re_token] , identifier[header] , identifier[pos] ) keyword[if] identifier[name] keyword[and] identifier[value] keyword[is] keyword[not] keyword[None] : identifier[options] [ identifier[name] ]= identifier[value] keyword[return] identifier[ctype] , identifier[options]
def parse_content_type(header): """Parse the "Content-Type" header.""" typ = subtyp = None options = {} (typ, pos) = expect_re(re_token, header, 0) (_, pos) = expect_lit('/', header, pos) (subtyp, pos) = expect_re(re_token, header, pos) ctype = header[:pos] if subtyp else '' while pos < len(header): (_, pos) = accept_ws(header, pos) (_, pos) = expect_lit(';', header, pos) (_, pos) = accept_ws(header, pos) (name, pos) = expect_re(re_token, header, pos) (_, pos) = expect_lit('=', header, pos) char = lookahead(header, pos) if char == '"': (value, pos) = expect_re(re_qstring, header, pos) value = re_qpair.sub('\\1', value) # depends on [control=['if'], data=[]] elif char: (value, pos) = expect_re(re_token, header, pos) # depends on [control=['if'], data=[]] if name and value is not None: options[name] = value # depends on [control=['if'], data=[]] # depends on [control=['while'], data=['pos']] return (ctype, options)
def action_checkbox(self, obj): """ A list_display column containing a checkbox widget. """ if self.check_concurrent_action: return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text("%s,%s" % (obj.pk, get_revision_of_object(obj)))) else: # pragma: no cover return super(ConcurrencyActionMixin, self).action_checkbox(obj)
def function[action_checkbox, parameter[self, obj]]: constant[ A list_display column containing a checkbox widget. ] if name[self].check_concurrent_action begin[:] return[call[name[helpers].checkbox.render, parameter[name[helpers].ACTION_CHECKBOX_NAME, call[name[force_text], parameter[binary_operation[constant[%s,%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0717970>, <ast.Call object at 0x7da1b0717700>]]]]]]]]
keyword[def] identifier[action_checkbox] ( identifier[self] , identifier[obj] ): literal[string] keyword[if] identifier[self] . identifier[check_concurrent_action] : keyword[return] identifier[helpers] . identifier[checkbox] . identifier[render] ( identifier[helpers] . identifier[ACTION_CHECKBOX_NAME] , identifier[force_text] ( literal[string] %( identifier[obj] . identifier[pk] , identifier[get_revision_of_object] ( identifier[obj] )))) keyword[else] : keyword[return] identifier[super] ( identifier[ConcurrencyActionMixin] , identifier[self] ). identifier[action_checkbox] ( identifier[obj] )
def action_checkbox(self, obj): """ A list_display column containing a checkbox widget. """ if self.check_concurrent_action: return helpers.checkbox.render(helpers.ACTION_CHECKBOX_NAME, force_text('%s,%s' % (obj.pk, get_revision_of_object(obj)))) # depends on [control=['if'], data=[]] else: # pragma: no cover return super(ConcurrencyActionMixin, self).action_checkbox(obj)
def maybe_clean(self): """Clean the cache if it's time to do so.""" now = time.time() if self.next_cleaning <= now: keys_to_delete = [] for (k, v) in self.data.iteritems(): if v.expiration <= now: keys_to_delete.append(k) for k in keys_to_delete: del self.data[k] now = time.time() self.next_cleaning = now + self.cleaning_interval
def function[maybe_clean, parameter[self]]: constant[Clean the cache if it's time to do so.] variable[now] assign[=] call[name[time].time, parameter[]] if compare[name[self].next_cleaning less_or_equal[<=] name[now]] begin[:] variable[keys_to_delete] assign[=] list[[]] for taget[tuple[[<ast.Name object at 0x7da1b0912c50>, <ast.Name object at 0x7da1b0910760>]]] in starred[call[name[self].data.iteritems, parameter[]]] begin[:] if compare[name[v].expiration less_or_equal[<=] name[now]] begin[:] call[name[keys_to_delete].append, parameter[name[k]]] for taget[name[k]] in starred[name[keys_to_delete]] begin[:] <ast.Delete object at 0x7da1b0912440> variable[now] assign[=] call[name[time].time, parameter[]] name[self].next_cleaning assign[=] binary_operation[name[now] + name[self].cleaning_interval]
keyword[def] identifier[maybe_clean] ( identifier[self] ): literal[string] identifier[now] = identifier[time] . identifier[time] () keyword[if] identifier[self] . identifier[next_cleaning] <= identifier[now] : identifier[keys_to_delete] =[] keyword[for] ( identifier[k] , identifier[v] ) keyword[in] identifier[self] . identifier[data] . identifier[iteritems] (): keyword[if] identifier[v] . identifier[expiration] <= identifier[now] : identifier[keys_to_delete] . identifier[append] ( identifier[k] ) keyword[for] identifier[k] keyword[in] identifier[keys_to_delete] : keyword[del] identifier[self] . identifier[data] [ identifier[k] ] identifier[now] = identifier[time] . identifier[time] () identifier[self] . identifier[next_cleaning] = identifier[now] + identifier[self] . identifier[cleaning_interval]
def maybe_clean(self): """Clean the cache if it's time to do so.""" now = time.time() if self.next_cleaning <= now: keys_to_delete = [] for (k, v) in self.data.iteritems(): if v.expiration <= now: keys_to_delete.append(k) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] for k in keys_to_delete: del self.data[k] # depends on [control=['for'], data=['k']] now = time.time() self.next_cleaning = now + self.cleaning_interval # depends on [control=['if'], data=['now']]
def to_sqlite3(self, conn, target, *args, **kwargs): """ Saves the sequence to sqlite3 database. Target table must be created in advance. The table schema is inferred from the elements in the sequence if only target table name is supplied. >>> seq([(1, 'Tom'), (2, 'Jack')])\ .to_sqlite3('users.db', 'INSERT INTO user (id, name) VALUES (?, ?)') >>> seq([{'id': 1, 'name': 'Tom'}, {'id': 2, 'name': 'Jack'}]).to_sqlite3(conn, 'user') :param conn: path or sqlite connection, cursor :param target: SQL query string or table name :param args: passed to sqlite3.connect :param kwargs: passed to sqlite3.connect """ # pylint: disable=no-member insert_regex = re.compile(r'(insert|update)\s+into', flags=re.IGNORECASE) if insert_regex.match(target): insert_f = self._to_sqlite3_by_query else: insert_f = self._to_sqlite3_by_table if isinstance(conn, (sqlite3.Connection, sqlite3.Cursor)): insert_f(conn, target) conn.commit() elif isinstance(conn, str): with sqlite3.connect(conn, *args, **kwargs) as input_conn: insert_f(input_conn, target) input_conn.commit() else: raise ValueError('conn must be a must be a file path or sqlite3 Connection/Cursor')
def function[to_sqlite3, parameter[self, conn, target]]: constant[ Saves the sequence to sqlite3 database. Target table must be created in advance. The table schema is inferred from the elements in the sequence if only target table name is supplied. >>> seq([(1, 'Tom'), (2, 'Jack')]) .to_sqlite3('users.db', 'INSERT INTO user (id, name) VALUES (?, ?)') >>> seq([{'id': 1, 'name': 'Tom'}, {'id': 2, 'name': 'Jack'}]).to_sqlite3(conn, 'user') :param conn: path or sqlite connection, cursor :param target: SQL query string or table name :param args: passed to sqlite3.connect :param kwargs: passed to sqlite3.connect ] variable[insert_regex] assign[=] call[name[re].compile, parameter[constant[(insert|update)\s+into]]] if call[name[insert_regex].match, parameter[name[target]]] begin[:] variable[insert_f] assign[=] name[self]._to_sqlite3_by_query if call[name[isinstance], parameter[name[conn], tuple[[<ast.Attribute object at 0x7da204565f30>, <ast.Attribute object at 0x7da204564730>]]]] begin[:] call[name[insert_f], parameter[name[conn], name[target]]] call[name[conn].commit, parameter[]]
keyword[def] identifier[to_sqlite3] ( identifier[self] , identifier[conn] , identifier[target] ,* identifier[args] ,** identifier[kwargs] ): literal[string] identifier[insert_regex] = identifier[re] . identifier[compile] ( literal[string] , identifier[flags] = identifier[re] . identifier[IGNORECASE] ) keyword[if] identifier[insert_regex] . identifier[match] ( identifier[target] ): identifier[insert_f] = identifier[self] . identifier[_to_sqlite3_by_query] keyword[else] : identifier[insert_f] = identifier[self] . identifier[_to_sqlite3_by_table] keyword[if] identifier[isinstance] ( identifier[conn] ,( identifier[sqlite3] . identifier[Connection] , identifier[sqlite3] . identifier[Cursor] )): identifier[insert_f] ( identifier[conn] , identifier[target] ) identifier[conn] . identifier[commit] () keyword[elif] identifier[isinstance] ( identifier[conn] , identifier[str] ): keyword[with] identifier[sqlite3] . identifier[connect] ( identifier[conn] ,* identifier[args] ,** identifier[kwargs] ) keyword[as] identifier[input_conn] : identifier[insert_f] ( identifier[input_conn] , identifier[target] ) identifier[input_conn] . identifier[commit] () keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] )
def to_sqlite3(self, conn, target, *args, **kwargs): """ Saves the sequence to sqlite3 database. Target table must be created in advance. The table schema is inferred from the elements in the sequence if only target table name is supplied. >>> seq([(1, 'Tom'), (2, 'Jack')]) .to_sqlite3('users.db', 'INSERT INTO user (id, name) VALUES (?, ?)') >>> seq([{'id': 1, 'name': 'Tom'}, {'id': 2, 'name': 'Jack'}]).to_sqlite3(conn, 'user') :param conn: path or sqlite connection, cursor :param target: SQL query string or table name :param args: passed to sqlite3.connect :param kwargs: passed to sqlite3.connect """ # pylint: disable=no-member insert_regex = re.compile('(insert|update)\\s+into', flags=re.IGNORECASE) if insert_regex.match(target): insert_f = self._to_sqlite3_by_query # depends on [control=['if'], data=[]] else: insert_f = self._to_sqlite3_by_table if isinstance(conn, (sqlite3.Connection, sqlite3.Cursor)): insert_f(conn, target) conn.commit() # depends on [control=['if'], data=[]] elif isinstance(conn, str): with sqlite3.connect(conn, *args, **kwargs) as input_conn: insert_f(input_conn, target) input_conn.commit() # depends on [control=['with'], data=['input_conn']] # depends on [control=['if'], data=[]] else: raise ValueError('conn must be a must be a file path or sqlite3 Connection/Cursor')
def create_paramnames_file(self): """The param_names file lists every parameter's analysis_path and Latex tag, and is used for *GetDist* visualization. The parameter names are determined from the class instance names of the model_mapper. Latex tags are properties of each model class.""" paramnames_names = self.variable.param_names paramnames_labels = self.param_labels with open(self.file_param_names, 'w') as paramnames: for i in range(self.variable.prior_count): line = paramnames_names[i] line += ' ' * (70 - len(line)) + paramnames_labels[i] paramnames.write(line + '\n')
def function[create_paramnames_file, parameter[self]]: constant[The param_names file lists every parameter's analysis_path and Latex tag, and is used for *GetDist* visualization. The parameter names are determined from the class instance names of the model_mapper. Latex tags are properties of each model class.] variable[paramnames_names] assign[=] name[self].variable.param_names variable[paramnames_labels] assign[=] name[self].param_labels with call[name[open], parameter[name[self].file_param_names, constant[w]]] begin[:] for taget[name[i]] in starred[call[name[range], parameter[name[self].variable.prior_count]]] begin[:] variable[line] assign[=] call[name[paramnames_names]][name[i]] <ast.AugAssign object at 0x7da1b24e0af0> call[name[paramnames].write, parameter[binary_operation[name[line] + constant[ ]]]]
keyword[def] identifier[create_paramnames_file] ( identifier[self] ): literal[string] identifier[paramnames_names] = identifier[self] . identifier[variable] . identifier[param_names] identifier[paramnames_labels] = identifier[self] . identifier[param_labels] keyword[with] identifier[open] ( identifier[self] . identifier[file_param_names] , literal[string] ) keyword[as] identifier[paramnames] : keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[self] . identifier[variable] . identifier[prior_count] ): identifier[line] = identifier[paramnames_names] [ identifier[i] ] identifier[line] += literal[string] *( literal[int] - identifier[len] ( identifier[line] ))+ identifier[paramnames_labels] [ identifier[i] ] identifier[paramnames] . identifier[write] ( identifier[line] + literal[string] )
def create_paramnames_file(self): """The param_names file lists every parameter's analysis_path and Latex tag, and is used for *GetDist* visualization. The parameter names are determined from the class instance names of the model_mapper. Latex tags are properties of each model class.""" paramnames_names = self.variable.param_names paramnames_labels = self.param_labels with open(self.file_param_names, 'w') as paramnames: for i in range(self.variable.prior_count): line = paramnames_names[i] line += ' ' * (70 - len(line)) + paramnames_labels[i] paramnames.write(line + '\n') # depends on [control=['for'], data=['i']] # depends on [control=['with'], data=['paramnames']]
def graph_data_on_the_same_graph(list_of_plots, output_directory, resource_path, output_filename): """ graph_data_on_the_same_graph: put a list of plots on the same graph: currently it supports CDF """ maximum_yvalue = -float('inf') minimum_yvalue = float('inf') plots = curate_plot_list(list_of_plots) plot_count = len(plots) if plot_count == 0: return False, None graph_height, graph_width, graph_title = get_graph_metadata(plots) current_plot_count = 0 fig, axis = plt.subplots() fig.set_size_inches(graph_width, graph_height) if plot_count < 2: fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET) else: fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2)) # Generate each plot on the graph for plot in plots: current_plot_count += 1 logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]') xval, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',') axis.plot(xval, yval, linestyle='-', marker=None, color=get_current_color(current_plot_count), label=plot.plot_label) axis.legend() maximum_yvalue = max(maximum_yvalue, numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count)) minimum_yvalue = min(minimum_yvalue, numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count)) # Set properties of the plots axis.yaxis.set_ticks_position('left') axis.set_xlabel(plots[0].x_label) axis.set_ylabel(plots[0].y_label, fontsize=CONSTANTS.Y_LABEL_FONTSIZE) axis.set_ylim([minimum_yvalue, maximum_yvalue]) axis.yaxis.grid(True) axis.xaxis.grid(True) axis.set_title(graph_title) plot_file_name = os.path.join(output_directory, output_filename + ".png") fig.savefig(plot_file_name) plt.close() # Create html fragment to be used for creation of the report with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file: div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' + resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) + '" width="100%" height="auto"/></div><div class="col-md-12"><p align=center>' + os.path.basename(plot_file_name) + '<br/></p></div>') return True, os.path.join(output_directory, output_filename + '.div')
def function[graph_data_on_the_same_graph, parameter[list_of_plots, output_directory, resource_path, output_filename]]: constant[ graph_data_on_the_same_graph: put a list of plots on the same graph: currently it supports CDF ] variable[maximum_yvalue] assign[=] <ast.UnaryOp object at 0x7da1aff6da50> variable[minimum_yvalue] assign[=] call[name[float], parameter[constant[inf]]] variable[plots] assign[=] call[name[curate_plot_list], parameter[name[list_of_plots]]] variable[plot_count] assign[=] call[name[len], parameter[name[plots]]] if compare[name[plot_count] equal[==] constant[0]] begin[:] return[tuple[[<ast.Constant object at 0x7da1aff6e230>, <ast.Constant object at 0x7da1aff6d7e0>]]] <ast.Tuple object at 0x7da1aff6f580> assign[=] call[name[get_graph_metadata], parameter[name[plots]]] variable[current_plot_count] assign[=] constant[0] <ast.Tuple object at 0x7da1aff6d000> assign[=] call[name[plt].subplots, parameter[]] call[name[fig].set_size_inches, parameter[name[graph_width], name[graph_height]]] if compare[name[plot_count] less[<] constant[2]] begin[:] call[name[fig].subplots_adjust, parameter[]] for taget[name[plot]] in starred[name[plots]] begin[:] <ast.AugAssign object at 0x7da18f00f730> call[name[logger].info, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[Processing: ] + name[plot].input_csv] + constant[ [ ]] + name[output_filename]] + constant[ ]]]]] <ast.Tuple object at 0x7da1b00b77f0> assign[=] call[name[numpy].loadtxt, parameter[name[plot].input_csv]] call[name[axis].plot, parameter[name[xval], name[yval]]] call[name[axis].legend, parameter[]] variable[maximum_yvalue] assign[=] call[name[max], parameter[name[maximum_yvalue], binary_operation[call[name[numpy].amax, parameter[name[yval]]] * binary_operation[constant[1.0] + binary_operation[name[CONSTANTS].ZOOM_FACTOR * name[current_plot_count]]]]]] variable[minimum_yvalue] assign[=] call[name[min], parameter[name[minimum_yvalue], binary_operation[call[name[numpy].amin, parameter[name[yval]]] * binary_operation[constant[1.0] - binary_operation[name[CONSTANTS].ZOOM_FACTOR * name[current_plot_count]]]]]] call[name[axis].yaxis.set_ticks_position, parameter[constant[left]]] call[name[axis].set_xlabel, parameter[call[name[plots]][constant[0]].x_label]] call[name[axis].set_ylabel, parameter[call[name[plots]][constant[0]].y_label]] call[name[axis].set_ylim, parameter[list[[<ast.Name object at 0x7da1aff777c0>, <ast.Name object at 0x7da1aff75030>]]]] call[name[axis].yaxis.grid, parameter[constant[True]]] call[name[axis].xaxis.grid, parameter[constant[True]]] call[name[axis].set_title, parameter[name[graph_title]]] variable[plot_file_name] assign[=] call[name[os].path.join, parameter[name[output_directory], binary_operation[name[output_filename] + constant[.png]]]] call[name[fig].savefig, parameter[name[plot_file_name]]] call[name[plt].close, parameter[]] with call[name[open], parameter[call[name[os].path.join, parameter[name[output_directory], binary_operation[name[output_filename] + constant[.div]]]], constant[w]]] begin[:] call[name[div_file].write, parameter[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[<a name="] + call[call[call[name[os].path.basename, parameter[name[plot_file_name]]].replace, parameter[constant[.png], constant[]]].replace, parameter[constant[.diff], constant[]]]] + constant["></a><div class="col-md-12"><img src="]] + name[resource_path]] + constant[/]] + call[name[os].path.basename, parameter[name[plot_file_name]]]] + constant[" id="]] + call[name[os].path.basename, parameter[name[plot_file_name]]]] + constant[" width="100%" height="auto"/></div><div class="col-md-12"><p align=center>]] + call[name[os].path.basename, parameter[name[plot_file_name]]]] + constant[<br/></p></div>]]]] return[tuple[[<ast.Constant object at 0x7da1afe3d660>, <ast.Call object at 0x7da1afe3d630>]]]
keyword[def] identifier[graph_data_on_the_same_graph] ( identifier[list_of_plots] , identifier[output_directory] , identifier[resource_path] , identifier[output_filename] ): literal[string] identifier[maximum_yvalue] =- identifier[float] ( literal[string] ) identifier[minimum_yvalue] = identifier[float] ( literal[string] ) identifier[plots] = identifier[curate_plot_list] ( identifier[list_of_plots] ) identifier[plot_count] = identifier[len] ( identifier[plots] ) keyword[if] identifier[plot_count] == literal[int] : keyword[return] keyword[False] , keyword[None] identifier[graph_height] , identifier[graph_width] , identifier[graph_title] = identifier[get_graph_metadata] ( identifier[plots] ) identifier[current_plot_count] = literal[int] identifier[fig] , identifier[axis] = identifier[plt] . identifier[subplots] () identifier[fig] . identifier[set_size_inches] ( identifier[graph_width] , identifier[graph_height] ) keyword[if] identifier[plot_count] < literal[int] : identifier[fig] . identifier[subplots_adjust] ( identifier[left] = identifier[CONSTANTS] . identifier[SUBPLOT_LEFT_OFFSET] , identifier[bottom] = identifier[CONSTANTS] . identifier[SUBPLOT_BOTTOM_OFFSET] , identifier[right] = identifier[CONSTANTS] . identifier[SUBPLOT_RIGHT_OFFSET] ) keyword[else] : identifier[fig] . identifier[subplots_adjust] ( identifier[left] = identifier[CONSTANTS] . identifier[SUBPLOT_LEFT_OFFSET] , identifier[bottom] = identifier[CONSTANTS] . identifier[SUBPLOT_BOTTOM_OFFSET] , identifier[right] = identifier[CONSTANTS] . identifier[SUBPLOT_RIGHT_OFFSET] - identifier[CONSTANTS] . identifier[Y_AXIS_OFFSET] *( identifier[plot_count] - literal[int] )) keyword[for] identifier[plot] keyword[in] identifier[plots] : identifier[current_plot_count] += literal[int] identifier[logger] . identifier[info] ( literal[string] + identifier[plot] . identifier[input_csv] + literal[string] + identifier[output_filename] + literal[string] ) identifier[xval] , identifier[yval] = identifier[numpy] . identifier[loadtxt] ( identifier[plot] . identifier[input_csv] , identifier[unpack] = keyword[True] , identifier[delimiter] = literal[string] ) identifier[axis] . identifier[plot] ( identifier[xval] , identifier[yval] , identifier[linestyle] = literal[string] , identifier[marker] = keyword[None] , identifier[color] = identifier[get_current_color] ( identifier[current_plot_count] ), identifier[label] = identifier[plot] . identifier[plot_label] ) identifier[axis] . identifier[legend] () identifier[maximum_yvalue] = identifier[max] ( identifier[maximum_yvalue] , identifier[numpy] . identifier[amax] ( identifier[yval] )*( literal[int] + identifier[CONSTANTS] . identifier[ZOOM_FACTOR] * identifier[current_plot_count] )) identifier[minimum_yvalue] = identifier[min] ( identifier[minimum_yvalue] , identifier[numpy] . identifier[amin] ( identifier[yval] )*( literal[int] - identifier[CONSTANTS] . identifier[ZOOM_FACTOR] * identifier[current_plot_count] )) identifier[axis] . identifier[yaxis] . identifier[set_ticks_position] ( literal[string] ) identifier[axis] . identifier[set_xlabel] ( identifier[plots] [ literal[int] ]. identifier[x_label] ) identifier[axis] . identifier[set_ylabel] ( identifier[plots] [ literal[int] ]. identifier[y_label] , identifier[fontsize] = identifier[CONSTANTS] . identifier[Y_LABEL_FONTSIZE] ) identifier[axis] . identifier[set_ylim] ([ identifier[minimum_yvalue] , identifier[maximum_yvalue] ]) identifier[axis] . identifier[yaxis] . identifier[grid] ( keyword[True] ) identifier[axis] . identifier[xaxis] . identifier[grid] ( keyword[True] ) identifier[axis] . identifier[set_title] ( identifier[graph_title] ) identifier[plot_file_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[output_directory] , identifier[output_filename] + literal[string] ) identifier[fig] . identifier[savefig] ( identifier[plot_file_name] ) identifier[plt] . identifier[close] () keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[join] ( identifier[output_directory] , identifier[output_filename] + literal[string] ), literal[string] ) keyword[as] identifier[div_file] : identifier[div_file] . identifier[write] ( literal[string] + identifier[os] . identifier[path] . identifier[basename] ( identifier[plot_file_name] ). identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )+ literal[string] + identifier[resource_path] + literal[string] + identifier[os] . identifier[path] . identifier[basename] ( identifier[plot_file_name] )+ literal[string] + identifier[os] . identifier[path] . identifier[basename] ( identifier[plot_file_name] )+ literal[string] + identifier[os] . identifier[path] . identifier[basename] ( identifier[plot_file_name] )+ literal[string] ) keyword[return] keyword[True] , identifier[os] . identifier[path] . identifier[join] ( identifier[output_directory] , identifier[output_filename] + literal[string] )
def graph_data_on_the_same_graph(list_of_plots, output_directory, resource_path, output_filename): """ graph_data_on_the_same_graph: put a list of plots on the same graph: currently it supports CDF """ maximum_yvalue = -float('inf') minimum_yvalue = float('inf') plots = curate_plot_list(list_of_plots) plot_count = len(plots) if plot_count == 0: return (False, None) # depends on [control=['if'], data=[]] (graph_height, graph_width, graph_title) = get_graph_metadata(plots) current_plot_count = 0 (fig, axis) = plt.subplots() fig.set_size_inches(graph_width, graph_height) if plot_count < 2: fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET) # depends on [control=['if'], data=[]] else: fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2)) # Generate each plot on the graph for plot in plots: current_plot_count += 1 logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]') (xval, yval) = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',') axis.plot(xval, yval, linestyle='-', marker=None, color=get_current_color(current_plot_count), label=plot.plot_label) axis.legend() maximum_yvalue = max(maximum_yvalue, numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count)) minimum_yvalue = min(minimum_yvalue, numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count)) # depends on [control=['for'], data=['plot']] # Set properties of the plots axis.yaxis.set_ticks_position('left') axis.set_xlabel(plots[0].x_label) axis.set_ylabel(plots[0].y_label, fontsize=CONSTANTS.Y_LABEL_FONTSIZE) axis.set_ylim([minimum_yvalue, maximum_yvalue]) axis.yaxis.grid(True) axis.xaxis.grid(True) axis.set_title(graph_title) plot_file_name = os.path.join(output_directory, output_filename + '.png') fig.savefig(plot_file_name) plt.close() # Create html fragment to be used for creation of the report with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file: div_file.write('<a name="' + os.path.basename(plot_file_name).replace('.png', '').replace('.diff', '') + '"></a><div class="col-md-12"><img src="' + resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) + '" width="100%" height="auto"/></div><div class="col-md-12"><p align=center>' + os.path.basename(plot_file_name) + '<br/></p></div>') # depends on [control=['with'], data=['div_file']] return (True, os.path.join(output_directory, output_filename + '.div'))
def st_atime(self): """Return the access time in seconds.""" atime = self._st_atime_ns / 1e9 return atime if self.use_float else int(atime)
def function[st_atime, parameter[self]]: constant[Return the access time in seconds.] variable[atime] assign[=] binary_operation[name[self]._st_atime_ns / constant[1000000000.0]] return[<ast.IfExp object at 0x7da2047ea140>]
keyword[def] identifier[st_atime] ( identifier[self] ): literal[string] identifier[atime] = identifier[self] . identifier[_st_atime_ns] / literal[int] keyword[return] identifier[atime] keyword[if] identifier[self] . identifier[use_float] keyword[else] identifier[int] ( identifier[atime] )
def st_atime(self): """Return the access time in seconds.""" atime = self._st_atime_ns / 1000000000.0 return atime if self.use_float else int(atime)
def indices(self, data): '''Generate patch start indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch ''' duration = self.data_duration(data) for start in range(0, duration - self.duration, self.stride): yield start
def function[indices, parameter[self, data]]: constant[Generate patch start indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch ] variable[duration] assign[=] call[name[self].data_duration, parameter[name[data]]] for taget[name[start]] in starred[call[name[range], parameter[constant[0], binary_operation[name[duration] - name[self].duration], name[self].stride]]] begin[:] <ast.Yield object at 0x7da1b10afc10>
keyword[def] identifier[indices] ( identifier[self] , identifier[data] ): literal[string] identifier[duration] = identifier[self] . identifier[data_duration] ( identifier[data] ) keyword[for] identifier[start] keyword[in] identifier[range] ( literal[int] , identifier[duration] - identifier[self] . identifier[duration] , identifier[self] . identifier[stride] ): keyword[yield] identifier[start]
def indices(self, data): """Generate patch start indices Parameters ---------- data : dict of np.ndarray As produced by pumpp.transform Yields ------ start : int >= 0 The start index of a sample patch """ duration = self.data_duration(data) for start in range(0, duration - self.duration, self.stride): yield start # depends on [control=['for'], data=['start']]
def crossover_template(cls, length, points=2): """Create a crossover template with the given number of points. The crossover template can be used as a mask to crossover two bitstrings of the same length. Usage: assert len(parent1) == len(parent2) template = BitString.crossover_template(len(parent1)) inv_template = ~template child1 = (parent1 & template) | (parent2 & inv_template) child2 = (parent1 & inv_template) | (parent2 & template) Arguments: length: An int, indicating the desired length of the result. points: An int, the number of crossover points. Return: A BitString instance of the requested length which can be used as a crossover template. """ assert isinstance(length, int) and length >= 0 assert isinstance(points, int) and points >= 0 # Select the crossover points. points = random.sample(range(length + 1), points) # Prep the points for the loop. points.sort() points.append(length) # Fill the bits in with alternating ranges of 0 and 1 according to # the selected crossover points. previous = 0 include_range = bool(random.randrange(2)) pieces = [] for point in points: if point > previous: fill = (numpy.ones if include_range else numpy.zeros) pieces.append(fill(point - previous, dtype=bool)) include_range = not include_range previous = point bits = numpy.concatenate(pieces) bits.flags.writeable = False return cls(bits)
def function[crossover_template, parameter[cls, length, points]]: constant[Create a crossover template with the given number of points. The crossover template can be used as a mask to crossover two bitstrings of the same length. Usage: assert len(parent1) == len(parent2) template = BitString.crossover_template(len(parent1)) inv_template = ~template child1 = (parent1 & template) | (parent2 & inv_template) child2 = (parent1 & inv_template) | (parent2 & template) Arguments: length: An int, indicating the desired length of the result. points: An int, the number of crossover points. Return: A BitString instance of the requested length which can be used as a crossover template. ] assert[<ast.BoolOp object at 0x7da1b0f5afb0>] assert[<ast.BoolOp object at 0x7da1b0f59000>] variable[points] assign[=] call[name[random].sample, parameter[call[name[range], parameter[binary_operation[name[length] + constant[1]]]], name[points]]] call[name[points].sort, parameter[]] call[name[points].append, parameter[name[length]]] variable[previous] assign[=] constant[0] variable[include_range] assign[=] call[name[bool], parameter[call[name[random].randrange, parameter[constant[2]]]]] variable[pieces] assign[=] list[[]] for taget[name[point]] in starred[name[points]] begin[:] if compare[name[point] greater[>] name[previous]] begin[:] variable[fill] assign[=] <ast.IfExp object at 0x7da1b10c6920> call[name[pieces].append, parameter[call[name[fill], parameter[binary_operation[name[point] - name[previous]]]]]] variable[include_range] assign[=] <ast.UnaryOp object at 0x7da1b0f58550> variable[previous] assign[=] name[point] variable[bits] assign[=] call[name[numpy].concatenate, parameter[name[pieces]]] name[bits].flags.writeable assign[=] constant[False] return[call[name[cls], parameter[name[bits]]]]
keyword[def] identifier[crossover_template] ( identifier[cls] , identifier[length] , identifier[points] = literal[int] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[length] , identifier[int] ) keyword[and] identifier[length] >= literal[int] keyword[assert] identifier[isinstance] ( identifier[points] , identifier[int] ) keyword[and] identifier[points] >= literal[int] identifier[points] = identifier[random] . identifier[sample] ( identifier[range] ( identifier[length] + literal[int] ), identifier[points] ) identifier[points] . identifier[sort] () identifier[points] . identifier[append] ( identifier[length] ) identifier[previous] = literal[int] identifier[include_range] = identifier[bool] ( identifier[random] . identifier[randrange] ( literal[int] )) identifier[pieces] =[] keyword[for] identifier[point] keyword[in] identifier[points] : keyword[if] identifier[point] > identifier[previous] : identifier[fill] =( identifier[numpy] . identifier[ones] keyword[if] identifier[include_range] keyword[else] identifier[numpy] . identifier[zeros] ) identifier[pieces] . identifier[append] ( identifier[fill] ( identifier[point] - identifier[previous] , identifier[dtype] = identifier[bool] )) identifier[include_range] = keyword[not] identifier[include_range] identifier[previous] = identifier[point] identifier[bits] = identifier[numpy] . identifier[concatenate] ( identifier[pieces] ) identifier[bits] . identifier[flags] . identifier[writeable] = keyword[False] keyword[return] identifier[cls] ( identifier[bits] )
def crossover_template(cls, length, points=2): """Create a crossover template with the given number of points. The crossover template can be used as a mask to crossover two bitstrings of the same length. Usage: assert len(parent1) == len(parent2) template = BitString.crossover_template(len(parent1)) inv_template = ~template child1 = (parent1 & template) | (parent2 & inv_template) child2 = (parent1 & inv_template) | (parent2 & template) Arguments: length: An int, indicating the desired length of the result. points: An int, the number of crossover points. Return: A BitString instance of the requested length which can be used as a crossover template. """ assert isinstance(length, int) and length >= 0 assert isinstance(points, int) and points >= 0 # Select the crossover points. points = random.sample(range(length + 1), points) # Prep the points for the loop. points.sort() points.append(length) # Fill the bits in with alternating ranges of 0 and 1 according to # the selected crossover points. previous = 0 include_range = bool(random.randrange(2)) pieces = [] for point in points: if point > previous: fill = numpy.ones if include_range else numpy.zeros pieces.append(fill(point - previous, dtype=bool)) # depends on [control=['if'], data=['point', 'previous']] include_range = not include_range previous = point # depends on [control=['for'], data=['point']] bits = numpy.concatenate(pieces) bits.flags.writeable = False return cls(bits)
def process_line( line, # type: Text filename, # type: str line_number, # type: int finder=None, # type: Optional[PackageFinder] comes_from=None, # type: Optional[str] options=None, # type: Optional[optparse.Values] session=None, # type: Optional[PipSession] wheel_cache=None, # type: Optional[WheelCache] use_pep517=None, # type: Optional[bool] constraint=False # type: bool ): # type: (...) -> Iterator[InstallRequirement] """Process a single requirements line; This can result in creating/yielding requirements, or updating the finder. For lines that contain requirements, the only options that have an effect are from SUPPORTED_OPTIONS_REQ, and they are scoped to the requirement. Other options from SUPPORTED_OPTIONS may be present, but are ignored. For lines that do not contain requirements, the only options that have an effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may be present, but are ignored. These lines may contain multiple options (although our docs imply only one is supported), and all our parsed and affect the finder. :param constraint: If True, parsing a constraints file. :param options: OptionParser options that we may update """ parser = build_parser(line) defaults = parser.get_default_values() defaults.index_url = None if finder: defaults.format_control = finder.format_control args_str, options_str = break_args_options(line) # Prior to 2.7.3, shlex cannot deal with unicode entries if sys.version_info < (2, 7, 3): # https://github.com/python/mypy/issues/1174 options_str = options_str.encode('utf8') # type: ignore # https://github.com/python/mypy/issues/1174 opts, _ = parser.parse_args( shlex.split(options_str), defaults) # type: ignore # preserve for the nested code path line_comes_from = '%s %s (line %s)' % ( '-c' if constraint else '-r', filename, line_number, ) # yield a line requirement if args_str: isolated = options.isolated_mode if options else False if options: cmdoptions.check_install_build_global(options, opts) # get the options that apply to requirements req_options = {} for dest in SUPPORTED_OPTIONS_REQ_DEST: if dest in opts.__dict__ and opts.__dict__[dest]: req_options[dest] = opts.__dict__[dest] yield install_req_from_line( args_str, line_comes_from, constraint=constraint, use_pep517=use_pep517, isolated=isolated, options=req_options, wheel_cache=wheel_cache ) # yield an editable requirement elif opts.editables: isolated = options.isolated_mode if options else False yield install_req_from_editable( opts.editables[0], comes_from=line_comes_from, use_pep517=use_pep517, constraint=constraint, isolated=isolated, wheel_cache=wheel_cache ) # parse a nested requirements file elif opts.requirements or opts.constraints: if opts.requirements: req_path = opts.requirements[0] nested_constraint = False else: req_path = opts.constraints[0] nested_constraint = True # original file is over http if SCHEME_RE.search(filename): # do a url join so relative paths work req_path = urllib_parse.urljoin(filename, req_path) # original file and nested file are paths elif not SCHEME_RE.search(req_path): # do a join so relative paths work req_path = os.path.join(os.path.dirname(filename), req_path) # TODO: Why not use `comes_from='-r {} (line {})'` here as well? parsed_reqs = parse_requirements( req_path, finder, comes_from, options, session, constraint=nested_constraint, wheel_cache=wheel_cache ) for req in parsed_reqs: yield req # percolate hash-checking option upward elif opts.require_hashes: options.require_hashes = opts.require_hashes # set finder options elif finder: if opts.index_url: finder.index_urls = [opts.index_url] if opts.no_index is True: finder.index_urls = [] if opts.extra_index_urls: finder.index_urls.extend(opts.extra_index_urls) if opts.find_links: # FIXME: it would be nice to keep track of the source # of the find_links: support a find-links local path # relative to a requirements file. value = opts.find_links[0] req_dir = os.path.dirname(os.path.abspath(filename)) relative_to_reqs_file = os.path.join(req_dir, value) if os.path.exists(relative_to_reqs_file): value = relative_to_reqs_file finder.find_links.append(value) if opts.pre: finder.allow_all_prereleases = True if opts.trusted_hosts: finder.secure_origins.extend( ("*", host, "*") for host in opts.trusted_hosts)
def function[process_line, parameter[line, filename, line_number, finder, comes_from, options, session, wheel_cache, use_pep517, constraint]]: constant[Process a single requirements line; This can result in creating/yielding requirements, or updating the finder. For lines that contain requirements, the only options that have an effect are from SUPPORTED_OPTIONS_REQ, and they are scoped to the requirement. Other options from SUPPORTED_OPTIONS may be present, but are ignored. For lines that do not contain requirements, the only options that have an effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may be present, but are ignored. These lines may contain multiple options (although our docs imply only one is supported), and all our parsed and affect the finder. :param constraint: If True, parsing a constraints file. :param options: OptionParser options that we may update ] variable[parser] assign[=] call[name[build_parser], parameter[name[line]]] variable[defaults] assign[=] call[name[parser].get_default_values, parameter[]] name[defaults].index_url assign[=] constant[None] if name[finder] begin[:] name[defaults].format_control assign[=] name[finder].format_control <ast.Tuple object at 0x7da18dc07700> assign[=] call[name[break_args_options], parameter[name[line]]] if compare[name[sys].version_info less[<] tuple[[<ast.Constant object at 0x7da18dc05930>, <ast.Constant object at 0x7da18dc06950>, <ast.Constant object at 0x7da18dc07af0>]]] begin[:] variable[options_str] assign[=] call[name[options_str].encode, parameter[constant[utf8]]] <ast.Tuple object at 0x7da18dc05b10> assign[=] call[name[parser].parse_args, parameter[call[name[shlex].split, parameter[name[options_str]]], name[defaults]]] variable[line_comes_from] assign[=] binary_operation[constant[%s %s (line %s)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.IfExp object at 0x7da18dc06f20>, <ast.Name object at 0x7da18dc05810>, <ast.Name object at 0x7da18dc06650>]]] if name[args_str] begin[:] variable[isolated] assign[=] <ast.IfExp object at 0x7da18dc05e70> if name[options] begin[:] call[name[cmdoptions].check_install_build_global, parameter[name[options], name[opts]]] variable[req_options] assign[=] dictionary[[], []] for taget[name[dest]] in starred[name[SUPPORTED_OPTIONS_REQ_DEST]] begin[:] if <ast.BoolOp object at 0x7da18dc06500> begin[:] call[name[req_options]][name[dest]] assign[=] call[name[opts].__dict__][name[dest]] <ast.Yield object at 0x7da18dc07010>
keyword[def] identifier[process_line] ( identifier[line] , identifier[filename] , identifier[line_number] , identifier[finder] = keyword[None] , identifier[comes_from] = keyword[None] , identifier[options] = keyword[None] , identifier[session] = keyword[None] , identifier[wheel_cache] = keyword[None] , identifier[use_pep517] = keyword[None] , identifier[constraint] = keyword[False] ): literal[string] identifier[parser] = identifier[build_parser] ( identifier[line] ) identifier[defaults] = identifier[parser] . identifier[get_default_values] () identifier[defaults] . identifier[index_url] = keyword[None] keyword[if] identifier[finder] : identifier[defaults] . identifier[format_control] = identifier[finder] . identifier[format_control] identifier[args_str] , identifier[options_str] = identifier[break_args_options] ( identifier[line] ) keyword[if] identifier[sys] . identifier[version_info] <( literal[int] , literal[int] , literal[int] ): identifier[options_str] = identifier[options_str] . identifier[encode] ( literal[string] ) identifier[opts] , identifier[_] = identifier[parser] . identifier[parse_args] ( identifier[shlex] . identifier[split] ( identifier[options_str] ), identifier[defaults] ) identifier[line_comes_from] = literal[string] %( literal[string] keyword[if] identifier[constraint] keyword[else] literal[string] , identifier[filename] , identifier[line_number] , ) keyword[if] identifier[args_str] : identifier[isolated] = identifier[options] . identifier[isolated_mode] keyword[if] identifier[options] keyword[else] keyword[False] keyword[if] identifier[options] : identifier[cmdoptions] . identifier[check_install_build_global] ( identifier[options] , identifier[opts] ) identifier[req_options] ={} keyword[for] identifier[dest] keyword[in] identifier[SUPPORTED_OPTIONS_REQ_DEST] : keyword[if] identifier[dest] keyword[in] identifier[opts] . identifier[__dict__] keyword[and] identifier[opts] . identifier[__dict__] [ identifier[dest] ]: identifier[req_options] [ identifier[dest] ]= identifier[opts] . identifier[__dict__] [ identifier[dest] ] keyword[yield] identifier[install_req_from_line] ( identifier[args_str] , identifier[line_comes_from] , identifier[constraint] = identifier[constraint] , identifier[use_pep517] = identifier[use_pep517] , identifier[isolated] = identifier[isolated] , identifier[options] = identifier[req_options] , identifier[wheel_cache] = identifier[wheel_cache] ) keyword[elif] identifier[opts] . identifier[editables] : identifier[isolated] = identifier[options] . identifier[isolated_mode] keyword[if] identifier[options] keyword[else] keyword[False] keyword[yield] identifier[install_req_from_editable] ( identifier[opts] . identifier[editables] [ literal[int] ], identifier[comes_from] = identifier[line_comes_from] , identifier[use_pep517] = identifier[use_pep517] , identifier[constraint] = identifier[constraint] , identifier[isolated] = identifier[isolated] , identifier[wheel_cache] = identifier[wheel_cache] ) keyword[elif] identifier[opts] . identifier[requirements] keyword[or] identifier[opts] . identifier[constraints] : keyword[if] identifier[opts] . identifier[requirements] : identifier[req_path] = identifier[opts] . identifier[requirements] [ literal[int] ] identifier[nested_constraint] = keyword[False] keyword[else] : identifier[req_path] = identifier[opts] . identifier[constraints] [ literal[int] ] identifier[nested_constraint] = keyword[True] keyword[if] identifier[SCHEME_RE] . identifier[search] ( identifier[filename] ): identifier[req_path] = identifier[urllib_parse] . identifier[urljoin] ( identifier[filename] , identifier[req_path] ) keyword[elif] keyword[not] identifier[SCHEME_RE] . identifier[search] ( identifier[req_path] ): identifier[req_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[filename] ), identifier[req_path] ) identifier[parsed_reqs] = identifier[parse_requirements] ( identifier[req_path] , identifier[finder] , identifier[comes_from] , identifier[options] , identifier[session] , identifier[constraint] = identifier[nested_constraint] , identifier[wheel_cache] = identifier[wheel_cache] ) keyword[for] identifier[req] keyword[in] identifier[parsed_reqs] : keyword[yield] identifier[req] keyword[elif] identifier[opts] . identifier[require_hashes] : identifier[options] . identifier[require_hashes] = identifier[opts] . identifier[require_hashes] keyword[elif] identifier[finder] : keyword[if] identifier[opts] . identifier[index_url] : identifier[finder] . identifier[index_urls] =[ identifier[opts] . identifier[index_url] ] keyword[if] identifier[opts] . identifier[no_index] keyword[is] keyword[True] : identifier[finder] . identifier[index_urls] =[] keyword[if] identifier[opts] . identifier[extra_index_urls] : identifier[finder] . identifier[index_urls] . identifier[extend] ( identifier[opts] . identifier[extra_index_urls] ) keyword[if] identifier[opts] . identifier[find_links] : identifier[value] = identifier[opts] . identifier[find_links] [ literal[int] ] identifier[req_dir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[filename] )) identifier[relative_to_reqs_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[req_dir] , identifier[value] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[relative_to_reqs_file] ): identifier[value] = identifier[relative_to_reqs_file] identifier[finder] . identifier[find_links] . identifier[append] ( identifier[value] ) keyword[if] identifier[opts] . identifier[pre] : identifier[finder] . identifier[allow_all_prereleases] = keyword[True] keyword[if] identifier[opts] . identifier[trusted_hosts] : identifier[finder] . identifier[secure_origins] . identifier[extend] ( ( literal[string] , identifier[host] , literal[string] ) keyword[for] identifier[host] keyword[in] identifier[opts] . identifier[trusted_hosts] )
def process_line(line, filename, line_number, finder=None, comes_from=None, options=None, session=None, wheel_cache=None, use_pep517=None, constraint=False): # type: Text # type: str # type: int # type: Optional[PackageFinder] # type: Optional[str] # type: Optional[optparse.Values] # type: Optional[PipSession] # type: Optional[WheelCache] # type: Optional[bool] # type: bool # type: (...) -> Iterator[InstallRequirement] 'Process a single requirements line; This can result in creating/yielding\n requirements, or updating the finder.\n\n For lines that contain requirements, the only options that have an effect\n are from SUPPORTED_OPTIONS_REQ, and they are scoped to the\n requirement. Other options from SUPPORTED_OPTIONS may be present, but are\n ignored.\n\n For lines that do not contain requirements, the only options that have an\n effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may\n be present, but are ignored. These lines may contain multiple options\n (although our docs imply only one is supported), and all our parsed and\n affect the finder.\n\n :param constraint: If True, parsing a constraints file.\n :param options: OptionParser options that we may update\n ' parser = build_parser(line) defaults = parser.get_default_values() defaults.index_url = None if finder: defaults.format_control = finder.format_control # depends on [control=['if'], data=[]] (args_str, options_str) = break_args_options(line) # Prior to 2.7.3, shlex cannot deal with unicode entries if sys.version_info < (2, 7, 3): # https://github.com/python/mypy/issues/1174 options_str = options_str.encode('utf8') # type: ignore # depends on [control=['if'], data=[]] # https://github.com/python/mypy/issues/1174 (opts, _) = parser.parse_args(shlex.split(options_str), defaults) # type: ignore # preserve for the nested code path line_comes_from = '%s %s (line %s)' % ('-c' if constraint else '-r', filename, line_number) # yield a line requirement if args_str: isolated = options.isolated_mode if options else False if options: cmdoptions.check_install_build_global(options, opts) # depends on [control=['if'], data=[]] # get the options that apply to requirements req_options = {} for dest in SUPPORTED_OPTIONS_REQ_DEST: if dest in opts.__dict__ and opts.__dict__[dest]: req_options[dest] = opts.__dict__[dest] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dest']] yield install_req_from_line(args_str, line_comes_from, constraint=constraint, use_pep517=use_pep517, isolated=isolated, options=req_options, wheel_cache=wheel_cache) # depends on [control=['if'], data=[]] # yield an editable requirement elif opts.editables: isolated = options.isolated_mode if options else False yield install_req_from_editable(opts.editables[0], comes_from=line_comes_from, use_pep517=use_pep517, constraint=constraint, isolated=isolated, wheel_cache=wheel_cache) # depends on [control=['if'], data=[]] # parse a nested requirements file elif opts.requirements or opts.constraints: if opts.requirements: req_path = opts.requirements[0] nested_constraint = False # depends on [control=['if'], data=[]] else: req_path = opts.constraints[0] nested_constraint = True # original file is over http if SCHEME_RE.search(filename): # do a url join so relative paths work req_path = urllib_parse.urljoin(filename, req_path) # depends on [control=['if'], data=[]] # original file and nested file are paths elif not SCHEME_RE.search(req_path): # do a join so relative paths work req_path = os.path.join(os.path.dirname(filename), req_path) # depends on [control=['if'], data=[]] # TODO: Why not use `comes_from='-r {} (line {})'` here as well? parsed_reqs = parse_requirements(req_path, finder, comes_from, options, session, constraint=nested_constraint, wheel_cache=wheel_cache) for req in parsed_reqs: yield req # depends on [control=['for'], data=['req']] # depends on [control=['if'], data=[]] # percolate hash-checking option upward elif opts.require_hashes: options.require_hashes = opts.require_hashes # depends on [control=['if'], data=[]] # set finder options elif finder: if opts.index_url: finder.index_urls = [opts.index_url] # depends on [control=['if'], data=[]] if opts.no_index is True: finder.index_urls = [] # depends on [control=['if'], data=[]] if opts.extra_index_urls: finder.index_urls.extend(opts.extra_index_urls) # depends on [control=['if'], data=[]] if opts.find_links: # FIXME: it would be nice to keep track of the source # of the find_links: support a find-links local path # relative to a requirements file. value = opts.find_links[0] req_dir = os.path.dirname(os.path.abspath(filename)) relative_to_reqs_file = os.path.join(req_dir, value) if os.path.exists(relative_to_reqs_file): value = relative_to_reqs_file # depends on [control=['if'], data=[]] finder.find_links.append(value) # depends on [control=['if'], data=[]] if opts.pre: finder.allow_all_prereleases = True # depends on [control=['if'], data=[]] if opts.trusted_hosts: finder.secure_origins.extend((('*', host, '*') for host in opts.trusted_hosts)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
def config_get(key, cwd=None, user=None, password=None, ignore_retcode=False, output_encoding=None, **kwargs): ''' Get the value of a key in the git configuration file key The name of the configuration key to get .. versionchanged:: 2015.8.0 Argument renamed from ``setting_name`` to ``key`` cwd The path to the git checkout .. versionchanged:: 2015.8.0 Now optional if ``global`` is set to ``True`` global : False If ``True``, query the global git configuration. Otherwise, only the local git configuration will be queried. .. versionadded:: 2015.8.0 all : False If ``True``, return a list of all values set for ``key``. If the key does not exist, ``None`` will be returned. .. versionadded:: 2015.8.0 user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 CLI Examples: .. code-block:: bash salt myminion git.config_get user.name cwd=/path/to/repo salt myminion git.config_get user.email global=True salt myminion git.config_get core.gitproxy cwd=/path/to/repo all=True ''' # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'all' as an argument to this function without # shadowing all(), while also not allowing unwanted arguments to be passed. all_ = kwargs.pop('all', False) result = _config_getter('--get-all', key, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, output_encoding=output_encoding, **kwargs) # git config --get exits with retcode of 1 when key does not exist if result['retcode'] == 1: return None ret = result['stdout'].splitlines() if all_: return ret else: try: return ret[-1] except IndexError: # Should never happen but I'm paranoid and don't like tracebacks return ''
def function[config_get, parameter[key, cwd, user, password, ignore_retcode, output_encoding]]: constant[ Get the value of a key in the git configuration file key The name of the configuration key to get .. versionchanged:: 2015.8.0 Argument renamed from ``setting_name`` to ``key`` cwd The path to the git checkout .. versionchanged:: 2015.8.0 Now optional if ``global`` is set to ``True`` global : False If ``True``, query the global git configuration. Otherwise, only the local git configuration will be queried. .. versionadded:: 2015.8.0 all : False If ``True``, return a list of all values set for ``key``. If the key does not exist, ``None`` will be returned. .. versionadded:: 2015.8.0 user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 CLI Examples: .. code-block:: bash salt myminion git.config_get user.name cwd=/path/to/repo salt myminion git.config_get user.email global=True salt myminion git.config_get core.gitproxy cwd=/path/to/repo all=True ] variable[all_] assign[=] call[name[kwargs].pop, parameter[constant[all], constant[False]]] variable[result] assign[=] call[name[_config_getter], parameter[constant[--get-all], name[key]]] if compare[call[name[result]][constant[retcode]] equal[==] constant[1]] begin[:] return[constant[None]] variable[ret] assign[=] call[call[name[result]][constant[stdout]].splitlines, parameter[]] if name[all_] begin[:] return[name[ret]]
keyword[def] identifier[config_get] ( identifier[key] , identifier[cwd] = keyword[None] , identifier[user] = keyword[None] , identifier[password] = keyword[None] , identifier[ignore_retcode] = keyword[False] , identifier[output_encoding] = keyword[None] , ** identifier[kwargs] ): literal[string] identifier[all_] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] ) identifier[result] = identifier[_config_getter] ( literal[string] , identifier[key] , identifier[cwd] = identifier[cwd] , identifier[user] = identifier[user] , identifier[password] = identifier[password] , identifier[ignore_retcode] = identifier[ignore_retcode] , identifier[output_encoding] = identifier[output_encoding] , ** identifier[kwargs] ) keyword[if] identifier[result] [ literal[string] ]== literal[int] : keyword[return] keyword[None] identifier[ret] = identifier[result] [ literal[string] ]. identifier[splitlines] () keyword[if] identifier[all_] : keyword[return] identifier[ret] keyword[else] : keyword[try] : keyword[return] identifier[ret] [- literal[int] ] keyword[except] identifier[IndexError] : keyword[return] literal[string]
def config_get(key, cwd=None, user=None, password=None, ignore_retcode=False, output_encoding=None, **kwargs): """ Get the value of a key in the git configuration file key The name of the configuration key to get .. versionchanged:: 2015.8.0 Argument renamed from ``setting_name`` to ``key`` cwd The path to the git checkout .. versionchanged:: 2015.8.0 Now optional if ``global`` is set to ``True`` global : False If ``True``, query the global git configuration. Otherwise, only the local git configuration will be queried. .. versionadded:: 2015.8.0 all : False If ``True``, return a list of all values set for ``key``. If the key does not exist, ``None`` will be returned. .. versionadded:: 2015.8.0 user User under which to run the git command. By default, the command is run by the user under which the minion is running. password Windows only. Required when specifying ``user``. This parameter will be ignored on non-Windows platforms. .. versionadded:: 2016.3.4 ignore_retcode : False If ``True``, do not log an error to the minion log if the git command returns a nonzero exit status. .. versionadded:: 2015.8.0 output_encoding Use this option to specify which encoding to use to decode the output from any git commands which are run. This should not be needed in most cases. .. note:: This should only be needed if the files in the repository were created with filenames using an encoding other than UTF-8 to handle Unicode characters. .. versionadded:: 2018.3.1 CLI Examples: .. code-block:: bash salt myminion git.config_get user.name cwd=/path/to/repo salt myminion git.config_get user.email global=True salt myminion git.config_get core.gitproxy cwd=/path/to/repo all=True """ # Sanitize kwargs and make sure that no invalid ones were passed. This # allows us to accept 'all' as an argument to this function without # shadowing all(), while also not allowing unwanted arguments to be passed. all_ = kwargs.pop('all', False) result = _config_getter('--get-all', key, cwd=cwd, user=user, password=password, ignore_retcode=ignore_retcode, output_encoding=output_encoding, **kwargs) # git config --get exits with retcode of 1 when key does not exist if result['retcode'] == 1: return None # depends on [control=['if'], data=[]] ret = result['stdout'].splitlines() if all_: return ret # depends on [control=['if'], data=[]] else: try: return ret[-1] # depends on [control=['try'], data=[]] except IndexError: # Should never happen but I'm paranoid and don't like tracebacks return '' # depends on [control=['except'], data=[]]
def parse(file_to_parse): # suppress(unused-function) """Check file_to_parse for a shebang and return its components. :file_to_parse: can be either a filename or an open file object. If file_to_parse's extension exists in PATHEXT then an empty list will be returned, as it is assumed that the operating system knows how to handle files of this type. All other files will be opened and read. """ if hasattr(file_to_parse, "read"): return _parse(file_to_parse) elif isinstance(file_to_parse, six.string_types): if "PATHEXT" in os.environ: path_ext = os.environ["PATHEXT"].split(os.pathsep) else: path_ext = () if os.path.splitext(file_to_parse)[1] in path_ext: return [] with open(file_to_parse, "r") as fileobj: return _parse(fileobj) raise RuntimeError("""{} was not a file-like """ """object.""".format(repr(file_to_parse)))
def function[parse, parameter[file_to_parse]]: constant[Check file_to_parse for a shebang and return its components. :file_to_parse: can be either a filename or an open file object. If file_to_parse's extension exists in PATHEXT then an empty list will be returned, as it is assumed that the operating system knows how to handle files of this type. All other files will be opened and read. ] if call[name[hasattr], parameter[name[file_to_parse], constant[read]]] begin[:] return[call[name[_parse], parameter[name[file_to_parse]]]] <ast.Raise object at 0x7da18c4cf460>
keyword[def] identifier[parse] ( identifier[file_to_parse] ): literal[string] keyword[if] identifier[hasattr] ( identifier[file_to_parse] , literal[string] ): keyword[return] identifier[_parse] ( identifier[file_to_parse] ) keyword[elif] identifier[isinstance] ( identifier[file_to_parse] , identifier[six] . identifier[string_types] ): keyword[if] literal[string] keyword[in] identifier[os] . identifier[environ] : identifier[path_ext] = identifier[os] . identifier[environ] [ literal[string] ]. identifier[split] ( identifier[os] . identifier[pathsep] ) keyword[else] : identifier[path_ext] =() keyword[if] identifier[os] . identifier[path] . identifier[splitext] ( identifier[file_to_parse] )[ literal[int] ] keyword[in] identifier[path_ext] : keyword[return] [] keyword[with] identifier[open] ( identifier[file_to_parse] , literal[string] ) keyword[as] identifier[fileobj] : keyword[return] identifier[_parse] ( identifier[fileobj] ) keyword[raise] identifier[RuntimeError] ( literal[string] literal[string] . identifier[format] ( identifier[repr] ( identifier[file_to_parse] )))
def parse(file_to_parse): # suppress(unused-function) "Check file_to_parse for a shebang and return its components.\n\n :file_to_parse: can be either a filename or an open file object.\n\n If file_to_parse's extension exists in PATHEXT then an empty list\n will be returned, as it is assumed that the operating system knows\n how to handle files of this type. All other files will be opened\n and read.\n " if hasattr(file_to_parse, 'read'): return _parse(file_to_parse) # depends on [control=['if'], data=[]] elif isinstance(file_to_parse, six.string_types): if 'PATHEXT' in os.environ: path_ext = os.environ['PATHEXT'].split(os.pathsep) # depends on [control=['if'], data=[]] else: path_ext = () if os.path.splitext(file_to_parse)[1] in path_ext: return [] # depends on [control=['if'], data=[]] with open(file_to_parse, 'r') as fileobj: return _parse(fileobj) # depends on [control=['with'], data=['fileobj']] # depends on [control=['if'], data=[]] raise RuntimeError('{} was not a file-like object.'.format(repr(file_to_parse)))
def touch(self, message_id, reservation_id, timeout=None): """Touching a reserved message extends its timeout to the duration specified when the message was created. Arguments: message_id -- The ID of the message. reservation_id -- Reservation Id of the message. timeout -- Optional. The timeout in seconds after which new reservation will expire. """ url = "queues/%s/messages/%s/touch" % (self.name, message_id) qitems = {'reservation_id': reservation_id} if timeout is not None: qitems['timeout'] = timeout body = json.dumps(qitems) response = self.client.post(url, body=body, headers={'Content-Type': 'application/json'}) return response['body']
def function[touch, parameter[self, message_id, reservation_id, timeout]]: constant[Touching a reserved message extends its timeout to the duration specified when the message was created. Arguments: message_id -- The ID of the message. reservation_id -- Reservation Id of the message. timeout -- Optional. The timeout in seconds after which new reservation will expire. ] variable[url] assign[=] binary_operation[constant[queues/%s/messages/%s/touch] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da18bc730a0>, <ast.Name object at 0x7da18bc71630>]]] variable[qitems] assign[=] dictionary[[<ast.Constant object at 0x7da18bc723e0>], [<ast.Name object at 0x7da18bc70580>]] if compare[name[timeout] is_not constant[None]] begin[:] call[name[qitems]][constant[timeout]] assign[=] name[timeout] variable[body] assign[=] call[name[json].dumps, parameter[name[qitems]]] variable[response] assign[=] call[name[self].client.post, parameter[name[url]]] return[call[name[response]][constant[body]]]
keyword[def] identifier[touch] ( identifier[self] , identifier[message_id] , identifier[reservation_id] , identifier[timeout] = keyword[None] ): literal[string] identifier[url] = literal[string] %( identifier[self] . identifier[name] , identifier[message_id] ) identifier[qitems] ={ literal[string] : identifier[reservation_id] } keyword[if] identifier[timeout] keyword[is] keyword[not] keyword[None] : identifier[qitems] [ literal[string] ]= identifier[timeout] identifier[body] = identifier[json] . identifier[dumps] ( identifier[qitems] ) identifier[response] = identifier[self] . identifier[client] . identifier[post] ( identifier[url] , identifier[body] = identifier[body] , identifier[headers] ={ literal[string] : literal[string] }) keyword[return] identifier[response] [ literal[string] ]
def touch(self, message_id, reservation_id, timeout=None): """Touching a reserved message extends its timeout to the duration specified when the message was created. Arguments: message_id -- The ID of the message. reservation_id -- Reservation Id of the message. timeout -- Optional. The timeout in seconds after which new reservation will expire. """ url = 'queues/%s/messages/%s/touch' % (self.name, message_id) qitems = {'reservation_id': reservation_id} if timeout is not None: qitems['timeout'] = timeout # depends on [control=['if'], data=['timeout']] body = json.dumps(qitems) response = self.client.post(url, body=body, headers={'Content-Type': 'application/json'}) return response['body']
def _process_priv_part(perms): ''' Process part ''' _tmp = {} previous = None for perm in perms: if previous is None: _tmp[_PRIVILEGES_MAP[perm]] = False previous = _PRIVILEGES_MAP[perm] else: if perm == '*': _tmp[previous] = True else: _tmp[_PRIVILEGES_MAP[perm]] = False previous = _PRIVILEGES_MAP[perm] return _tmp
def function[_process_priv_part, parameter[perms]]: constant[ Process part ] variable[_tmp] assign[=] dictionary[[], []] variable[previous] assign[=] constant[None] for taget[name[perm]] in starred[name[perms]] begin[:] if compare[name[previous] is constant[None]] begin[:] call[name[_tmp]][call[name[_PRIVILEGES_MAP]][name[perm]]] assign[=] constant[False] variable[previous] assign[=] call[name[_PRIVILEGES_MAP]][name[perm]] return[name[_tmp]]
keyword[def] identifier[_process_priv_part] ( identifier[perms] ): literal[string] identifier[_tmp] ={} identifier[previous] = keyword[None] keyword[for] identifier[perm] keyword[in] identifier[perms] : keyword[if] identifier[previous] keyword[is] keyword[None] : identifier[_tmp] [ identifier[_PRIVILEGES_MAP] [ identifier[perm] ]]= keyword[False] identifier[previous] = identifier[_PRIVILEGES_MAP] [ identifier[perm] ] keyword[else] : keyword[if] identifier[perm] == literal[string] : identifier[_tmp] [ identifier[previous] ]= keyword[True] keyword[else] : identifier[_tmp] [ identifier[_PRIVILEGES_MAP] [ identifier[perm] ]]= keyword[False] identifier[previous] = identifier[_PRIVILEGES_MAP] [ identifier[perm] ] keyword[return] identifier[_tmp]
def _process_priv_part(perms): """ Process part """ _tmp = {} previous = None for perm in perms: if previous is None: _tmp[_PRIVILEGES_MAP[perm]] = False previous = _PRIVILEGES_MAP[perm] # depends on [control=['if'], data=['previous']] elif perm == '*': _tmp[previous] = True # depends on [control=['if'], data=[]] else: _tmp[_PRIVILEGES_MAP[perm]] = False previous = _PRIVILEGES_MAP[perm] # depends on [control=['for'], data=['perm']] return _tmp
def remove_independent_variable(self, variable_name): """ Remove an independent variable which was added with add_independent_variable :param variable_name: name of variable to remove :return: """ self._remove_child(variable_name) # Remove also from the list of independent variables self._independent_variables.pop(variable_name)
def function[remove_independent_variable, parameter[self, variable_name]]: constant[ Remove an independent variable which was added with add_independent_variable :param variable_name: name of variable to remove :return: ] call[name[self]._remove_child, parameter[name[variable_name]]] call[name[self]._independent_variables.pop, parameter[name[variable_name]]]
keyword[def] identifier[remove_independent_variable] ( identifier[self] , identifier[variable_name] ): literal[string] identifier[self] . identifier[_remove_child] ( identifier[variable_name] ) identifier[self] . identifier[_independent_variables] . identifier[pop] ( identifier[variable_name] )
def remove_independent_variable(self, variable_name): """ Remove an independent variable which was added with add_independent_variable :param variable_name: name of variable to remove :return: """ self._remove_child(variable_name) # Remove also from the list of independent variables self._independent_variables.pop(variable_name)
def list_previous_page(self): """ When paging through results, this will return the previous page, using the same limit. If there are no more results, a NoMoreResults exception will be raised. """ uri = self._paging.get("domain", {}).get("prev_uri") if uri is None: raise exc.NoMoreResults("There are no previous pages of domains " "to list.") return self._list(uri)
def function[list_previous_page, parameter[self]]: constant[ When paging through results, this will return the previous page, using the same limit. If there are no more results, a NoMoreResults exception will be raised. ] variable[uri] assign[=] call[call[name[self]._paging.get, parameter[constant[domain], dictionary[[], []]]].get, parameter[constant[prev_uri]]] if compare[name[uri] is constant[None]] begin[:] <ast.Raise object at 0x7da20e961720> return[call[name[self]._list, parameter[name[uri]]]]
keyword[def] identifier[list_previous_page] ( identifier[self] ): literal[string] identifier[uri] = identifier[self] . identifier[_paging] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] ) keyword[if] identifier[uri] keyword[is] keyword[None] : keyword[raise] identifier[exc] . identifier[NoMoreResults] ( literal[string] literal[string] ) keyword[return] identifier[self] . identifier[_list] ( identifier[uri] )
def list_previous_page(self): """ When paging through results, this will return the previous page, using the same limit. If there are no more results, a NoMoreResults exception will be raised. """ uri = self._paging.get('domain', {}).get('prev_uri') if uri is None: raise exc.NoMoreResults('There are no previous pages of domains to list.') # depends on [control=['if'], data=[]] return self._list(uri)
def from_file(cls, filename, name=''): "Imports a mass table from a file" df = pd.read_csv(filename, header=0, delim_whitespace=True, index_col=[0, 1])['M'] df.name = name return cls(df=df, name=name)
def function[from_file, parameter[cls, filename, name]]: constant[Imports a mass table from a file] variable[df] assign[=] call[call[name[pd].read_csv, parameter[name[filename]]]][constant[M]] name[df].name assign[=] name[name] return[call[name[cls], parameter[]]]
keyword[def] identifier[from_file] ( identifier[cls] , identifier[filename] , identifier[name] = literal[string] ): literal[string] identifier[df] = identifier[pd] . identifier[read_csv] ( identifier[filename] , identifier[header] = literal[int] , identifier[delim_whitespace] = keyword[True] , identifier[index_col] =[ literal[int] , literal[int] ])[ literal[string] ] identifier[df] . identifier[name] = identifier[name] keyword[return] identifier[cls] ( identifier[df] = identifier[df] , identifier[name] = identifier[name] )
def from_file(cls, filename, name=''): """Imports a mass table from a file""" df = pd.read_csv(filename, header=0, delim_whitespace=True, index_col=[0, 1])['M'] df.name = name return cls(df=df, name=name)
async def update_price_info(self): """Update price info async.""" query = gql( """ { viewer { home(id: "%s") { currentSubscription { priceInfo { current { energy tax total startsAt level } today { total startsAt level } tomorrow { total startsAt level } } } } } } """ % self.home_id ) price_info_temp = await self._tibber_control.execute(query) if not price_info_temp: _LOGGER.error("Could not find price info.") return self._price_info = {} self._level_info = {} for key in ["current", "today", "tomorrow"]: try: home = price_info_temp["viewer"]["home"] current_subscription = home["currentSubscription"] price_info = current_subscription["priceInfo"][key] except (KeyError, TypeError): _LOGGER.error("Could not find price info for %s.", key) continue if key == "current": self._current_price_info = price_info continue for data in price_info: self._price_info[data.get("startsAt")] = data.get("total") self._level_info[data.get("startsAt")] = data.get("level")
<ast.AsyncFunctionDef object at 0x7da1afe6de10>
keyword[async] keyword[def] identifier[update_price_info] ( identifier[self] ): literal[string] identifier[query] = identifier[gql] ( literal[string] % identifier[self] . identifier[home_id] ) identifier[price_info_temp] = keyword[await] identifier[self] . identifier[_tibber_control] . identifier[execute] ( identifier[query] ) keyword[if] keyword[not] identifier[price_info_temp] : identifier[_LOGGER] . identifier[error] ( literal[string] ) keyword[return] identifier[self] . identifier[_price_info] ={} identifier[self] . identifier[_level_info] ={} keyword[for] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] ]: keyword[try] : identifier[home] = identifier[price_info_temp] [ literal[string] ][ literal[string] ] identifier[current_subscription] = identifier[home] [ literal[string] ] identifier[price_info] = identifier[current_subscription] [ literal[string] ][ identifier[key] ] keyword[except] ( identifier[KeyError] , identifier[TypeError] ): identifier[_LOGGER] . identifier[error] ( literal[string] , identifier[key] ) keyword[continue] keyword[if] identifier[key] == literal[string] : identifier[self] . identifier[_current_price_info] = identifier[price_info] keyword[continue] keyword[for] identifier[data] keyword[in] identifier[price_info] : identifier[self] . identifier[_price_info] [ identifier[data] . identifier[get] ( literal[string] )]= identifier[data] . identifier[get] ( literal[string] ) identifier[self] . identifier[_level_info] [ identifier[data] . identifier[get] ( literal[string] )]= identifier[data] . identifier[get] ( literal[string] )
async def update_price_info(self): """Update price info async.""" query = gql('\n {\n viewer {\n home(id: "%s") {\n currentSubscription {\n priceInfo {\n current {\n energy\n tax\n total\n startsAt\n level\n }\n today {\n total\n startsAt\n level\n }\n tomorrow {\n total\n startsAt\n level\n }\n }\n }\n }\n }\n }\n ' % self.home_id) price_info_temp = await self._tibber_control.execute(query) if not price_info_temp: _LOGGER.error('Could not find price info.') return # depends on [control=['if'], data=[]] self._price_info = {} self._level_info = {} for key in ['current', 'today', 'tomorrow']: try: home = price_info_temp['viewer']['home'] current_subscription = home['currentSubscription'] price_info = current_subscription['priceInfo'][key] # depends on [control=['try'], data=[]] except (KeyError, TypeError): _LOGGER.error('Could not find price info for %s.', key) continue # depends on [control=['except'], data=[]] if key == 'current': self._current_price_info = price_info continue # depends on [control=['if'], data=[]] for data in price_info: self._price_info[data.get('startsAt')] = data.get('total') self._level_info[data.get('startsAt')] = data.get('level') # depends on [control=['for'], data=['data']] # depends on [control=['for'], data=['key']]
def call_ext_prog(self, prog, timeout=300, stderr=True, chroot=True, runat=None): """Execute a command independantly of the output gathering part of sosreport. """ return self.get_command_output(prog, timeout=timeout, stderr=stderr, chroot=chroot, runat=runat)
def function[call_ext_prog, parameter[self, prog, timeout, stderr, chroot, runat]]: constant[Execute a command independantly of the output gathering part of sosreport. ] return[call[name[self].get_command_output, parameter[name[prog]]]]
keyword[def] identifier[call_ext_prog] ( identifier[self] , identifier[prog] , identifier[timeout] = literal[int] , identifier[stderr] = keyword[True] , identifier[chroot] = keyword[True] , identifier[runat] = keyword[None] ): literal[string] keyword[return] identifier[self] . identifier[get_command_output] ( identifier[prog] , identifier[timeout] = identifier[timeout] , identifier[stderr] = identifier[stderr] , identifier[chroot] = identifier[chroot] , identifier[runat] = identifier[runat] )
def call_ext_prog(self, prog, timeout=300, stderr=True, chroot=True, runat=None): """Execute a command independantly of the output gathering part of sosreport. """ return self.get_command_output(prog, timeout=timeout, stderr=stderr, chroot=chroot, runat=runat)
def is_abstract_model(model): """ Given a model class, returns a boolean True if it is abstract and False if it is not. """ return hasattr(model, '_meta') and hasattr(model._meta, 'abstract') and model._meta.abstract
def function[is_abstract_model, parameter[model]]: constant[ Given a model class, returns a boolean True if it is abstract and False if it is not. ] return[<ast.BoolOp object at 0x7da1b1300250>]
keyword[def] identifier[is_abstract_model] ( identifier[model] ): literal[string] keyword[return] identifier[hasattr] ( identifier[model] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[model] . identifier[_meta] , literal[string] ) keyword[and] identifier[model] . identifier[_meta] . identifier[abstract]
def is_abstract_model(model): """ Given a model class, returns a boolean True if it is abstract and False if it is not. """ return hasattr(model, '_meta') and hasattr(model._meta, 'abstract') and model._meta.abstract
def _gorg(a): """Return the farthest origin of a generic class (internal helper).""" assert isinstance(a, GenericMeta) while a.__origin__ is not None: a = a.__origin__ return a
def function[_gorg, parameter[a]]: constant[Return the farthest origin of a generic class (internal helper).] assert[call[name[isinstance], parameter[name[a], name[GenericMeta]]]] while compare[name[a].__origin__ is_not constant[None]] begin[:] variable[a] assign[=] name[a].__origin__ return[name[a]]
keyword[def] identifier[_gorg] ( identifier[a] ): literal[string] keyword[assert] identifier[isinstance] ( identifier[a] , identifier[GenericMeta] ) keyword[while] identifier[a] . identifier[__origin__] keyword[is] keyword[not] keyword[None] : identifier[a] = identifier[a] . identifier[__origin__] keyword[return] identifier[a]
def _gorg(a): """Return the farthest origin of a generic class (internal helper).""" assert isinstance(a, GenericMeta) while a.__origin__ is not None: a = a.__origin__ # depends on [control=['while'], data=[]] return a
def from_cdms2(variable): """Convert a cdms2 variable into an DataArray """ values = np.asarray(variable) name = variable.id dims = variable.getAxisIds() coords = {} for axis in variable.getAxisList(): coords[axis.id] = DataArray( np.asarray(axis), dims=[axis.id], attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs)) grid = variable.getGrid() if grid is not None: ids = [a.id for a in grid.getAxisList()] for axis in grid.getLongitude(), grid.getLatitude(): if axis.id not in variable.getAxisIds(): coords[axis.id] = DataArray( np.asarray(axis[:]), dims=ids, attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs)) attrs = _filter_attrs(variable.attributes, cdms2_ignored_attrs) dataarray = DataArray(values, dims=dims, coords=coords, name=name, attrs=attrs) return decode_cf(dataarray.to_dataset())[dataarray.name]
def function[from_cdms2, parameter[variable]]: constant[Convert a cdms2 variable into an DataArray ] variable[values] assign[=] call[name[np].asarray, parameter[name[variable]]] variable[name] assign[=] name[variable].id variable[dims] assign[=] call[name[variable].getAxisIds, parameter[]] variable[coords] assign[=] dictionary[[], []] for taget[name[axis]] in starred[call[name[variable].getAxisList, parameter[]]] begin[:] call[name[coords]][name[axis].id] assign[=] call[name[DataArray], parameter[call[name[np].asarray, parameter[name[axis]]]]] variable[grid] assign[=] call[name[variable].getGrid, parameter[]] if compare[name[grid] is_not constant[None]] begin[:] variable[ids] assign[=] <ast.ListComp object at 0x7da18f09d300> for taget[name[axis]] in starred[tuple[[<ast.Call object at 0x7da1b1f95450>, <ast.Call object at 0x7da1b1f95db0>]]] begin[:] if compare[name[axis].id <ast.NotIn object at 0x7da2590d7190> call[name[variable].getAxisIds, parameter[]]] begin[:] call[name[coords]][name[axis].id] assign[=] call[name[DataArray], parameter[call[name[np].asarray, parameter[call[name[axis]][<ast.Slice object at 0x7da1b1f94ee0>]]]]] variable[attrs] assign[=] call[name[_filter_attrs], parameter[name[variable].attributes, name[cdms2_ignored_attrs]]] variable[dataarray] assign[=] call[name[DataArray], parameter[name[values]]] return[call[call[name[decode_cf], parameter[call[name[dataarray].to_dataset, parameter[]]]]][name[dataarray].name]]
keyword[def] identifier[from_cdms2] ( identifier[variable] ): literal[string] identifier[values] = identifier[np] . identifier[asarray] ( identifier[variable] ) identifier[name] = identifier[variable] . identifier[id] identifier[dims] = identifier[variable] . identifier[getAxisIds] () identifier[coords] ={} keyword[for] identifier[axis] keyword[in] identifier[variable] . identifier[getAxisList] (): identifier[coords] [ identifier[axis] . identifier[id] ]= identifier[DataArray] ( identifier[np] . identifier[asarray] ( identifier[axis] ), identifier[dims] =[ identifier[axis] . identifier[id] ], identifier[attrs] = identifier[_filter_attrs] ( identifier[axis] . identifier[attributes] , identifier[cdms2_ignored_attrs] )) identifier[grid] = identifier[variable] . identifier[getGrid] () keyword[if] identifier[grid] keyword[is] keyword[not] keyword[None] : identifier[ids] =[ identifier[a] . identifier[id] keyword[for] identifier[a] keyword[in] identifier[grid] . identifier[getAxisList] ()] keyword[for] identifier[axis] keyword[in] identifier[grid] . identifier[getLongitude] (), identifier[grid] . identifier[getLatitude] (): keyword[if] identifier[axis] . identifier[id] keyword[not] keyword[in] identifier[variable] . identifier[getAxisIds] (): identifier[coords] [ identifier[axis] . identifier[id] ]= identifier[DataArray] ( identifier[np] . identifier[asarray] ( identifier[axis] [:]), identifier[dims] = identifier[ids] , identifier[attrs] = identifier[_filter_attrs] ( identifier[axis] . identifier[attributes] , identifier[cdms2_ignored_attrs] )) identifier[attrs] = identifier[_filter_attrs] ( identifier[variable] . identifier[attributes] , identifier[cdms2_ignored_attrs] ) identifier[dataarray] = identifier[DataArray] ( identifier[values] , identifier[dims] = identifier[dims] , identifier[coords] = identifier[coords] , identifier[name] = identifier[name] , identifier[attrs] = identifier[attrs] ) keyword[return] identifier[decode_cf] ( identifier[dataarray] . identifier[to_dataset] ())[ identifier[dataarray] . identifier[name] ]
def from_cdms2(variable): """Convert a cdms2 variable into an DataArray """ values = np.asarray(variable) name = variable.id dims = variable.getAxisIds() coords = {} for axis in variable.getAxisList(): coords[axis.id] = DataArray(np.asarray(axis), dims=[axis.id], attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs)) # depends on [control=['for'], data=['axis']] grid = variable.getGrid() if grid is not None: ids = [a.id for a in grid.getAxisList()] for axis in (grid.getLongitude(), grid.getLatitude()): if axis.id not in variable.getAxisIds(): coords[axis.id] = DataArray(np.asarray(axis[:]), dims=ids, attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['axis']] # depends on [control=['if'], data=['grid']] attrs = _filter_attrs(variable.attributes, cdms2_ignored_attrs) dataarray = DataArray(values, dims=dims, coords=coords, name=name, attrs=attrs) return decode_cf(dataarray.to_dataset())[dataarray.name]
def _is_string(thing): """Check that **thing** is a string. The definition of the latter depends upon the Python version. :param thing: The thing to check if it's a string. :rtype: bool :returns: ``True`` if **thing** is string (or unicode in Python2). """ if (_py3k and isinstance(thing, str)): return True if (not _py3k and isinstance(thing, basestring)): return True return False
def function[_is_string, parameter[thing]]: constant[Check that **thing** is a string. The definition of the latter depends upon the Python version. :param thing: The thing to check if it's a string. :rtype: bool :returns: ``True`` if **thing** is string (or unicode in Python2). ] if <ast.BoolOp object at 0x7da18bcc9c60> begin[:] return[constant[True]] if <ast.BoolOp object at 0x7da18bccbbe0> begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[_is_string] ( identifier[thing] ): literal[string] keyword[if] ( identifier[_py3k] keyword[and] identifier[isinstance] ( identifier[thing] , identifier[str] )): keyword[return] keyword[True] keyword[if] ( keyword[not] identifier[_py3k] keyword[and] identifier[isinstance] ( identifier[thing] , identifier[basestring] )): keyword[return] keyword[True] keyword[return] keyword[False]
def _is_string(thing): """Check that **thing** is a string. The definition of the latter depends upon the Python version. :param thing: The thing to check if it's a string. :rtype: bool :returns: ``True`` if **thing** is string (or unicode in Python2). """ if _py3k and isinstance(thing, str): return True # depends on [control=['if'], data=[]] if not _py3k and isinstance(thing, basestring): return True # depends on [control=['if'], data=[]] return False
def outfile(self): """Path of the output file""" return os.path.join(OPTIONS['base_dir'], '{0}.{1}'.format(self.name, OPTIONS['out_ext']))
def function[outfile, parameter[self]]: constant[Path of the output file] return[call[name[os].path.join, parameter[call[name[OPTIONS]][constant[base_dir]], call[constant[{0}.{1}].format, parameter[name[self].name, call[name[OPTIONS]][constant[out_ext]]]]]]]
keyword[def] identifier[outfile] ( identifier[self] ): literal[string] keyword[return] identifier[os] . identifier[path] . identifier[join] ( identifier[OPTIONS] [ literal[string] ], literal[string] . identifier[format] ( identifier[self] . identifier[name] , identifier[OPTIONS] [ literal[string] ]))
def outfile(self): """Path of the output file""" return os.path.join(OPTIONS['base_dir'], '{0}.{1}'.format(self.name, OPTIONS['out_ext']))
def load_table_from_uri( self, source_uris, destination, job_id=None, job_id_prefix=None, location=None, project=None, job_config=None, retry=DEFAULT_RETRY, ): """Starts a job for loading data into a table from CloudStorage. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load Arguments: source_uris (Union[str, Sequence[str]]): URIs of data files to be loaded; in format ``gs://<bucket_name>/<object_name_or_glob>``. destination (Union[ \ :class:`~google.cloud.bigquery.table.Table`, \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ]): Table into which data is to be loaded. If a string is passed in, this method attempts to create a table reference from a string using :func:`google.cloud.bigquery.table.TableReference.from_string`. Keyword Arguments: job_id (str): (Optional) Name of the job. job_id_prefix (str): (Optional) the user-provided prefix for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of the destination table. project (str): Project ID of the project of where to run the job. Defaults to the client's project. job_config (google.cloud.bigquery.job.LoadJobConfig): (Optional) Extra configuration options for the job. retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.job.LoadJob: A new load job. """ job_id = _make_job_id(job_id, job_id_prefix) if project is None: project = self.project if location is None: location = self.location job_ref = job._JobReference(job_id, project=project, location=location) if isinstance(source_uris, six.string_types): source_uris = [source_uris] destination = _table_arg_to_table_ref(destination, default_project=self.project) load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config) load_job._begin(retry=retry) return load_job
def function[load_table_from_uri, parameter[self, source_uris, destination, job_id, job_id_prefix, location, project, job_config, retry]]: constant[Starts a job for loading data into a table from CloudStorage. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load Arguments: source_uris (Union[str, Sequence[str]]): URIs of data files to be loaded; in format ``gs://<bucket_name>/<object_name_or_glob>``. destination (Union[ :class:`~google.cloud.bigquery.table.Table`, :class:`~google.cloud.bigquery.table.TableReference`, str, ]): Table into which data is to be loaded. If a string is passed in, this method attempts to create a table reference from a string using :func:`google.cloud.bigquery.table.TableReference.from_string`. Keyword Arguments: job_id (str): (Optional) Name of the job. job_id_prefix (str): (Optional) the user-provided prefix for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of the destination table. project (str): Project ID of the project of where to run the job. Defaults to the client's project. job_config (google.cloud.bigquery.job.LoadJobConfig): (Optional) Extra configuration options for the job. retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.job.LoadJob: A new load job. ] variable[job_id] assign[=] call[name[_make_job_id], parameter[name[job_id], name[job_id_prefix]]] if compare[name[project] is constant[None]] begin[:] variable[project] assign[=] name[self].project if compare[name[location] is constant[None]] begin[:] variable[location] assign[=] name[self].location variable[job_ref] assign[=] call[name[job]._JobReference, parameter[name[job_id]]] if call[name[isinstance], parameter[name[source_uris], name[six].string_types]] begin[:] variable[source_uris] assign[=] list[[<ast.Name object at 0x7da20e9571f0>]] variable[destination] assign[=] call[name[_table_arg_to_table_ref], parameter[name[destination]]] variable[load_job] assign[=] call[name[job].LoadJob, parameter[name[job_ref], name[source_uris], name[destination], name[self], name[job_config]]] call[name[load_job]._begin, parameter[]] return[name[load_job]]
keyword[def] identifier[load_table_from_uri] ( identifier[self] , identifier[source_uris] , identifier[destination] , identifier[job_id] = keyword[None] , identifier[job_id_prefix] = keyword[None] , identifier[location] = keyword[None] , identifier[project] = keyword[None] , identifier[job_config] = keyword[None] , identifier[retry] = identifier[DEFAULT_RETRY] , ): literal[string] identifier[job_id] = identifier[_make_job_id] ( identifier[job_id] , identifier[job_id_prefix] ) keyword[if] identifier[project] keyword[is] keyword[None] : identifier[project] = identifier[self] . identifier[project] keyword[if] identifier[location] keyword[is] keyword[None] : identifier[location] = identifier[self] . identifier[location] identifier[job_ref] = identifier[job] . identifier[_JobReference] ( identifier[job_id] , identifier[project] = identifier[project] , identifier[location] = identifier[location] ) keyword[if] identifier[isinstance] ( identifier[source_uris] , identifier[six] . identifier[string_types] ): identifier[source_uris] =[ identifier[source_uris] ] identifier[destination] = identifier[_table_arg_to_table_ref] ( identifier[destination] , identifier[default_project] = identifier[self] . identifier[project] ) identifier[load_job] = identifier[job] . identifier[LoadJob] ( identifier[job_ref] , identifier[source_uris] , identifier[destination] , identifier[self] , identifier[job_config] ) identifier[load_job] . identifier[_begin] ( identifier[retry] = identifier[retry] ) keyword[return] identifier[load_job]
def load_table_from_uri(self, source_uris, destination, job_id=None, job_id_prefix=None, location=None, project=None, job_config=None, retry=DEFAULT_RETRY): """Starts a job for loading data into a table from CloudStorage. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load Arguments: source_uris (Union[str, Sequence[str]]): URIs of data files to be loaded; in format ``gs://<bucket_name>/<object_name_or_glob>``. destination (Union[ :class:`~google.cloud.bigquery.table.Table`, :class:`~google.cloud.bigquery.table.TableReference`, str, ]): Table into which data is to be loaded. If a string is passed in, this method attempts to create a table reference from a string using :func:`google.cloud.bigquery.table.TableReference.from_string`. Keyword Arguments: job_id (str): (Optional) Name of the job. job_id_prefix (str): (Optional) the user-provided prefix for a randomly generated job ID. This parameter will be ignored if a ``job_id`` is also given. location (str): Location where to run the job. Must match the location of the destination table. project (str): Project ID of the project of where to run the job. Defaults to the client's project. job_config (google.cloud.bigquery.job.LoadJobConfig): (Optional) Extra configuration options for the job. retry (google.api_core.retry.Retry): (Optional) How to retry the RPC. Returns: google.cloud.bigquery.job.LoadJob: A new load job. """ job_id = _make_job_id(job_id, job_id_prefix) if project is None: project = self.project # depends on [control=['if'], data=['project']] if location is None: location = self.location # depends on [control=['if'], data=['location']] job_ref = job._JobReference(job_id, project=project, location=location) if isinstance(source_uris, six.string_types): source_uris = [source_uris] # depends on [control=['if'], data=[]] destination = _table_arg_to_table_ref(destination, default_project=self.project) load_job = job.LoadJob(job_ref, source_uris, destination, self, job_config) load_job._begin(retry=retry) return load_job
def finditer(self, string, pos=0, endpos=sys.maxint): """Return a list of all non-overlapping matches of pattern in string.""" scanner = self.scanner(string, pos, endpos) return iter(scanner.search, None)
def function[finditer, parameter[self, string, pos, endpos]]: constant[Return a list of all non-overlapping matches of pattern in string.] variable[scanner] assign[=] call[name[self].scanner, parameter[name[string], name[pos], name[endpos]]] return[call[name[iter], parameter[name[scanner].search, constant[None]]]]
keyword[def] identifier[finditer] ( identifier[self] , identifier[string] , identifier[pos] = literal[int] , identifier[endpos] = identifier[sys] . identifier[maxint] ): literal[string] identifier[scanner] = identifier[self] . identifier[scanner] ( identifier[string] , identifier[pos] , identifier[endpos] ) keyword[return] identifier[iter] ( identifier[scanner] . identifier[search] , keyword[None] )
def finditer(self, string, pos=0, endpos=sys.maxint): """Return a list of all non-overlapping matches of pattern in string.""" scanner = self.scanner(string, pos, endpos) return iter(scanner.search, None)
def first_older_than_second(version_a, version_b): """ Tests for the NSS version string in the first parameter being less recent than the second (a < b). Tag order is RTM > RC > BETA > *. Works with hg tags like "NSS_3_7_9_RTM" and version strings reported by nsINSSVersion, like "3.18 Basic ECC Beta" (mixed, too). :param version_a: a NSS version string :param version_b: another NSS version string :return: bool (a < b) """ a = nssversion.to_ints(version_a) b = nssversion.to_ints(version_b) # must be of equal length assert len(a) == len(b) # Compare each version component, bail out on difference for i in xrange(len(a)): if b[i] < a[i]: return False if b[i] > a[i]: return True return False
def function[first_older_than_second, parameter[version_a, version_b]]: constant[ Tests for the NSS version string in the first parameter being less recent than the second (a < b). Tag order is RTM > RC > BETA > *. Works with hg tags like "NSS_3_7_9_RTM" and version strings reported by nsINSSVersion, like "3.18 Basic ECC Beta" (mixed, too). :param version_a: a NSS version string :param version_b: another NSS version string :return: bool (a < b) ] variable[a] assign[=] call[name[nssversion].to_ints, parameter[name[version_a]]] variable[b] assign[=] call[name[nssversion].to_ints, parameter[name[version_b]]] assert[compare[call[name[len], parameter[name[a]]] equal[==] call[name[len], parameter[name[b]]]]] for taget[name[i]] in starred[call[name[xrange], parameter[call[name[len], parameter[name[a]]]]]] begin[:] if compare[call[name[b]][name[i]] less[<] call[name[a]][name[i]]] begin[:] return[constant[False]] if compare[call[name[b]][name[i]] greater[>] call[name[a]][name[i]]] begin[:] return[constant[True]] return[constant[False]]
keyword[def] identifier[first_older_than_second] ( identifier[version_a] , identifier[version_b] ): literal[string] identifier[a] = identifier[nssversion] . identifier[to_ints] ( identifier[version_a] ) identifier[b] = identifier[nssversion] . identifier[to_ints] ( identifier[version_b] ) keyword[assert] identifier[len] ( identifier[a] )== identifier[len] ( identifier[b] ) keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[len] ( identifier[a] )): keyword[if] identifier[b] [ identifier[i] ]< identifier[a] [ identifier[i] ]: keyword[return] keyword[False] keyword[if] identifier[b] [ identifier[i] ]> identifier[a] [ identifier[i] ]: keyword[return] keyword[True] keyword[return] keyword[False]
def first_older_than_second(version_a, version_b): """ Tests for the NSS version string in the first parameter being less recent than the second (a < b). Tag order is RTM > RC > BETA > *. Works with hg tags like "NSS_3_7_9_RTM" and version strings reported by nsINSSVersion, like "3.18 Basic ECC Beta" (mixed, too). :param version_a: a NSS version string :param version_b: another NSS version string :return: bool (a < b) """ a = nssversion.to_ints(version_a) b = nssversion.to_ints(version_b) # must be of equal length assert len(a) == len(b) # Compare each version component, bail out on difference for i in xrange(len(a)): if b[i] < a[i]: return False # depends on [control=['if'], data=[]] if b[i] > a[i]: return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] return False
def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs): ''' Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2.4-0ubuntu1' '0.2.4.1-0ubuntu1' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] \ if ignore_epoch else six.text_type(x) # both apt_pkg.version_compare and _cmd_quote need string arguments. pkg1 = normalize(pkg1) pkg2 = normalize(pkg2) # if we have apt_pkg, this will be quickier this way # and also do not rely on shell. if HAS_APTPKG: try: # the apt_pkg module needs to be manually initialized apt_pkg.init_system() # if there is a difference in versions, apt_pkg.version_compare will # return an int representing the difference in minor versions, or # 1/-1 if the difference is smaller than minor versions. normalize # to -1, 0 or 1. try: ret = apt_pkg.version_compare(pkg1, pkg2) except TypeError: ret = apt_pkg.version_compare(six.text_type(pkg1), six.text_type(pkg2)) return 1 if ret > 0 else -1 if ret < 0 else 0 except Exception: # Try to use shell version in case of errors w/python bindings pass try: for oper, ret in (('lt', -1), ('eq', 0), ('gt', 1)): cmd = ['dpkg', '--compare-versions', pkg1, oper, pkg2] retcode = __salt__['cmd.retcode'](cmd, output_loglevel='trace', python_shell=False, ignore_retcode=True) if retcode == 0: return ret except Exception as exc: log.error(exc) return None
def function[version_cmp, parameter[pkg1, pkg2, ignore_epoch]]: constant[ Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2.4-0ubuntu1' '0.2.4.1-0ubuntu1' ] variable[normalize] assign[=] <ast.Lambda object at 0x7da1b21ae440> variable[pkg1] assign[=] call[name[normalize], parameter[name[pkg1]]] variable[pkg2] assign[=] call[name[normalize], parameter[name[pkg2]]] if name[HAS_APTPKG] begin[:] <ast.Try object at 0x7da1b21acaf0> <ast.Try object at 0x7da20c6a9540> return[constant[None]]
keyword[def] identifier[version_cmp] ( identifier[pkg1] , identifier[pkg2] , identifier[ignore_epoch] = keyword[False] ,** identifier[kwargs] ): literal[string] identifier[normalize] = keyword[lambda] identifier[x] : identifier[six] . identifier[text_type] ( identifier[x] ). identifier[split] ( literal[string] , literal[int] )[- literal[int] ] keyword[if] identifier[ignore_epoch] keyword[else] identifier[six] . identifier[text_type] ( identifier[x] ) identifier[pkg1] = identifier[normalize] ( identifier[pkg1] ) identifier[pkg2] = identifier[normalize] ( identifier[pkg2] ) keyword[if] identifier[HAS_APTPKG] : keyword[try] : identifier[apt_pkg] . identifier[init_system] () keyword[try] : identifier[ret] = identifier[apt_pkg] . identifier[version_compare] ( identifier[pkg1] , identifier[pkg2] ) keyword[except] identifier[TypeError] : identifier[ret] = identifier[apt_pkg] . identifier[version_compare] ( identifier[six] . identifier[text_type] ( identifier[pkg1] ), identifier[six] . identifier[text_type] ( identifier[pkg2] )) keyword[return] literal[int] keyword[if] identifier[ret] > literal[int] keyword[else] - literal[int] keyword[if] identifier[ret] < literal[int] keyword[else] literal[int] keyword[except] identifier[Exception] : keyword[pass] keyword[try] : keyword[for] identifier[oper] , identifier[ret] keyword[in] (( literal[string] ,- literal[int] ),( literal[string] , literal[int] ),( literal[string] , literal[int] )): identifier[cmd] =[ literal[string] , literal[string] , identifier[pkg1] , identifier[oper] , identifier[pkg2] ] identifier[retcode] = identifier[__salt__] [ literal[string] ]( identifier[cmd] , identifier[output_loglevel] = literal[string] , identifier[python_shell] = keyword[False] , identifier[ignore_retcode] = keyword[True] ) keyword[if] identifier[retcode] == literal[int] : keyword[return] identifier[ret] keyword[except] identifier[Exception] keyword[as] identifier[exc] : identifier[log] . identifier[error] ( identifier[exc] ) keyword[return] keyword[None]
def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs): """ Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2015.8.10,2016.3.2 CLI Example: .. code-block:: bash salt '*' pkg.version_cmp '0.2.4-0ubuntu1' '0.2.4.1-0ubuntu1' """ normalize = lambda x: six.text_type(x).split(':', 1)[-1] if ignore_epoch else six.text_type(x) # both apt_pkg.version_compare and _cmd_quote need string arguments. pkg1 = normalize(pkg1) pkg2 = normalize(pkg2) # if we have apt_pkg, this will be quickier this way # and also do not rely on shell. if HAS_APTPKG: try: # the apt_pkg module needs to be manually initialized apt_pkg.init_system() # if there is a difference in versions, apt_pkg.version_compare will # return an int representing the difference in minor versions, or # 1/-1 if the difference is smaller than minor versions. normalize # to -1, 0 or 1. try: ret = apt_pkg.version_compare(pkg1, pkg2) # depends on [control=['try'], data=[]] except TypeError: ret = apt_pkg.version_compare(six.text_type(pkg1), six.text_type(pkg2)) # depends on [control=['except'], data=[]] return 1 if ret > 0 else -1 if ret < 0 else 0 # depends on [control=['try'], data=[]] except Exception: # Try to use shell version in case of errors w/python bindings pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] try: for (oper, ret) in (('lt', -1), ('eq', 0), ('gt', 1)): cmd = ['dpkg', '--compare-versions', pkg1, oper, pkg2] retcode = __salt__['cmd.retcode'](cmd, output_loglevel='trace', python_shell=False, ignore_retcode=True) if retcode == 0: return ret # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['try'], data=[]] except Exception as exc: log.error(exc) # depends on [control=['except'], data=['exc']] return None
def get_gui_hint(self, hint): """Returns the value for specified gui hint (or a sensible default value, if this argument doesn't specify the hint). Args: hint: name of the hint to get value for Returns: value of the hint specified in yaml or a sensible default """ if hint == 'type': # 'self.kwargs.get('nargs') == 0' is there for default_iff_used, which may # have nargs: 0, so that it works similarly to 'store_const' if self.kwargs.get('action') == 'store_true' or self.kwargs.get('nargs') == 0: return 'bool' # store_const is represented by checkbox, but computes default differently elif self.kwargs.get('action') == 'store_const': return 'const' return self.gui_hints.get('type', 'str') elif hint == 'default': hint_type = self.get_gui_hint('type') hint_default = self.gui_hints.get('default', None) arg_default = self.kwargs.get('default', None) preserved_value = None if 'preserved' in self.kwargs: preserved_value = config_manager.get_config_value(self.kwargs['preserved']) if hint_type == 'path': if preserved_value is not None: default = preserved_value elif hint_default is not None: default = hint_default.replace('$(pwd)', utils.get_cwd_or_homedir()) else: default = arg_default or '~' return os.path.abspath(os.path.expanduser(default)) elif hint_type == 'bool': return hint_default or arg_default or False elif hint_type == 'const': return hint_default or arg_default else: if hint_default == '$(whoami)': hint_default = getpass.getuser() return preserved_value or hint_default or arg_default or ''
def function[get_gui_hint, parameter[self, hint]]: constant[Returns the value for specified gui hint (or a sensible default value, if this argument doesn't specify the hint). Args: hint: name of the hint to get value for Returns: value of the hint specified in yaml or a sensible default ] if compare[name[hint] equal[==] constant[type]] begin[:] if <ast.BoolOp object at 0x7da1b0f0de70> begin[:] return[constant[bool]] return[call[name[self].gui_hints.get, parameter[constant[type], constant[str]]]]
keyword[def] identifier[get_gui_hint] ( identifier[self] , identifier[hint] ): literal[string] keyword[if] identifier[hint] == literal[string] : keyword[if] identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] )== literal[string] keyword[or] identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] )== literal[int] : keyword[return] literal[string] keyword[elif] identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] )== literal[string] : keyword[return] literal[string] keyword[return] identifier[self] . identifier[gui_hints] . identifier[get] ( literal[string] , literal[string] ) keyword[elif] identifier[hint] == literal[string] : identifier[hint_type] = identifier[self] . identifier[get_gui_hint] ( literal[string] ) identifier[hint_default] = identifier[self] . identifier[gui_hints] . identifier[get] ( literal[string] , keyword[None] ) identifier[arg_default] = identifier[self] . identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] ) identifier[preserved_value] = keyword[None] keyword[if] literal[string] keyword[in] identifier[self] . identifier[kwargs] : identifier[preserved_value] = identifier[config_manager] . identifier[get_config_value] ( identifier[self] . identifier[kwargs] [ literal[string] ]) keyword[if] identifier[hint_type] == literal[string] : keyword[if] identifier[preserved_value] keyword[is] keyword[not] keyword[None] : identifier[default] = identifier[preserved_value] keyword[elif] identifier[hint_default] keyword[is] keyword[not] keyword[None] : identifier[default] = identifier[hint_default] . identifier[replace] ( literal[string] , identifier[utils] . identifier[get_cwd_or_homedir] ()) keyword[else] : identifier[default] = identifier[arg_default] keyword[or] literal[string] keyword[return] identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[default] )) keyword[elif] identifier[hint_type] == literal[string] : keyword[return] identifier[hint_default] keyword[or] identifier[arg_default] keyword[or] keyword[False] keyword[elif] identifier[hint_type] == literal[string] : keyword[return] identifier[hint_default] keyword[or] identifier[arg_default] keyword[else] : keyword[if] identifier[hint_default] == literal[string] : identifier[hint_default] = identifier[getpass] . identifier[getuser] () keyword[return] identifier[preserved_value] keyword[or] identifier[hint_default] keyword[or] identifier[arg_default] keyword[or] literal[string]
def get_gui_hint(self, hint): """Returns the value for specified gui hint (or a sensible default value, if this argument doesn't specify the hint). Args: hint: name of the hint to get value for Returns: value of the hint specified in yaml or a sensible default """ if hint == 'type': # 'self.kwargs.get('nargs') == 0' is there for default_iff_used, which may # have nargs: 0, so that it works similarly to 'store_const' if self.kwargs.get('action') == 'store_true' or self.kwargs.get('nargs') == 0: return 'bool' # depends on [control=['if'], data=[]] # store_const is represented by checkbox, but computes default differently elif self.kwargs.get('action') == 'store_const': return 'const' # depends on [control=['if'], data=[]] return self.gui_hints.get('type', 'str') # depends on [control=['if'], data=[]] elif hint == 'default': hint_type = self.get_gui_hint('type') hint_default = self.gui_hints.get('default', None) arg_default = self.kwargs.get('default', None) preserved_value = None if 'preserved' in self.kwargs: preserved_value = config_manager.get_config_value(self.kwargs['preserved']) # depends on [control=['if'], data=[]] if hint_type == 'path': if preserved_value is not None: default = preserved_value # depends on [control=['if'], data=['preserved_value']] elif hint_default is not None: default = hint_default.replace('$(pwd)', utils.get_cwd_or_homedir()) # depends on [control=['if'], data=['hint_default']] else: default = arg_default or '~' return os.path.abspath(os.path.expanduser(default)) # depends on [control=['if'], data=[]] elif hint_type == 'bool': return hint_default or arg_default or False # depends on [control=['if'], data=[]] elif hint_type == 'const': return hint_default or arg_default # depends on [control=['if'], data=[]] else: if hint_default == '$(whoami)': hint_default = getpass.getuser() # depends on [control=['if'], data=['hint_default']] return preserved_value or hint_default or arg_default or '' # depends on [control=['if'], data=[]]
def _sort_layers(self): """Sort the layers by depth.""" self._layers = OrderedDict(sorted(self._layers.items(), key=lambda t: t[0]))
def function[_sort_layers, parameter[self]]: constant[Sort the layers by depth.] name[self]._layers assign[=] call[name[OrderedDict], parameter[call[name[sorted], parameter[call[name[self]._layers.items, parameter[]]]]]]
keyword[def] identifier[_sort_layers] ( identifier[self] ): literal[string] identifier[self] . identifier[_layers] = identifier[OrderedDict] ( identifier[sorted] ( identifier[self] . identifier[_layers] . identifier[items] (), identifier[key] = keyword[lambda] identifier[t] : identifier[t] [ literal[int] ]))
def _sort_layers(self): """Sort the layers by depth.""" self._layers = OrderedDict(sorted(self._layers.items(), key=lambda t: t[0]))
def _parse_game_date_and_location(self, boxscore): """ Retrieve the game's date and location. The game's meta information, such as date, location, attendance, and duration, follow a complex parsing scheme that changes based on the layout of the page. The information should be able to be parsed and set regardless of the order and how much information is included. To do this, the meta information should be iterated through line-by-line and fields should be determined by the values that are found in each line. Parameters ---------- boxscore : PyQuery object A PyQuery object containing all of the HTML data from the boxscore. """ scheme = BOXSCORE_SCHEME["game_info"] items = [i.text() for i in boxscore(scheme).items()] game_info = items[0].split('\n') arena = None attendance = None date = None duration = None playoff_round = None time = None if game_info[0].count(',') == 2: date = ','.join(game_info[0].split(',')[0:2]).strip() time = game_info[0].split(',')[-1].strip() else: date = game_info[0] for line in game_info: if 'Arena: ' in line: arena = line.replace('Arena: ', '') if 'Attendance: ' in line: attendance = line.replace('Attendance: ', '').replace(',', '') if 'Game Duration: ' in line: duration = line.replace('Game Duration: ', '') if 'eastern first round' in line.lower() or \ 'western first round' in line.lower() or \ 'eastern second round' in line.lower() or \ 'western second round' in line.lower() or \ 'eastern conference finals' in line.lower() or \ 'western conference finals' in line.lower() or \ 'stanley cup final' in line.lower(): playoff_round = line setattr(self, '_arena', arena) setattr(self, '_attendance', attendance) setattr(self, '_date', date) setattr(self, '_duration', duration) setattr(self, '_playoff_round', playoff_round) setattr(self, '_time', time)
def function[_parse_game_date_and_location, parameter[self, boxscore]]: constant[ Retrieve the game's date and location. The game's meta information, such as date, location, attendance, and duration, follow a complex parsing scheme that changes based on the layout of the page. The information should be able to be parsed and set regardless of the order and how much information is included. To do this, the meta information should be iterated through line-by-line and fields should be determined by the values that are found in each line. Parameters ---------- boxscore : PyQuery object A PyQuery object containing all of the HTML data from the boxscore. ] variable[scheme] assign[=] call[name[BOXSCORE_SCHEME]][constant[game_info]] variable[items] assign[=] <ast.ListComp object at 0x7da1b0bf28c0> variable[game_info] assign[=] call[call[name[items]][constant[0]].split, parameter[constant[ ]]] variable[arena] assign[=] constant[None] variable[attendance] assign[=] constant[None] variable[date] assign[=] constant[None] variable[duration] assign[=] constant[None] variable[playoff_round] assign[=] constant[None] variable[time] assign[=] constant[None] if compare[call[call[name[game_info]][constant[0]].count, parameter[constant[,]]] equal[==] constant[2]] begin[:] variable[date] assign[=] call[call[constant[,].join, parameter[call[call[call[name[game_info]][constant[0]].split, parameter[constant[,]]]][<ast.Slice object at 0x7da1b0cf7610>]]].strip, parameter[]] variable[time] assign[=] call[call[call[call[name[game_info]][constant[0]].split, parameter[constant[,]]]][<ast.UnaryOp object at 0x7da1b0cf6890>].strip, parameter[]] for taget[name[line]] in starred[name[game_info]] begin[:] if compare[constant[Arena: ] in name[line]] begin[:] variable[arena] assign[=] call[name[line].replace, parameter[constant[Arena: ], constant[]]] if compare[constant[Attendance: ] in name[line]] begin[:] variable[attendance] assign[=] call[call[name[line].replace, parameter[constant[Attendance: ], constant[]]].replace, parameter[constant[,], constant[]]] if compare[constant[Game Duration: ] in name[line]] begin[:] variable[duration] assign[=] call[name[line].replace, parameter[constant[Game Duration: ], constant[]]] if <ast.BoolOp object at 0x7da1b0cf70d0> begin[:] variable[playoff_round] assign[=] name[line] call[name[setattr], parameter[name[self], constant[_arena], name[arena]]] call[name[setattr], parameter[name[self], constant[_attendance], name[attendance]]] call[name[setattr], parameter[name[self], constant[_date], name[date]]] call[name[setattr], parameter[name[self], constant[_duration], name[duration]]] call[name[setattr], parameter[name[self], constant[_playoff_round], name[playoff_round]]] call[name[setattr], parameter[name[self], constant[_time], name[time]]]
keyword[def] identifier[_parse_game_date_and_location] ( identifier[self] , identifier[boxscore] ): literal[string] identifier[scheme] = identifier[BOXSCORE_SCHEME] [ literal[string] ] identifier[items] =[ identifier[i] . identifier[text] () keyword[for] identifier[i] keyword[in] identifier[boxscore] ( identifier[scheme] ). identifier[items] ()] identifier[game_info] = identifier[items] [ literal[int] ]. identifier[split] ( literal[string] ) identifier[arena] = keyword[None] identifier[attendance] = keyword[None] identifier[date] = keyword[None] identifier[duration] = keyword[None] identifier[playoff_round] = keyword[None] identifier[time] = keyword[None] keyword[if] identifier[game_info] [ literal[int] ]. identifier[count] ( literal[string] )== literal[int] : identifier[date] = literal[string] . identifier[join] ( identifier[game_info] [ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] : literal[int] ]). identifier[strip] () identifier[time] = identifier[game_info] [ literal[int] ]. identifier[split] ( literal[string] )[- literal[int] ]. identifier[strip] () keyword[else] : identifier[date] = identifier[game_info] [ literal[int] ] keyword[for] identifier[line] keyword[in] identifier[game_info] : keyword[if] literal[string] keyword[in] identifier[line] : identifier[arena] = identifier[line] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] literal[string] keyword[in] identifier[line] : identifier[attendance] = identifier[line] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] ) keyword[if] literal[string] keyword[in] identifier[line] : identifier[duration] = identifier[line] . identifier[replace] ( literal[string] , literal[string] ) keyword[if] literal[string] keyword[in] identifier[line] . identifier[lower] () keyword[or] literal[string] keyword[in] identifier[line] . identifier[lower] () keyword[or] literal[string] keyword[in] identifier[line] . identifier[lower] () keyword[or] literal[string] keyword[in] identifier[line] . identifier[lower] () keyword[or] literal[string] keyword[in] identifier[line] . identifier[lower] () keyword[or] literal[string] keyword[in] identifier[line] . identifier[lower] () keyword[or] literal[string] keyword[in] identifier[line] . identifier[lower] (): identifier[playoff_round] = identifier[line] identifier[setattr] ( identifier[self] , literal[string] , identifier[arena] ) identifier[setattr] ( identifier[self] , literal[string] , identifier[attendance] ) identifier[setattr] ( identifier[self] , literal[string] , identifier[date] ) identifier[setattr] ( identifier[self] , literal[string] , identifier[duration] ) identifier[setattr] ( identifier[self] , literal[string] , identifier[playoff_round] ) identifier[setattr] ( identifier[self] , literal[string] , identifier[time] )
def _parse_game_date_and_location(self, boxscore): """ Retrieve the game's date and location. The game's meta information, such as date, location, attendance, and duration, follow a complex parsing scheme that changes based on the layout of the page. The information should be able to be parsed and set regardless of the order and how much information is included. To do this, the meta information should be iterated through line-by-line and fields should be determined by the values that are found in each line. Parameters ---------- boxscore : PyQuery object A PyQuery object containing all of the HTML data from the boxscore. """ scheme = BOXSCORE_SCHEME['game_info'] items = [i.text() for i in boxscore(scheme).items()] game_info = items[0].split('\n') arena = None attendance = None date = None duration = None playoff_round = None time = None if game_info[0].count(',') == 2: date = ','.join(game_info[0].split(',')[0:2]).strip() time = game_info[0].split(',')[-1].strip() # depends on [control=['if'], data=[]] else: date = game_info[0] for line in game_info: if 'Arena: ' in line: arena = line.replace('Arena: ', '') # depends on [control=['if'], data=['line']] if 'Attendance: ' in line: attendance = line.replace('Attendance: ', '').replace(',', '') # depends on [control=['if'], data=['line']] if 'Game Duration: ' in line: duration = line.replace('Game Duration: ', '') # depends on [control=['if'], data=['line']] if 'eastern first round' in line.lower() or 'western first round' in line.lower() or 'eastern second round' in line.lower() or ('western second round' in line.lower()) or ('eastern conference finals' in line.lower()) or ('western conference finals' in line.lower()) or ('stanley cup final' in line.lower()): playoff_round = line # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] setattr(self, '_arena', arena) setattr(self, '_attendance', attendance) setattr(self, '_date', date) setattr(self, '_duration', duration) setattr(self, '_playoff_round', playoff_round) setattr(self, '_time', time)
def osc_fit_fun(x, a, tau, f, phi, c): """Function used to fit the decay cosine.""" # pylint: disable=invalid-name return a * np.exp(-x / tau) * np.cos(2 * np.pi * f * x + phi) + c
def function[osc_fit_fun, parameter[x, a, tau, f, phi, c]]: constant[Function used to fit the decay cosine.] return[binary_operation[binary_operation[binary_operation[name[a] * call[name[np].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b055eef0> / name[tau]]]]] * call[name[np].cos, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[2] * name[np].pi] * name[f]] * name[x]] + name[phi]]]]] + name[c]]]
keyword[def] identifier[osc_fit_fun] ( identifier[x] , identifier[a] , identifier[tau] , identifier[f] , identifier[phi] , identifier[c] ): literal[string] keyword[return] identifier[a] * identifier[np] . identifier[exp] (- identifier[x] / identifier[tau] )* identifier[np] . identifier[cos] ( literal[int] * identifier[np] . identifier[pi] * identifier[f] * identifier[x] + identifier[phi] )+ identifier[c]
def osc_fit_fun(x, a, tau, f, phi, c): """Function used to fit the decay cosine.""" # pylint: disable=invalid-name return a * np.exp(-x / tau) * np.cos(2 * np.pi * f * x + phi) + c
def dqdv_frames(cell, split=False, **kwargs): """Returns dqdv data as pandas.DataFrame(s) for all cycles. Args: cell (CellpyData-object). split (bool): return one frame for charge and one for discharge if True (defaults to False). Returns: pandas.DataFrame(s) with the following columns: cycle: cycle number (if split is set to True). voltage: voltage dq: the incremental capacity Example: >>> from cellpy.utils import ica >>> charge_df, dcharge_df = ica.ica_frames(my_cell, split=True) >>> charge_df.plot(x=("voltage", "v")) """ # TODO: should add option for normalising based on first cycle capacity # this is e.g. done by first finding the first cycle capacity (nom_cap) # (or use nominal capacity given as input) and then propagating this to # Converter using the key-word arguments # normalize=True, normalization_factor=1.0, normalization_roof=nom_cap if split: return _dqdv_split_frames(cell, tidy=True, **kwargs) else: return _dqdv_combinded_frame(cell, **kwargs)
def function[dqdv_frames, parameter[cell, split]]: constant[Returns dqdv data as pandas.DataFrame(s) for all cycles. Args: cell (CellpyData-object). split (bool): return one frame for charge and one for discharge if True (defaults to False). Returns: pandas.DataFrame(s) with the following columns: cycle: cycle number (if split is set to True). voltage: voltage dq: the incremental capacity Example: >>> from cellpy.utils import ica >>> charge_df, dcharge_df = ica.ica_frames(my_cell, split=True) >>> charge_df.plot(x=("voltage", "v")) ] if name[split] begin[:] return[call[name[_dqdv_split_frames], parameter[name[cell]]]]
keyword[def] identifier[dqdv_frames] ( identifier[cell] , identifier[split] = keyword[False] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[split] : keyword[return] identifier[_dqdv_split_frames] ( identifier[cell] , identifier[tidy] = keyword[True] ,** identifier[kwargs] ) keyword[else] : keyword[return] identifier[_dqdv_combinded_frame] ( identifier[cell] ,** identifier[kwargs] )
def dqdv_frames(cell, split=False, **kwargs): """Returns dqdv data as pandas.DataFrame(s) for all cycles. Args: cell (CellpyData-object). split (bool): return one frame for charge and one for discharge if True (defaults to False). Returns: pandas.DataFrame(s) with the following columns: cycle: cycle number (if split is set to True). voltage: voltage dq: the incremental capacity Example: >>> from cellpy.utils import ica >>> charge_df, dcharge_df = ica.ica_frames(my_cell, split=True) >>> charge_df.plot(x=("voltage", "v")) """ # TODO: should add option for normalising based on first cycle capacity # this is e.g. done by first finding the first cycle capacity (nom_cap) # (or use nominal capacity given as input) and then propagating this to # Converter using the key-word arguments # normalize=True, normalization_factor=1.0, normalization_roof=nom_cap if split: return _dqdv_split_frames(cell, tidy=True, **kwargs) # depends on [control=['if'], data=[]] else: return _dqdv_combinded_frame(cell, **kwargs)
def getAsWmsDatasetString(self, session): """ Retrieve the WMS Raster as a string in the WMS Dataset format """ # Magic numbers FIRST_VALUE_INDEX = 12 # Write value raster if type(self.raster) != type(None): # Convert to GRASS ASCII Raster valueGrassRasterString = self.getAsGrassAsciiGrid(session) # Split by lines values = valueGrassRasterString.split() # Assemble into string wmsDatasetString = '' for i in range(FIRST_VALUE_INDEX, len(values)): wmsDatasetString += '{0:.6f}\r\n'.format(float(values[i])) return wmsDatasetString else: wmsDatasetString = self.rasterText
def function[getAsWmsDatasetString, parameter[self, session]]: constant[ Retrieve the WMS Raster as a string in the WMS Dataset format ] variable[FIRST_VALUE_INDEX] assign[=] constant[12] if compare[call[name[type], parameter[name[self].raster]] not_equal[!=] call[name[type], parameter[constant[None]]]] begin[:] variable[valueGrassRasterString] assign[=] call[name[self].getAsGrassAsciiGrid, parameter[name[session]]] variable[values] assign[=] call[name[valueGrassRasterString].split, parameter[]] variable[wmsDatasetString] assign[=] constant[] for taget[name[i]] in starred[call[name[range], parameter[name[FIRST_VALUE_INDEX], call[name[len], parameter[name[values]]]]]] begin[:] <ast.AugAssign object at 0x7da18dc048b0> return[name[wmsDatasetString]]
keyword[def] identifier[getAsWmsDatasetString] ( identifier[self] , identifier[session] ): literal[string] identifier[FIRST_VALUE_INDEX] = literal[int] keyword[if] identifier[type] ( identifier[self] . identifier[raster] )!= identifier[type] ( keyword[None] ): identifier[valueGrassRasterString] = identifier[self] . identifier[getAsGrassAsciiGrid] ( identifier[session] ) identifier[values] = identifier[valueGrassRasterString] . identifier[split] () identifier[wmsDatasetString] = literal[string] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[FIRST_VALUE_INDEX] , identifier[len] ( identifier[values] )): identifier[wmsDatasetString] += literal[string] . identifier[format] ( identifier[float] ( identifier[values] [ identifier[i] ])) keyword[return] identifier[wmsDatasetString] keyword[else] : identifier[wmsDatasetString] = identifier[self] . identifier[rasterText]
def getAsWmsDatasetString(self, session): """ Retrieve the WMS Raster as a string in the WMS Dataset format """ # Magic numbers FIRST_VALUE_INDEX = 12 # Write value raster if type(self.raster) != type(None): # Convert to GRASS ASCII Raster valueGrassRasterString = self.getAsGrassAsciiGrid(session) # Split by lines values = valueGrassRasterString.split() # Assemble into string wmsDatasetString = '' for i in range(FIRST_VALUE_INDEX, len(values)): wmsDatasetString += '{0:.6f}\r\n'.format(float(values[i])) # depends on [control=['for'], data=['i']] return wmsDatasetString # depends on [control=['if'], data=[]] else: wmsDatasetString = self.rasterText
def check_indent_level(self, string, expected, line_num): """return the indent level of the string """ indent = self.config.indent_string if indent == "\\t": # \t is not interpreted in the configuration file indent = "\t" level = 0 unit_size = len(indent) while string[:unit_size] == indent: string = string[unit_size:] level += 1 suppl = "" while string and string[0] in " \t": if string[0] != indent[0]: if string[0] == "\t": args = ("tab", "space") else: args = ("space", "tab") self.add_message("mixed-indentation", args=args, line=line_num) return level suppl += string[0] string = string[1:] if level != expected or suppl: i_type = "spaces" if indent[0] == "\t": i_type = "tabs" self.add_message( "bad-indentation", line=line_num, args=(level * unit_size + len(suppl), i_type, expected * unit_size), ) return None
def function[check_indent_level, parameter[self, string, expected, line_num]]: constant[return the indent level of the string ] variable[indent] assign[=] name[self].config.indent_string if compare[name[indent] equal[==] constant[\t]] begin[:] variable[indent] assign[=] constant[ ] variable[level] assign[=] constant[0] variable[unit_size] assign[=] call[name[len], parameter[name[indent]]] while compare[call[name[string]][<ast.Slice object at 0x7da1b03156c0>] equal[==] name[indent]] begin[:] variable[string] assign[=] call[name[string]][<ast.Slice object at 0x7da1b0316800>] <ast.AugAssign object at 0x7da1b03162c0> variable[suppl] assign[=] constant[] while <ast.BoolOp object at 0x7da1b0314c70> begin[:] if compare[call[name[string]][constant[0]] not_equal[!=] call[name[indent]][constant[0]]] begin[:] if compare[call[name[string]][constant[0]] equal[==] constant[ ]] begin[:] variable[args] assign[=] tuple[[<ast.Constant object at 0x7da1b033a800>, <ast.Constant object at 0x7da1b033ad40>]] call[name[self].add_message, parameter[constant[mixed-indentation]]] return[name[level]] <ast.AugAssign object at 0x7da1b0317940> variable[string] assign[=] call[name[string]][<ast.Slice object at 0x7da1b020f940>] if <ast.BoolOp object at 0x7da1b020c2e0> begin[:] variable[i_type] assign[=] constant[spaces] if compare[call[name[indent]][constant[0]] equal[==] constant[ ]] begin[:] variable[i_type] assign[=] constant[tabs] call[name[self].add_message, parameter[constant[bad-indentation]]] return[constant[None]]
keyword[def] identifier[check_indent_level] ( identifier[self] , identifier[string] , identifier[expected] , identifier[line_num] ): literal[string] identifier[indent] = identifier[self] . identifier[config] . identifier[indent_string] keyword[if] identifier[indent] == literal[string] : identifier[indent] = literal[string] identifier[level] = literal[int] identifier[unit_size] = identifier[len] ( identifier[indent] ) keyword[while] identifier[string] [: identifier[unit_size] ]== identifier[indent] : identifier[string] = identifier[string] [ identifier[unit_size] :] identifier[level] += literal[int] identifier[suppl] = literal[string] keyword[while] identifier[string] keyword[and] identifier[string] [ literal[int] ] keyword[in] literal[string] : keyword[if] identifier[string] [ literal[int] ]!= identifier[indent] [ literal[int] ]: keyword[if] identifier[string] [ literal[int] ]== literal[string] : identifier[args] =( literal[string] , literal[string] ) keyword[else] : identifier[args] =( literal[string] , literal[string] ) identifier[self] . identifier[add_message] ( literal[string] , identifier[args] = identifier[args] , identifier[line] = identifier[line_num] ) keyword[return] identifier[level] identifier[suppl] += identifier[string] [ literal[int] ] identifier[string] = identifier[string] [ literal[int] :] keyword[if] identifier[level] != identifier[expected] keyword[or] identifier[suppl] : identifier[i_type] = literal[string] keyword[if] identifier[indent] [ literal[int] ]== literal[string] : identifier[i_type] = literal[string] identifier[self] . identifier[add_message] ( literal[string] , identifier[line] = identifier[line_num] , identifier[args] =( identifier[level] * identifier[unit_size] + identifier[len] ( identifier[suppl] ), identifier[i_type] , identifier[expected] * identifier[unit_size] ), ) keyword[return] keyword[None]
def check_indent_level(self, string, expected, line_num): """return the indent level of the string """ indent = self.config.indent_string if indent == '\\t': # \t is not interpreted in the configuration file indent = '\t' # depends on [control=['if'], data=['indent']] level = 0 unit_size = len(indent) while string[:unit_size] == indent: string = string[unit_size:] level += 1 # depends on [control=['while'], data=[]] suppl = '' while string and string[0] in ' \t': if string[0] != indent[0]: if string[0] == '\t': args = ('tab', 'space') # depends on [control=['if'], data=[]] else: args = ('space', 'tab') self.add_message('mixed-indentation', args=args, line=line_num) return level # depends on [control=['if'], data=[]] suppl += string[0] string = string[1:] # depends on [control=['while'], data=[]] if level != expected or suppl: i_type = 'spaces' if indent[0] == '\t': i_type = 'tabs' # depends on [control=['if'], data=[]] self.add_message('bad-indentation', line=line_num, args=(level * unit_size + len(suppl), i_type, expected * unit_size)) # depends on [control=['if'], data=[]] return None
def beforeRender(self, ctx): """ Implement this hook to initialize the L{initialPerson} and L{initialState} slots with information from the request url's query args. """ # see the comment in Organizer.urlForViewState which suggests an # alternate implementation of this kind of functionality. request = inevow.IRequest(ctx) if not set(['initial-person', 'initial-state']).issubset( # <= set(request.args)): return initialPersonName = request.args['initial-person'][0].decode('utf-8') initialPerson = self.store.findFirst( Person, Person.name == initialPersonName) if initialPerson is None: return initialState = request.args['initial-state'][0].decode('utf-8') if initialState not in ORGANIZER_VIEW_STATES.ALL_STATES: return self.initialPerson = initialPerson self.initialState = initialState
def function[beforeRender, parameter[self, ctx]]: constant[ Implement this hook to initialize the L{initialPerson} and L{initialState} slots with information from the request url's query args. ] variable[request] assign[=] call[name[inevow].IRequest, parameter[name[ctx]]] if <ast.UnaryOp object at 0x7da1b0a0e7d0> begin[:] return[None] variable[initialPersonName] assign[=] call[call[call[name[request].args][constant[initial-person]]][constant[0]].decode, parameter[constant[utf-8]]] variable[initialPerson] assign[=] call[name[self].store.findFirst, parameter[name[Person], compare[name[Person].name equal[==] name[initialPersonName]]]] if compare[name[initialPerson] is constant[None]] begin[:] return[None] variable[initialState] assign[=] call[call[call[name[request].args][constant[initial-state]]][constant[0]].decode, parameter[constant[utf-8]]] if compare[name[initialState] <ast.NotIn object at 0x7da2590d7190> name[ORGANIZER_VIEW_STATES].ALL_STATES] begin[:] return[None] name[self].initialPerson assign[=] name[initialPerson] name[self].initialState assign[=] name[initialState]
keyword[def] identifier[beforeRender] ( identifier[self] , identifier[ctx] ): literal[string] identifier[request] = identifier[inevow] . identifier[IRequest] ( identifier[ctx] ) keyword[if] keyword[not] identifier[set] ([ literal[string] , literal[string] ]). identifier[issubset] ( identifier[set] ( identifier[request] . identifier[args] )): keyword[return] identifier[initialPersonName] = identifier[request] . identifier[args] [ literal[string] ][ literal[int] ]. identifier[decode] ( literal[string] ) identifier[initialPerson] = identifier[self] . identifier[store] . identifier[findFirst] ( identifier[Person] , identifier[Person] . identifier[name] == identifier[initialPersonName] ) keyword[if] identifier[initialPerson] keyword[is] keyword[None] : keyword[return] identifier[initialState] = identifier[request] . identifier[args] [ literal[string] ][ literal[int] ]. identifier[decode] ( literal[string] ) keyword[if] identifier[initialState] keyword[not] keyword[in] identifier[ORGANIZER_VIEW_STATES] . identifier[ALL_STATES] : keyword[return] identifier[self] . identifier[initialPerson] = identifier[initialPerson] identifier[self] . identifier[initialState] = identifier[initialState]
def beforeRender(self, ctx): """ Implement this hook to initialize the L{initialPerson} and L{initialState} slots with information from the request url's query args. """ # see the comment in Organizer.urlForViewState which suggests an # alternate implementation of this kind of functionality. request = inevow.IRequest(ctx) if not set(['initial-person', 'initial-state']).issubset(set(request.args)): # <= return # depends on [control=['if'], data=[]] initialPersonName = request.args['initial-person'][0].decode('utf-8') initialPerson = self.store.findFirst(Person, Person.name == initialPersonName) if initialPerson is None: return # depends on [control=['if'], data=[]] initialState = request.args['initial-state'][0].decode('utf-8') if initialState not in ORGANIZER_VIEW_STATES.ALL_STATES: return # depends on [control=['if'], data=[]] self.initialPerson = initialPerson self.initialState = initialState
def params_as_tensors_for(*objs, convert=True): """ Context manager which changes the representation of parameters and data holders for the specific parameterized object(s). This can also be used to turn off tensor conversion functions wrapped with `params_as_tensors`: ``` @gpflow.params_as_tensors def compute_something(self): # self is parameterized object. s = tf.reduce_sum(self.a) # self.a is a parameter. with params_as_tensors_for(self, convert=False): b = self.c.constrained_tensor return s + b ``` :param objs: one or more instances of classes deriving from Parameterized :param convert: Flag which is used for turning tensor convertion feature on, `True`, or turning it off, `False`. """ objs = set(objs) # remove duplicate objects so the tensor mode won't be changed before saving prev_values = [_params_as_tensors_enter(o, convert) for o in objs] try: yield finally: for o, pv in reversed(list(zip(objs, prev_values))): _params_as_tensors_exit(o, pv)
def function[params_as_tensors_for, parameter[]]: constant[ Context manager which changes the representation of parameters and data holders for the specific parameterized object(s). This can also be used to turn off tensor conversion functions wrapped with `params_as_tensors`: ``` @gpflow.params_as_tensors def compute_something(self): # self is parameterized object. s = tf.reduce_sum(self.a) # self.a is a parameter. with params_as_tensors_for(self, convert=False): b = self.c.constrained_tensor return s + b ``` :param objs: one or more instances of classes deriving from Parameterized :param convert: Flag which is used for turning tensor convertion feature on, `True`, or turning it off, `False`. ] variable[objs] assign[=] call[name[set], parameter[name[objs]]] variable[prev_values] assign[=] <ast.ListComp object at 0x7da18c4ccf10> <ast.Try object at 0x7da18c4cd4b0>
keyword[def] identifier[params_as_tensors_for] (* identifier[objs] , identifier[convert] = keyword[True] ): literal[string] identifier[objs] = identifier[set] ( identifier[objs] ) identifier[prev_values] =[ identifier[_params_as_tensors_enter] ( identifier[o] , identifier[convert] ) keyword[for] identifier[o] keyword[in] identifier[objs] ] keyword[try] : keyword[yield] keyword[finally] : keyword[for] identifier[o] , identifier[pv] keyword[in] identifier[reversed] ( identifier[list] ( identifier[zip] ( identifier[objs] , identifier[prev_values] ))): identifier[_params_as_tensors_exit] ( identifier[o] , identifier[pv] )
def params_as_tensors_for(*objs, convert=True): """ Context manager which changes the representation of parameters and data holders for the specific parameterized object(s). This can also be used to turn off tensor conversion functions wrapped with `params_as_tensors`: ``` @gpflow.params_as_tensors def compute_something(self): # self is parameterized object. s = tf.reduce_sum(self.a) # self.a is a parameter. with params_as_tensors_for(self, convert=False): b = self.c.constrained_tensor return s + b ``` :param objs: one or more instances of classes deriving from Parameterized :param convert: Flag which is used for turning tensor convertion feature on, `True`, or turning it off, `False`. """ objs = set(objs) # remove duplicate objects so the tensor mode won't be changed before saving prev_values = [_params_as_tensors_enter(o, convert) for o in objs] try: yield # depends on [control=['try'], data=[]] finally: for (o, pv) in reversed(list(zip(objs, prev_values))): _params_as_tensors_exit(o, pv) # depends on [control=['for'], data=[]]
def _get_csrf_token(self): """Return the CSRF Token of easyname login form.""" from bs4 import BeautifulSoup home_response = self.session.get(self.URLS['login']) self._log('Home', home_response) assert home_response.status_code == 200, \ 'Could not load Easyname login page.' html = BeautifulSoup(home_response.content, 'html.parser') self._log('Home', html) csrf_token_field = html.find('input', {'id': 'loginxtoken'}) assert csrf_token_field is not None, 'Could not find login token.' return csrf_token_field['value']
def function[_get_csrf_token, parameter[self]]: constant[Return the CSRF Token of easyname login form.] from relative_module[bs4] import module[BeautifulSoup] variable[home_response] assign[=] call[name[self].session.get, parameter[call[name[self].URLS][constant[login]]]] call[name[self]._log, parameter[constant[Home], name[home_response]]] assert[compare[name[home_response].status_code equal[==] constant[200]]] variable[html] assign[=] call[name[BeautifulSoup], parameter[name[home_response].content, constant[html.parser]]] call[name[self]._log, parameter[constant[Home], name[html]]] variable[csrf_token_field] assign[=] call[name[html].find, parameter[constant[input], dictionary[[<ast.Constant object at 0x7da1b1d5d030>], [<ast.Constant object at 0x7da1b1d5d120>]]]] assert[compare[name[csrf_token_field] is_not constant[None]]] return[call[name[csrf_token_field]][constant[value]]]
keyword[def] identifier[_get_csrf_token] ( identifier[self] ): literal[string] keyword[from] identifier[bs4] keyword[import] identifier[BeautifulSoup] identifier[home_response] = identifier[self] . identifier[session] . identifier[get] ( identifier[self] . identifier[URLS] [ literal[string] ]) identifier[self] . identifier[_log] ( literal[string] , identifier[home_response] ) keyword[assert] identifier[home_response] . identifier[status_code] == literal[int] , literal[string] identifier[html] = identifier[BeautifulSoup] ( identifier[home_response] . identifier[content] , literal[string] ) identifier[self] . identifier[_log] ( literal[string] , identifier[html] ) identifier[csrf_token_field] = identifier[html] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] }) keyword[assert] identifier[csrf_token_field] keyword[is] keyword[not] keyword[None] , literal[string] keyword[return] identifier[csrf_token_field] [ literal[string] ]
def _get_csrf_token(self): """Return the CSRF Token of easyname login form.""" from bs4 import BeautifulSoup home_response = self.session.get(self.URLS['login']) self._log('Home', home_response) assert home_response.status_code == 200, 'Could not load Easyname login page.' html = BeautifulSoup(home_response.content, 'html.parser') self._log('Home', html) csrf_token_field = html.find('input', {'id': 'loginxtoken'}) assert csrf_token_field is not None, 'Could not find login token.' return csrf_token_field['value']
def get_network(network_id): """Get the network with the given id.""" try: net = models.Network.query.filter_by(id=network_id).one() except NoResultFound: return error_response( error_type="/network GET: no network found", status=403) # return the data return success_response(field="network", data=net.__json__(), request_type="network get")
def function[get_network, parameter[network_id]]: constant[Get the network with the given id.] <ast.Try object at 0x7da18f58fa30> return[call[name[success_response], parameter[]]]
keyword[def] identifier[get_network] ( identifier[network_id] ): literal[string] keyword[try] : identifier[net] = identifier[models] . identifier[Network] . identifier[query] . identifier[filter_by] ( identifier[id] = identifier[network_id] ). identifier[one] () keyword[except] identifier[NoResultFound] : keyword[return] identifier[error_response] ( identifier[error_type] = literal[string] , identifier[status] = literal[int] ) keyword[return] identifier[success_response] ( identifier[field] = literal[string] , identifier[data] = identifier[net] . identifier[__json__] (), identifier[request_type] = literal[string] )
def get_network(network_id): """Get the network with the given id.""" try: net = models.Network.query.filter_by(id=network_id).one() # depends on [control=['try'], data=[]] except NoResultFound: return error_response(error_type='/network GET: no network found', status=403) # depends on [control=['except'], data=[]] # return the data return success_response(field='network', data=net.__json__(), request_type='network get')
async def freedom(self, root): """Nation's `Freedoms`: three basic indicators of the nation's Civil Rights, Economy, and Political Freedom, as expressive adjectives. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with \ keys and values of str Keys being, in order: ``Civil Rights``, ``Economy``, and ``Political Freedom``. """ elem = root.find('FREEDOM') result = OrderedDict() result['Civil Rights'] = elem.find('CIVILRIGHTS').text result['Economy'] = elem.find('ECONOMY').text result['Political Freedom'] = elem.find('POLITICALFREEDOM').text return result
<ast.AsyncFunctionDef object at 0x7da1b28fe980>
keyword[async] keyword[def] identifier[freedom] ( identifier[self] , identifier[root] ): literal[string] identifier[elem] = identifier[root] . identifier[find] ( literal[string] ) identifier[result] = identifier[OrderedDict] () identifier[result] [ literal[string] ]= identifier[elem] . identifier[find] ( literal[string] ). identifier[text] identifier[result] [ literal[string] ]= identifier[elem] . identifier[find] ( literal[string] ). identifier[text] identifier[result] [ literal[string] ]= identifier[elem] . identifier[find] ( literal[string] ). identifier[text] keyword[return] identifier[result]
async def freedom(self, root): """Nation's `Freedoms`: three basic indicators of the nation's Civil Rights, Economy, and Political Freedom, as expressive adjectives. Returns ------- an :class:`ApiQuery` of :class:`collections.OrderedDict` with keys and values of str Keys being, in order: ``Civil Rights``, ``Economy``, and ``Political Freedom``. """ elem = root.find('FREEDOM') result = OrderedDict() result['Civil Rights'] = elem.find('CIVILRIGHTS').text result['Economy'] = elem.find('ECONOMY').text result['Political Freedom'] = elem.find('POLITICALFREEDOM').text return result
def getInstalledConfig(installDir, configFile): """ Reads config from the installation directory of Plenum. :param installDir: installation directory of Plenum :param configFile: name of the configuration file :raises: FileNotFoundError :return: the configuration as a python object """ configPath = os.path.join(installDir, configFile) if not os.path.exists(configPath): raise FileNotFoundError("No file found at location {}". format(configPath)) spec = spec_from_file_location(configFile, configPath) config = module_from_spec(spec) spec.loader.exec_module(config) return config
def function[getInstalledConfig, parameter[installDir, configFile]]: constant[ Reads config from the installation directory of Plenum. :param installDir: installation directory of Plenum :param configFile: name of the configuration file :raises: FileNotFoundError :return: the configuration as a python object ] variable[configPath] assign[=] call[name[os].path.join, parameter[name[installDir], name[configFile]]] if <ast.UnaryOp object at 0x7da2047e8280> begin[:] <ast.Raise object at 0x7da2047e9b40> variable[spec] assign[=] call[name[spec_from_file_location], parameter[name[configFile], name[configPath]]] variable[config] assign[=] call[name[module_from_spec], parameter[name[spec]]] call[name[spec].loader.exec_module, parameter[name[config]]] return[name[config]]
keyword[def] identifier[getInstalledConfig] ( identifier[installDir] , identifier[configFile] ): literal[string] identifier[configPath] = identifier[os] . identifier[path] . identifier[join] ( identifier[installDir] , identifier[configFile] ) keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[configPath] ): keyword[raise] identifier[FileNotFoundError] ( literal[string] . identifier[format] ( identifier[configPath] )) identifier[spec] = identifier[spec_from_file_location] ( identifier[configFile] , identifier[configPath] ) identifier[config] = identifier[module_from_spec] ( identifier[spec] ) identifier[spec] . identifier[loader] . identifier[exec_module] ( identifier[config] ) keyword[return] identifier[config]
def getInstalledConfig(installDir, configFile): """ Reads config from the installation directory of Plenum. :param installDir: installation directory of Plenum :param configFile: name of the configuration file :raises: FileNotFoundError :return: the configuration as a python object """ configPath = os.path.join(installDir, configFile) if not os.path.exists(configPath): raise FileNotFoundError('No file found at location {}'.format(configPath)) # depends on [control=['if'], data=[]] spec = spec_from_file_location(configFile, configPath) config = module_from_spec(spec) spec.loader.exec_module(config) return config
def index_labels(labels, case_sensitive=False): """Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]`` """ label_to_index = {} index_to_label = {} # If we're not case-sensitive, if not case_sensitive: labels = [str(s).lower() for s in labels] # First, build the unique label mapping for index, s in enumerate(sorted(set(labels))): label_to_index[s] = index index_to_label[index] = s # Remap the labels to indices indices = [label_to_index[s] for s in labels] # Return the converted labels, and the inverse mapping return indices, index_to_label
def function[index_labels, parameter[labels, case_sensitive]]: constant[Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]`` ] variable[label_to_index] assign[=] dictionary[[], []] variable[index_to_label] assign[=] dictionary[[], []] if <ast.UnaryOp object at 0x7da18bc701f0> begin[:] variable[labels] assign[=] <ast.ListComp object at 0x7da18bc71180> for taget[tuple[[<ast.Name object at 0x7da20e957220>, <ast.Name object at 0x7da20e954070>]]] in starred[call[name[enumerate], parameter[call[name[sorted], parameter[call[name[set], parameter[name[labels]]]]]]]] begin[:] call[name[label_to_index]][name[s]] assign[=] name[index] call[name[index_to_label]][name[index]] assign[=] name[s] variable[indices] assign[=] <ast.ListComp object at 0x7da1b0fcee90> return[tuple[[<ast.Name object at 0x7da1b0fcfa90>, <ast.Name object at 0x7da1b0fcfd90>]]]
keyword[def] identifier[index_labels] ( identifier[labels] , identifier[case_sensitive] = keyword[False] ): literal[string] identifier[label_to_index] ={} identifier[index_to_label] ={} keyword[if] keyword[not] identifier[case_sensitive] : identifier[labels] =[ identifier[str] ( identifier[s] ). identifier[lower] () keyword[for] identifier[s] keyword[in] identifier[labels] ] keyword[for] identifier[index] , identifier[s] keyword[in] identifier[enumerate] ( identifier[sorted] ( identifier[set] ( identifier[labels] ))): identifier[label_to_index] [ identifier[s] ]= identifier[index] identifier[index_to_label] [ identifier[index] ]= identifier[s] identifier[indices] =[ identifier[label_to_index] [ identifier[s] ] keyword[for] identifier[s] keyword[in] identifier[labels] ] keyword[return] identifier[indices] , identifier[index_to_label]
def index_labels(labels, case_sensitive=False): """Convert a list of string identifiers into numerical indices. Parameters ---------- labels : list of strings, shape=(n,) A list of annotations, e.g., segment or chord labels from an annotation file. case_sensitive : bool Set to True to enable case-sensitive label indexing (Default value = False) Returns ------- indices : list, shape=(n,) Numerical representation of ``labels`` index_to_label : dict Mapping to convert numerical indices back to labels. ``labels[i] == index_to_label[indices[i]]`` """ label_to_index = {} index_to_label = {} # If we're not case-sensitive, if not case_sensitive: labels = [str(s).lower() for s in labels] # depends on [control=['if'], data=[]] # First, build the unique label mapping for (index, s) in enumerate(sorted(set(labels))): label_to_index[s] = index index_to_label[index] = s # depends on [control=['for'], data=[]] # Remap the labels to indices indices = [label_to_index[s] for s in labels] # Return the converted labels, and the inverse mapping return (indices, index_to_label)
def write_dag(self, out=sys.stdout): """Write info for all GO Terms in obo file, sorted numerically.""" for rec in sorted(self.values()): print(rec, file=out)
def function[write_dag, parameter[self, out]]: constant[Write info for all GO Terms in obo file, sorted numerically.] for taget[name[rec]] in starred[call[name[sorted], parameter[call[name[self].values, parameter[]]]]] begin[:] call[name[print], parameter[name[rec]]]
keyword[def] identifier[write_dag] ( identifier[self] , identifier[out] = identifier[sys] . identifier[stdout] ): literal[string] keyword[for] identifier[rec] keyword[in] identifier[sorted] ( identifier[self] . identifier[values] ()): identifier[print] ( identifier[rec] , identifier[file] = identifier[out] )
def write_dag(self, out=sys.stdout): """Write info for all GO Terms in obo file, sorted numerically.""" for rec in sorted(self.values()): print(rec, file=out) # depends on [control=['for'], data=['rec']]
def handle_package_has_file_helper(self, pkg_file): """ Return node representing pkg_file pkg_file should be instance of spdx.file. """ nodes = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(pkg_file.name)))) if len(nodes) == 1: return nodes[0][0] else: raise InvalidDocumentError('handle_package_has_file_helper could not' + ' find file node for file: {0}'.format(pkg_file.name))
def function[handle_package_has_file_helper, parameter[self, pkg_file]]: constant[ Return node representing pkg_file pkg_file should be instance of spdx.file. ] variable[nodes] assign[=] call[name[list], parameter[call[name[self].graph.triples, parameter[tuple[[<ast.Constant object at 0x7da1b01da3b0>, <ast.Attribute object at 0x7da1b01d9a20>, <ast.Call object at 0x7da1b01d84f0>]]]]]] if compare[call[name[len], parameter[name[nodes]]] equal[==] constant[1]] begin[:] return[call[call[name[nodes]][constant[0]]][constant[0]]]
keyword[def] identifier[handle_package_has_file_helper] ( identifier[self] , identifier[pkg_file] ): literal[string] identifier[nodes] = identifier[list] ( identifier[self] . identifier[graph] . identifier[triples] (( keyword[None] , identifier[self] . identifier[spdx_namespace] . identifier[fileName] , identifier[Literal] ( identifier[pkg_file] . identifier[name] )))) keyword[if] identifier[len] ( identifier[nodes] )== literal[int] : keyword[return] identifier[nodes] [ literal[int] ][ literal[int] ] keyword[else] : keyword[raise] identifier[InvalidDocumentError] ( literal[string] + literal[string] . identifier[format] ( identifier[pkg_file] . identifier[name] ))
def handle_package_has_file_helper(self, pkg_file): """ Return node representing pkg_file pkg_file should be instance of spdx.file. """ nodes = list(self.graph.triples((None, self.spdx_namespace.fileName, Literal(pkg_file.name)))) if len(nodes) == 1: return nodes[0][0] # depends on [control=['if'], data=[]] else: raise InvalidDocumentError('handle_package_has_file_helper could not' + ' find file node for file: {0}'.format(pkg_file.name))
def rankagg_R(df, method="stuart"): """Return aggregated ranks as implemented in the RobustRankAgg R package. This function is now deprecated. References: Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709 Stuart et al., 2003, DOI: 10.1126/science.1087447 Parameters ---------- df : pandas.DataFrame DataFrame with values to be ranked and aggregated Returns ------- pandas.DataFrame with aggregated ranks """ tmpdf = NamedTemporaryFile() tmpscript = NamedTemporaryFile(mode="w") tmpranks = NamedTemporaryFile() df.to_csv(tmpdf.name, sep="\t",index=False) script = ''' library(RobustRankAggreg); a = read.table("{}", header=TRUE); x = lapply(a, as.vector); result = aggregateRanks(x, method="{}"); result$p.adjust = p.adjust(result$Score); write.table(result, file="{}", sep="\t", quote=FALSE, row.names=FALSE); '''.format(tmpdf.name, method, tmpranks.name) tmpscript.write(script) tmpscript.flush() p = sp.Popen(["Rscript", tmpscript.name], stdout=sp.PIPE, stderr=sp.PIPE) stderr, stdout = p.communicate() df = pd.read_table(tmpranks.name, index_col=0) return df["p.adjust"]
def function[rankagg_R, parameter[df, method]]: constant[Return aggregated ranks as implemented in the RobustRankAgg R package. This function is now deprecated. References: Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709 Stuart et al., 2003, DOI: 10.1126/science.1087447 Parameters ---------- df : pandas.DataFrame DataFrame with values to be ranked and aggregated Returns ------- pandas.DataFrame with aggregated ranks ] variable[tmpdf] assign[=] call[name[NamedTemporaryFile], parameter[]] variable[tmpscript] assign[=] call[name[NamedTemporaryFile], parameter[]] variable[tmpranks] assign[=] call[name[NamedTemporaryFile], parameter[]] call[name[df].to_csv, parameter[name[tmpdf].name]] variable[script] assign[=] call[constant[ library(RobustRankAggreg); a = read.table("{}", header=TRUE); x = lapply(a, as.vector); result = aggregateRanks(x, method="{}"); result$p.adjust = p.adjust(result$Score); write.table(result, file="{}", sep=" ", quote=FALSE, row.names=FALSE); ].format, parameter[name[tmpdf].name, name[method], name[tmpranks].name]] call[name[tmpscript].write, parameter[name[script]]] call[name[tmpscript].flush, parameter[]] variable[p] assign[=] call[name[sp].Popen, parameter[list[[<ast.Constant object at 0x7da2041db640>, <ast.Attribute object at 0x7da2041d8790>]]]] <ast.Tuple object at 0x7da2041d8a00> assign[=] call[name[p].communicate, parameter[]] variable[df] assign[=] call[name[pd].read_table, parameter[name[tmpranks].name]] return[call[name[df]][constant[p.adjust]]]
keyword[def] identifier[rankagg_R] ( identifier[df] , identifier[method] = literal[string] ): literal[string] identifier[tmpdf] = identifier[NamedTemporaryFile] () identifier[tmpscript] = identifier[NamedTemporaryFile] ( identifier[mode] = literal[string] ) identifier[tmpranks] = identifier[NamedTemporaryFile] () identifier[df] . identifier[to_csv] ( identifier[tmpdf] . identifier[name] , identifier[sep] = literal[string] , identifier[index] = keyword[False] ) identifier[script] = literal[string] . identifier[format] ( identifier[tmpdf] . identifier[name] , identifier[method] , identifier[tmpranks] . identifier[name] ) identifier[tmpscript] . identifier[write] ( identifier[script] ) identifier[tmpscript] . identifier[flush] () identifier[p] = identifier[sp] . identifier[Popen] ([ literal[string] , identifier[tmpscript] . identifier[name] ], identifier[stdout] = identifier[sp] . identifier[PIPE] , identifier[stderr] = identifier[sp] . identifier[PIPE] ) identifier[stderr] , identifier[stdout] = identifier[p] . identifier[communicate] () identifier[df] = identifier[pd] . identifier[read_table] ( identifier[tmpranks] . identifier[name] , identifier[index_col] = literal[int] ) keyword[return] identifier[df] [ literal[string] ]
def rankagg_R(df, method='stuart'): """Return aggregated ranks as implemented in the RobustRankAgg R package. This function is now deprecated. References: Kolde et al., 2012, DOI: 10.1093/bioinformatics/btr709 Stuart et al., 2003, DOI: 10.1126/science.1087447 Parameters ---------- df : pandas.DataFrame DataFrame with values to be ranked and aggregated Returns ------- pandas.DataFrame with aggregated ranks """ tmpdf = NamedTemporaryFile() tmpscript = NamedTemporaryFile(mode='w') tmpranks = NamedTemporaryFile() df.to_csv(tmpdf.name, sep='\t', index=False) script = ' \nlibrary(RobustRankAggreg); \na = read.table("{}", header=TRUE); \nx = lapply(a, as.vector); \nresult = aggregateRanks(x, method="{}"); \nresult$p.adjust = p.adjust(result$Score); \n write.table(result, file="{}", sep="\t", quote=FALSE, row.names=FALSE); \n'.format(tmpdf.name, method, tmpranks.name) tmpscript.write(script) tmpscript.flush() p = sp.Popen(['Rscript', tmpscript.name], stdout=sp.PIPE, stderr=sp.PIPE) (stderr, stdout) = p.communicate() df = pd.read_table(tmpranks.name, index_col=0) return df['p.adjust']
def get_needful_files(self): """ Returns set of media files associated with models. Those files won't be deleted. """ needful_files = [] for model in self.models(): media_fields = [] for field in self.model_file_fields(model): media_fields.append(field.name) if media_fields: exclude_options = {media_field: '' for media_field in media_fields} model_uploaded_media = model.objects.exclude(**exclude_options).values_list(*media_fields) needful_files.extend(model_uploaded_media) return set(chain.from_iterable(needful_files))
def function[get_needful_files, parameter[self]]: constant[ Returns set of media files associated with models. Those files won't be deleted. ] variable[needful_files] assign[=] list[[]] for taget[name[model]] in starred[call[name[self].models, parameter[]]] begin[:] variable[media_fields] assign[=] list[[]] for taget[name[field]] in starred[call[name[self].model_file_fields, parameter[name[model]]]] begin[:] call[name[media_fields].append, parameter[name[field].name]] if name[media_fields] begin[:] variable[exclude_options] assign[=] <ast.DictComp object at 0x7da1b0471540> variable[model_uploaded_media] assign[=] call[call[name[model].objects.exclude, parameter[]].values_list, parameter[<ast.Starred object at 0x7da1b04737c0>]] call[name[needful_files].extend, parameter[name[model_uploaded_media]]] return[call[name[set], parameter[call[name[chain].from_iterable, parameter[name[needful_files]]]]]]
keyword[def] identifier[get_needful_files] ( identifier[self] ): literal[string] identifier[needful_files] =[] keyword[for] identifier[model] keyword[in] identifier[self] . identifier[models] (): identifier[media_fields] =[] keyword[for] identifier[field] keyword[in] identifier[self] . identifier[model_file_fields] ( identifier[model] ): identifier[media_fields] . identifier[append] ( identifier[field] . identifier[name] ) keyword[if] identifier[media_fields] : identifier[exclude_options] ={ identifier[media_field] : literal[string] keyword[for] identifier[media_field] keyword[in] identifier[media_fields] } identifier[model_uploaded_media] = identifier[model] . identifier[objects] . identifier[exclude] (** identifier[exclude_options] ). identifier[values_list] (* identifier[media_fields] ) identifier[needful_files] . identifier[extend] ( identifier[model_uploaded_media] ) keyword[return] identifier[set] ( identifier[chain] . identifier[from_iterable] ( identifier[needful_files] ))
def get_needful_files(self): """ Returns set of media files associated with models. Those files won't be deleted. """ needful_files = [] for model in self.models(): media_fields = [] for field in self.model_file_fields(model): media_fields.append(field.name) # depends on [control=['for'], data=['field']] if media_fields: exclude_options = {media_field: '' for media_field in media_fields} model_uploaded_media = model.objects.exclude(**exclude_options).values_list(*media_fields) needful_files.extend(model_uploaded_media) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['model']] return set(chain.from_iterable(needful_files))
def home_shift_summ(self): """ :returns: :py:class:`.ShiftSummary` by player for the home team :rtype: dict ``{ player_num: shift_summary_obj }`` """ if not self.__wrapped_home: self.__wrapped_home = self.__wrap(self._home.by_player) return self.__wrapped_home
def function[home_shift_summ, parameter[self]]: constant[ :returns: :py:class:`.ShiftSummary` by player for the home team :rtype: dict ``{ player_num: shift_summary_obj }`` ] if <ast.UnaryOp object at 0x7da1b0e0c880> begin[:] name[self].__wrapped_home assign[=] call[name[self].__wrap, parameter[name[self]._home.by_player]] return[name[self].__wrapped_home]
keyword[def] identifier[home_shift_summ] ( identifier[self] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[__wrapped_home] : identifier[self] . identifier[__wrapped_home] = identifier[self] . identifier[__wrap] ( identifier[self] . identifier[_home] . identifier[by_player] ) keyword[return] identifier[self] . identifier[__wrapped_home]
def home_shift_summ(self): """ :returns: :py:class:`.ShiftSummary` by player for the home team :rtype: dict ``{ player_num: shift_summary_obj }`` """ if not self.__wrapped_home: self.__wrapped_home = self.__wrap(self._home.by_player) # depends on [control=['if'], data=[]] return self.__wrapped_home
def GetAnalyzers(cls): """Retrieves the registered analyzers. Yields: tuple: containing: str: the uniquely identifying name of the analyzer type: the analyzer class. """ for analyzer_name, analyzer_class in iter(cls._analyzer_classes.items()): yield analyzer_name, analyzer_class
def function[GetAnalyzers, parameter[cls]]: constant[Retrieves the registered analyzers. Yields: tuple: containing: str: the uniquely identifying name of the analyzer type: the analyzer class. ] for taget[tuple[[<ast.Name object at 0x7da20c7c9cc0>, <ast.Name object at 0x7da20c7cb490>]]] in starred[call[name[iter], parameter[call[name[cls]._analyzer_classes.items, parameter[]]]]] begin[:] <ast.Yield object at 0x7da20c7c9390>
keyword[def] identifier[GetAnalyzers] ( identifier[cls] ): literal[string] keyword[for] identifier[analyzer_name] , identifier[analyzer_class] keyword[in] identifier[iter] ( identifier[cls] . identifier[_analyzer_classes] . identifier[items] ()): keyword[yield] identifier[analyzer_name] , identifier[analyzer_class]
def GetAnalyzers(cls): """Retrieves the registered analyzers. Yields: tuple: containing: str: the uniquely identifying name of the analyzer type: the analyzer class. """ for (analyzer_name, analyzer_class) in iter(cls._analyzer_classes.items()): yield (analyzer_name, analyzer_class) # depends on [control=['for'], data=[]]
def _load_keyring_path(config): "load the keyring-path option (if present)" try: path = config.get("backend", "keyring-path").strip() sys.path.insert(0, path) except (configparser.NoOptionError, configparser.NoSectionError): pass
def function[_load_keyring_path, parameter[config]]: constant[load the keyring-path option (if present)] <ast.Try object at 0x7da1b0061d50>
keyword[def] identifier[_load_keyring_path] ( identifier[config] ): literal[string] keyword[try] : identifier[path] = identifier[config] . identifier[get] ( literal[string] , literal[string] ). identifier[strip] () identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[path] ) keyword[except] ( identifier[configparser] . identifier[NoOptionError] , identifier[configparser] . identifier[NoSectionError] ): keyword[pass]
def _load_keyring_path(config): """load the keyring-path option (if present)""" try: path = config.get('backend', 'keyring-path').strip() sys.path.insert(0, path) # depends on [control=['try'], data=[]] except (configparser.NoOptionError, configparser.NoSectionError): pass # depends on [control=['except'], data=[]]
def pfx_path(path): """ Prefix a path with the OS path separator if it is not already """ if path[0] != os.path.sep: return os.path.sep + path else: return path
def function[pfx_path, parameter[path]]: constant[ Prefix a path with the OS path separator if it is not already ] if compare[call[name[path]][constant[0]] not_equal[!=] name[os].path.sep] begin[:] return[binary_operation[name[os].path.sep + name[path]]]
keyword[def] identifier[pfx_path] ( identifier[path] ): literal[string] keyword[if] identifier[path] [ literal[int] ]!= identifier[os] . identifier[path] . identifier[sep] : keyword[return] identifier[os] . identifier[path] . identifier[sep] + identifier[path] keyword[else] : keyword[return] identifier[path]
def pfx_path(path): """ Prefix a path with the OS path separator if it is not already """ if path[0] != os.path.sep: return os.path.sep + path # depends on [control=['if'], data=[]] else: return path
def get_time_buckets_from_metadata(metadata): '''return a list of time buckets in which the metadata falls''' start = metadata['start'] end = metadata.get('end') or start buckets = DatalakeRecord.get_time_buckets(start, end) if len(buckets) > DatalakeRecord.MAXIMUM_BUCKET_SPAN: msg = 'metadata spans too many time buckets: {}' j = json.dumps(metadata) msg = msg.format(j) raise UnsupportedTimeRange(msg) return buckets
def function[get_time_buckets_from_metadata, parameter[metadata]]: constant[return a list of time buckets in which the metadata falls] variable[start] assign[=] call[name[metadata]][constant[start]] variable[end] assign[=] <ast.BoolOp object at 0x7da1b0aa45e0> variable[buckets] assign[=] call[name[DatalakeRecord].get_time_buckets, parameter[name[start], name[end]]] if compare[call[name[len], parameter[name[buckets]]] greater[>] name[DatalakeRecord].MAXIMUM_BUCKET_SPAN] begin[:] variable[msg] assign[=] constant[metadata spans too many time buckets: {}] variable[j] assign[=] call[name[json].dumps, parameter[name[metadata]]] variable[msg] assign[=] call[name[msg].format, parameter[name[j]]] <ast.Raise object at 0x7da1b0aa4220> return[name[buckets]]
keyword[def] identifier[get_time_buckets_from_metadata] ( identifier[metadata] ): literal[string] identifier[start] = identifier[metadata] [ literal[string] ] identifier[end] = identifier[metadata] . identifier[get] ( literal[string] ) keyword[or] identifier[start] identifier[buckets] = identifier[DatalakeRecord] . identifier[get_time_buckets] ( identifier[start] , identifier[end] ) keyword[if] identifier[len] ( identifier[buckets] )> identifier[DatalakeRecord] . identifier[MAXIMUM_BUCKET_SPAN] : identifier[msg] = literal[string] identifier[j] = identifier[json] . identifier[dumps] ( identifier[metadata] ) identifier[msg] = identifier[msg] . identifier[format] ( identifier[j] ) keyword[raise] identifier[UnsupportedTimeRange] ( identifier[msg] ) keyword[return] identifier[buckets]
def get_time_buckets_from_metadata(metadata): """return a list of time buckets in which the metadata falls""" start = metadata['start'] end = metadata.get('end') or start buckets = DatalakeRecord.get_time_buckets(start, end) if len(buckets) > DatalakeRecord.MAXIMUM_BUCKET_SPAN: msg = 'metadata spans too many time buckets: {}' j = json.dumps(metadata) msg = msg.format(j) raise UnsupportedTimeRange(msg) # depends on [control=['if'], data=[]] return buckets
def _TypecheckDecorator(subject=None, **kwargs): """Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator. """ if subject is None: return _TypecheckDecoratorFactory(kwargs) elif inspect.isfunction(subject) or inspect.ismethod(subject): return _TypecheckFunction(subject, {}, 2, None) else: raise TypeError()
def function[_TypecheckDecorator, parameter[subject]]: constant[Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator. ] if compare[name[subject] is constant[None]] begin[:] return[call[name[_TypecheckDecoratorFactory], parameter[name[kwargs]]]]
keyword[def] identifier[_TypecheckDecorator] ( identifier[subject] = keyword[None] ,** identifier[kwargs] ): literal[string] keyword[if] identifier[subject] keyword[is] keyword[None] : keyword[return] identifier[_TypecheckDecoratorFactory] ( identifier[kwargs] ) keyword[elif] identifier[inspect] . identifier[isfunction] ( identifier[subject] ) keyword[or] identifier[inspect] . identifier[ismethod] ( identifier[subject] ): keyword[return] identifier[_TypecheckFunction] ( identifier[subject] ,{}, literal[int] , keyword[None] ) keyword[else] : keyword[raise] identifier[TypeError] ()
def _TypecheckDecorator(subject=None, **kwargs): """Dispatches type checks based on what the subject is. Functions or methods are annotated directly. If this method is called with keyword arguments only, return a decorator. """ if subject is None: return _TypecheckDecoratorFactory(kwargs) # depends on [control=['if'], data=[]] elif inspect.isfunction(subject) or inspect.ismethod(subject): return _TypecheckFunction(subject, {}, 2, None) # depends on [control=['if'], data=[]] else: raise TypeError()
def generated_password_entropy(self) -> float: """Calculate the entropy of a password that would be generated.""" characters = self._get_password_characters() if ( self.passwordlen is None or not characters ): raise ValueError("Can't calculate the password entropy: character" " set is empty or passwordlen isn't set") if self.passwordlen == 0: return 0.0 return calc_password_entropy(self.passwordlen, characters)
def function[generated_password_entropy, parameter[self]]: constant[Calculate the entropy of a password that would be generated.] variable[characters] assign[=] call[name[self]._get_password_characters, parameter[]] if <ast.BoolOp object at 0x7da2047ea920> begin[:] <ast.Raise object at 0x7da2047e9ea0> if compare[name[self].passwordlen equal[==] constant[0]] begin[:] return[constant[0.0]] return[call[name[calc_password_entropy], parameter[name[self].passwordlen, name[characters]]]]
keyword[def] identifier[generated_password_entropy] ( identifier[self] )-> identifier[float] : literal[string] identifier[characters] = identifier[self] . identifier[_get_password_characters] () keyword[if] ( identifier[self] . identifier[passwordlen] keyword[is] keyword[None] keyword[or] keyword[not] identifier[characters] ): keyword[raise] identifier[ValueError] ( literal[string] literal[string] ) keyword[if] identifier[self] . identifier[passwordlen] == literal[int] : keyword[return] literal[int] keyword[return] identifier[calc_password_entropy] ( identifier[self] . identifier[passwordlen] , identifier[characters] )
def generated_password_entropy(self) -> float: """Calculate the entropy of a password that would be generated.""" characters = self._get_password_characters() if self.passwordlen is None or not characters: raise ValueError("Can't calculate the password entropy: character set is empty or passwordlen isn't set") # depends on [control=['if'], data=[]] if self.passwordlen == 0: return 0.0 # depends on [control=['if'], data=[]] return calc_password_entropy(self.passwordlen, characters)
def setup(): """ Initializes the hook queues for the sys module. This method will automatically be called on the first registration for a hook to the system by either the registerDisplay or registerExcept functions. """ global _displayhooks, _excepthooks if _displayhooks is not None: return _displayhooks = [] _excepthooks = [] # store any current hooks if sys.displayhook != sys.__displayhook__: _displayhooks.append(weakref.ref(sys.displayhook)) if sys.excepthook != sys.__excepthook__: _excepthooks.append(weakref.ref(sys.excepthook)) # replace the current hooks sys.displayhook = displayhook sys.excepthook = excepthook
def function[setup, parameter[]]: constant[ Initializes the hook queues for the sys module. This method will automatically be called on the first registration for a hook to the system by either the registerDisplay or registerExcept functions. ] <ast.Global object at 0x7da1b2776b00> if compare[name[_displayhooks] is_not constant[None]] begin[:] return[None] variable[_displayhooks] assign[=] list[[]] variable[_excepthooks] assign[=] list[[]] if compare[name[sys].displayhook not_equal[!=] name[sys].__displayhook__] begin[:] call[name[_displayhooks].append, parameter[call[name[weakref].ref, parameter[name[sys].displayhook]]]] if compare[name[sys].excepthook not_equal[!=] name[sys].__excepthook__] begin[:] call[name[_excepthooks].append, parameter[call[name[weakref].ref, parameter[name[sys].excepthook]]]] name[sys].displayhook assign[=] name[displayhook] name[sys].excepthook assign[=] name[excepthook]
keyword[def] identifier[setup] (): literal[string] keyword[global] identifier[_displayhooks] , identifier[_excepthooks] keyword[if] identifier[_displayhooks] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[_displayhooks] =[] identifier[_excepthooks] =[] keyword[if] identifier[sys] . identifier[displayhook] != identifier[sys] . identifier[__displayhook__] : identifier[_displayhooks] . identifier[append] ( identifier[weakref] . identifier[ref] ( identifier[sys] . identifier[displayhook] )) keyword[if] identifier[sys] . identifier[excepthook] != identifier[sys] . identifier[__excepthook__] : identifier[_excepthooks] . identifier[append] ( identifier[weakref] . identifier[ref] ( identifier[sys] . identifier[excepthook] )) identifier[sys] . identifier[displayhook] = identifier[displayhook] identifier[sys] . identifier[excepthook] = identifier[excepthook]
def setup(): """ Initializes the hook queues for the sys module. This method will automatically be called on the first registration for a hook to the system by either the registerDisplay or registerExcept functions. """ global _displayhooks, _excepthooks if _displayhooks is not None: return # depends on [control=['if'], data=[]] _displayhooks = [] _excepthooks = [] # store any current hooks if sys.displayhook != sys.__displayhook__: _displayhooks.append(weakref.ref(sys.displayhook)) # depends on [control=['if'], data=[]] if sys.excepthook != sys.__excepthook__: _excepthooks.append(weakref.ref(sys.excepthook)) # depends on [control=['if'], data=[]] # replace the current hooks sys.displayhook = displayhook sys.excepthook = excepthook
def equals(series1, series2, ignore_order=False, ignore_index=False, all_close=False, _return_reason=False): ''' Get whether 2 series are equal. ``NaN`` is considered equal to ``NaN`` and `None`. Parameters ---------- series1 : pandas.Series Series to compare. series2 : pandas.Series Series to compare. ignore_order : bool Ignore order of values (and index). ignore_index : bool Ignore index values and name. all_close : bool If `False`, values must match exactly, if `True`, floats are compared as if compared with `numpy.isclose`. _return_reason : bool Internal. If `True`, `equals` returns a tuple containing the reason, else `equals` only returns a bool indicating equality (or equivalence rather). Returns ------- bool Whether they are equal (after ignoring according to the parameters). Internal note: if ``_return_reason``, ``Tuple[bool, str or None]`` is returned. The former is whether they're equal, the latter is `None` if equal or a short explanation of why the series aren't equal, otherwise. Notes ----- All values (including those of indices) must be copyable and ``__eq__`` must be such that a copy must equal its original. A value must equal itself unless it's ``NaN``. Values needn't be orderable or hashable (however pandas requires index values to be orderable and hashable). By consequence, this is not an efficient function, but it is flexible. ''' result = _equals(series1, series2, ignore_order, ignore_index, all_close) if _return_reason: return result else: return result[0]
def function[equals, parameter[series1, series2, ignore_order, ignore_index, all_close, _return_reason]]: constant[ Get whether 2 series are equal. ``NaN`` is considered equal to ``NaN`` and `None`. Parameters ---------- series1 : pandas.Series Series to compare. series2 : pandas.Series Series to compare. ignore_order : bool Ignore order of values (and index). ignore_index : bool Ignore index values and name. all_close : bool If `False`, values must match exactly, if `True`, floats are compared as if compared with `numpy.isclose`. _return_reason : bool Internal. If `True`, `equals` returns a tuple containing the reason, else `equals` only returns a bool indicating equality (or equivalence rather). Returns ------- bool Whether they are equal (after ignoring according to the parameters). Internal note: if ``_return_reason``, ``Tuple[bool, str or None]`` is returned. The former is whether they're equal, the latter is `None` if equal or a short explanation of why the series aren't equal, otherwise. Notes ----- All values (including those of indices) must be copyable and ``__eq__`` must be such that a copy must equal its original. A value must equal itself unless it's ``NaN``. Values needn't be orderable or hashable (however pandas requires index values to be orderable and hashable). By consequence, this is not an efficient function, but it is flexible. ] variable[result] assign[=] call[name[_equals], parameter[name[series1], name[series2], name[ignore_order], name[ignore_index], name[all_close]]] if name[_return_reason] begin[:] return[name[result]]
keyword[def] identifier[equals] ( identifier[series1] , identifier[series2] , identifier[ignore_order] = keyword[False] , identifier[ignore_index] = keyword[False] , identifier[all_close] = keyword[False] , identifier[_return_reason] = keyword[False] ): literal[string] identifier[result] = identifier[_equals] ( identifier[series1] , identifier[series2] , identifier[ignore_order] , identifier[ignore_index] , identifier[all_close] ) keyword[if] identifier[_return_reason] : keyword[return] identifier[result] keyword[else] : keyword[return] identifier[result] [ literal[int] ]
def equals(series1, series2, ignore_order=False, ignore_index=False, all_close=False, _return_reason=False): """ Get whether 2 series are equal. ``NaN`` is considered equal to ``NaN`` and `None`. Parameters ---------- series1 : pandas.Series Series to compare. series2 : pandas.Series Series to compare. ignore_order : bool Ignore order of values (and index). ignore_index : bool Ignore index values and name. all_close : bool If `False`, values must match exactly, if `True`, floats are compared as if compared with `numpy.isclose`. _return_reason : bool Internal. If `True`, `equals` returns a tuple containing the reason, else `equals` only returns a bool indicating equality (or equivalence rather). Returns ------- bool Whether they are equal (after ignoring according to the parameters). Internal note: if ``_return_reason``, ``Tuple[bool, str or None]`` is returned. The former is whether they're equal, the latter is `None` if equal or a short explanation of why the series aren't equal, otherwise. Notes ----- All values (including those of indices) must be copyable and ``__eq__`` must be such that a copy must equal its original. A value must equal itself unless it's ``NaN``. Values needn't be orderable or hashable (however pandas requires index values to be orderable and hashable). By consequence, this is not an efficient function, but it is flexible. """ result = _equals(series1, series2, ignore_order, ignore_index, all_close) if _return_reason: return result # depends on [control=['if'], data=[]] else: return result[0]
def gcn(args): """ %prog gcn gencode.v26.exonunion.bed data/*.vcf.gz Compile gene copy njumber based on CANVAS results. """ p = OptionParser(gcn.__doc__) p.set_cpus() p.set_tmpdir(tmpdir="tmp") p.set_outfile() opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) exonbed = args[0] canvasvcfs = args[1:] tsvfile = opts.outfile tmpdir = opts.tmpdir mkdir(tmpdir) set_tempdir(tmpdir) df = vcf_to_df(canvasvcfs, exonbed, opts.cpus) for suffix in (".avgcn", ".medcn"): df_to_tsv(df, tsvfile, suffix)
def function[gcn, parameter[args]]: constant[ %prog gcn gencode.v26.exonunion.bed data/*.vcf.gz Compile gene copy njumber based on CANVAS results. ] variable[p] assign[=] call[name[OptionParser], parameter[name[gcn].__doc__]] call[name[p].set_cpus, parameter[]] call[name[p].set_tmpdir, parameter[]] call[name[p].set_outfile, parameter[]] <ast.Tuple object at 0x7da204622f80> assign[=] call[name[p].parse_args, parameter[name[args]]] if compare[call[name[len], parameter[name[args]]] less[<] constant[2]] begin[:] call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da207f9a560>]] variable[exonbed] assign[=] call[name[args]][constant[0]] variable[canvasvcfs] assign[=] call[name[args]][<ast.Slice object at 0x7da207f98e20>] variable[tsvfile] assign[=] name[opts].outfile variable[tmpdir] assign[=] name[opts].tmpdir call[name[mkdir], parameter[name[tmpdir]]] call[name[set_tempdir], parameter[name[tmpdir]]] variable[df] assign[=] call[name[vcf_to_df], parameter[name[canvasvcfs], name[exonbed], name[opts].cpus]] for taget[name[suffix]] in starred[tuple[[<ast.Constant object at 0x7da18f58f5b0>, <ast.Constant object at 0x7da18f58c700>]]] begin[:] call[name[df_to_tsv], parameter[name[df], name[tsvfile], name[suffix]]]
keyword[def] identifier[gcn] ( identifier[args] ): literal[string] identifier[p] = identifier[OptionParser] ( identifier[gcn] . identifier[__doc__] ) identifier[p] . identifier[set_cpus] () identifier[p] . identifier[set_tmpdir] ( identifier[tmpdir] = literal[string] ) identifier[p] . identifier[set_outfile] () identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] ) keyword[if] identifier[len] ( identifier[args] )< literal[int] : identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ()) identifier[exonbed] = identifier[args] [ literal[int] ] identifier[canvasvcfs] = identifier[args] [ literal[int] :] identifier[tsvfile] = identifier[opts] . identifier[outfile] identifier[tmpdir] = identifier[opts] . identifier[tmpdir] identifier[mkdir] ( identifier[tmpdir] ) identifier[set_tempdir] ( identifier[tmpdir] ) identifier[df] = identifier[vcf_to_df] ( identifier[canvasvcfs] , identifier[exonbed] , identifier[opts] . identifier[cpus] ) keyword[for] identifier[suffix] keyword[in] ( literal[string] , literal[string] ): identifier[df_to_tsv] ( identifier[df] , identifier[tsvfile] , identifier[suffix] )
def gcn(args): """ %prog gcn gencode.v26.exonunion.bed data/*.vcf.gz Compile gene copy njumber based on CANVAS results. """ p = OptionParser(gcn.__doc__) p.set_cpus() p.set_tmpdir(tmpdir='tmp') p.set_outfile() (opts, args) = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) # depends on [control=['if'], data=[]] exonbed = args[0] canvasvcfs = args[1:] tsvfile = opts.outfile tmpdir = opts.tmpdir mkdir(tmpdir) set_tempdir(tmpdir) df = vcf_to_df(canvasvcfs, exonbed, opts.cpus) for suffix in ('.avgcn', '.medcn'): df_to_tsv(df, tsvfile, suffix) # depends on [control=['for'], data=['suffix']]
def recursive_update(default, custom): '''Return a dict merged from default and custom >>> recursive_update('a', 'b') Traceback (most recent call last): ... TypeError: Params of recursive_update should be dicts >>> recursive_update({'a': [1]}, {'a': [2], 'c': {'d': {'c': 3}}}) {'a': [2], 'c': {'d': {'c': 3}}} >>> recursive_update({'a': {'c': 1, 'd': {}}, 'b': 4}, {'b': 5}) {'a': {'c': 1, 'd': {}}, 'b': 5} >>> recursive_update({'a': {'c': 1, 'd': {}}, 'b': 4}, {'a': 2}) {'a': 2, 'b': 4} ''' if not isinstance(default, dict) or not isinstance(custom, dict): raise TypeError('Params of recursive_update should be dicts') for key in custom: if isinstance(custom[key], dict) and isinstance( default.get(key), dict): default[key] = recursive_update(default[key], custom[key]) else: default[key] = custom[key] return default
def function[recursive_update, parameter[default, custom]]: constant[Return a dict merged from default and custom >>> recursive_update('a', 'b') Traceback (most recent call last): ... TypeError: Params of recursive_update should be dicts >>> recursive_update({'a': [1]}, {'a': [2], 'c': {'d': {'c': 3}}}) {'a': [2], 'c': {'d': {'c': 3}}} >>> recursive_update({'a': {'c': 1, 'd': {}}, 'b': 4}, {'b': 5}) {'a': {'c': 1, 'd': {}}, 'b': 5} >>> recursive_update({'a': {'c': 1, 'd': {}}, 'b': 4}, {'a': 2}) {'a': 2, 'b': 4} ] if <ast.BoolOp object at 0x7da18eb55480> begin[:] <ast.Raise object at 0x7da18eb56350> for taget[name[key]] in starred[name[custom]] begin[:] if <ast.BoolOp object at 0x7da18eb56ce0> begin[:] call[name[default]][name[key]] assign[=] call[name[recursive_update], parameter[call[name[default]][name[key]], call[name[custom]][name[key]]]] return[name[default]]
keyword[def] identifier[recursive_update] ( identifier[default] , identifier[custom] ): literal[string] keyword[if] keyword[not] identifier[isinstance] ( identifier[default] , identifier[dict] ) keyword[or] keyword[not] identifier[isinstance] ( identifier[custom] , identifier[dict] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[for] identifier[key] keyword[in] identifier[custom] : keyword[if] identifier[isinstance] ( identifier[custom] [ identifier[key] ], identifier[dict] ) keyword[and] identifier[isinstance] ( identifier[default] . identifier[get] ( identifier[key] ), identifier[dict] ): identifier[default] [ identifier[key] ]= identifier[recursive_update] ( identifier[default] [ identifier[key] ], identifier[custom] [ identifier[key] ]) keyword[else] : identifier[default] [ identifier[key] ]= identifier[custom] [ identifier[key] ] keyword[return] identifier[default]
def recursive_update(default, custom): """Return a dict merged from default and custom >>> recursive_update('a', 'b') Traceback (most recent call last): ... TypeError: Params of recursive_update should be dicts >>> recursive_update({'a': [1]}, {'a': [2], 'c': {'d': {'c': 3}}}) {'a': [2], 'c': {'d': {'c': 3}}} >>> recursive_update({'a': {'c': 1, 'd': {}}, 'b': 4}, {'b': 5}) {'a': {'c': 1, 'd': {}}, 'b': 5} >>> recursive_update({'a': {'c': 1, 'd': {}}, 'b': 4}, {'a': 2}) {'a': 2, 'b': 4} """ if not isinstance(default, dict) or not isinstance(custom, dict): raise TypeError('Params of recursive_update should be dicts') # depends on [control=['if'], data=[]] for key in custom: if isinstance(custom[key], dict) and isinstance(default.get(key), dict): default[key] = recursive_update(default[key], custom[key]) # depends on [control=['if'], data=[]] else: default[key] = custom[key] # depends on [control=['for'], data=['key']] return default
def _set_redistribute_ospf(self, v, load=False): """ Setter method for redistribute_ospf, mapped from YANG variable /rbridge_id/ipv6/router/ospf/redistribute/redistribute_ospf (container) If this variable is read-only (config: false) in the source YANG file, then _set_redistribute_ospf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redistribute_ospf() directly. YANG Description: OSPF routes """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=redistribute_ospf.redistribute_ospf, is_container='container', presence=True, yang_name="redistribute-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPF routes', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """redistribute_ospf must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=redistribute_ospf.redistribute_ospf, is_container='container', presence=True, yang_name="redistribute-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPF routes', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True)""", }) self.__redistribute_ospf = t if hasattr(self, '_set'): self._set()
def function[_set_redistribute_ospf, parameter[self, v, load]]: constant[ Setter method for redistribute_ospf, mapped from YANG variable /rbridge_id/ipv6/router/ospf/redistribute/redistribute_ospf (container) If this variable is read-only (config: false) in the source YANG file, then _set_redistribute_ospf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redistribute_ospf() directly. YANG Description: OSPF routes ] if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:] variable[v] assign[=] call[name[v]._utype, parameter[name[v]]] <ast.Try object at 0x7da18dc07fd0> name[self].__redistribute_ospf assign[=] name[t] if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:] call[name[self]._set, parameter[]]
keyword[def] identifier[_set_redistribute_ospf] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ): literal[string] keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ): identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] ) keyword[try] : identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[redistribute_ospf] . identifier[redistribute_ospf] , identifier[is_container] = literal[string] , identifier[presence] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] ) keyword[except] ( identifier[TypeError] , identifier[ValueError] ): keyword[raise] identifier[ValueError] ({ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : literal[string] , }) identifier[self] . identifier[__redistribute_ospf] = identifier[t] keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ): identifier[self] . identifier[_set] ()
def _set_redistribute_ospf(self, v, load=False): """ Setter method for redistribute_ospf, mapped from YANG variable /rbridge_id/ipv6/router/ospf/redistribute/redistribute_ospf (container) If this variable is read-only (config: false) in the source YANG file, then _set_redistribute_ospf is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_redistribute_ospf() directly. YANG Description: OSPF routes """ if hasattr(v, '_utype'): v = v._utype(v) # depends on [control=['if'], data=[]] try: t = YANGDynClass(v, base=redistribute_ospf.redistribute_ospf, is_container='container', presence=True, yang_name='redistribute-ospf', rest_name='ospf', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'OSPF routes', u'alt-name': u'ospf'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='container', is_config=True) # depends on [control=['try'], data=[]] except (TypeError, ValueError): raise ValueError({'error-string': 'redistribute_ospf must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=redistribute_ospf.redistribute_ospf, is_container=\'container\', presence=True, yang_name="redistribute-ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'OSPF routes\', u\'alt-name\': u\'ospf\'}}, namespace=\'urn:brocade.com:mgmt:brocade-ospfv3\', defining_module=\'brocade-ospfv3\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]] self.__redistribute_ospf = t if hasattr(self, '_set'): self._set() # depends on [control=['if'], data=[]]
def download_message_media(msg_id): """Download a media file""" message = g.driver.get_message_by_id(msg_id) if not message or not message.mime: abort(404) profile_path = create_static_profile_path(g.client_id) filename = message.save_media(profile_path, True) if os.path.exists(filename): return send_file(filename, mimetype=message.mime) abort(404)
def function[download_message_media, parameter[msg_id]]: constant[Download a media file] variable[message] assign[=] call[name[g].driver.get_message_by_id, parameter[name[msg_id]]] if <ast.BoolOp object at 0x7da1b1c7a980> begin[:] call[name[abort], parameter[constant[404]]] variable[profile_path] assign[=] call[name[create_static_profile_path], parameter[name[g].client_id]] variable[filename] assign[=] call[name[message].save_media, parameter[name[profile_path], constant[True]]] if call[name[os].path.exists, parameter[name[filename]]] begin[:] return[call[name[send_file], parameter[name[filename]]]] call[name[abort], parameter[constant[404]]]
keyword[def] identifier[download_message_media] ( identifier[msg_id] ): literal[string] identifier[message] = identifier[g] . identifier[driver] . identifier[get_message_by_id] ( identifier[msg_id] ) keyword[if] keyword[not] identifier[message] keyword[or] keyword[not] identifier[message] . identifier[mime] : identifier[abort] ( literal[int] ) identifier[profile_path] = identifier[create_static_profile_path] ( identifier[g] . identifier[client_id] ) identifier[filename] = identifier[message] . identifier[save_media] ( identifier[profile_path] , keyword[True] ) keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ): keyword[return] identifier[send_file] ( identifier[filename] , identifier[mimetype] = identifier[message] . identifier[mime] ) identifier[abort] ( literal[int] )
def download_message_media(msg_id): """Download a media file""" message = g.driver.get_message_by_id(msg_id) if not message or not message.mime: abort(404) # depends on [control=['if'], data=[]] profile_path = create_static_profile_path(g.client_id) filename = message.save_media(profile_path, True) if os.path.exists(filename): return send_file(filename, mimetype=message.mime) # depends on [control=['if'], data=[]] abort(404)
def onlineTable(self, login, tableName): """ Parameters: - login - tableName """ self.send_onlineTable(login, tableName) self.recv_onlineTable()
def function[onlineTable, parameter[self, login, tableName]]: constant[ Parameters: - login - tableName ] call[name[self].send_onlineTable, parameter[name[login], name[tableName]]] call[name[self].recv_onlineTable, parameter[]]
keyword[def] identifier[onlineTable] ( identifier[self] , identifier[login] , identifier[tableName] ): literal[string] identifier[self] . identifier[send_onlineTable] ( identifier[login] , identifier[tableName] ) identifier[self] . identifier[recv_onlineTable] ()
def onlineTable(self, login, tableName): """ Parameters: - login - tableName """ self.send_onlineTable(login, tableName) self.recv_onlineTable()
def emoticons_tag(parser, token): """ Tag for rendering emoticons. """ exclude = '' args = token.split_contents() if len(args) == 2: exclude = args[1] elif len(args) > 2: raise template.TemplateSyntaxError( 'emoticons tag has only one optional argument') nodelist = parser.parse(['endemoticons']) parser.delete_first_token() return EmoticonNode(nodelist, exclude)
def function[emoticons_tag, parameter[parser, token]]: constant[ Tag for rendering emoticons. ] variable[exclude] assign[=] constant[] variable[args] assign[=] call[name[token].split_contents, parameter[]] if compare[call[name[len], parameter[name[args]]] equal[==] constant[2]] begin[:] variable[exclude] assign[=] call[name[args]][constant[1]] variable[nodelist] assign[=] call[name[parser].parse, parameter[list[[<ast.Constant object at 0x7da1b2480c40>]]]] call[name[parser].delete_first_token, parameter[]] return[call[name[EmoticonNode], parameter[name[nodelist], name[exclude]]]]
keyword[def] identifier[emoticons_tag] ( identifier[parser] , identifier[token] ): literal[string] identifier[exclude] = literal[string] identifier[args] = identifier[token] . identifier[split_contents] () keyword[if] identifier[len] ( identifier[args] )== literal[int] : identifier[exclude] = identifier[args] [ literal[int] ] keyword[elif] identifier[len] ( identifier[args] )> literal[int] : keyword[raise] identifier[template] . identifier[TemplateSyntaxError] ( literal[string] ) identifier[nodelist] = identifier[parser] . identifier[parse] ([ literal[string] ]) identifier[parser] . identifier[delete_first_token] () keyword[return] identifier[EmoticonNode] ( identifier[nodelist] , identifier[exclude] )
def emoticons_tag(parser, token): """ Tag for rendering emoticons. """ exclude = '' args = token.split_contents() if len(args) == 2: exclude = args[1] # depends on [control=['if'], data=[]] elif len(args) > 2: raise template.TemplateSyntaxError('emoticons tag has only one optional argument') # depends on [control=['if'], data=[]] nodelist = parser.parse(['endemoticons']) parser.delete_first_token() return EmoticonNode(nodelist, exclude)
def distance_home(self): """ Distance away from home, in meters. Returns 3D distance if `down` is known, otherwise 2D distance. """ if self.north is not None and self.east is not None: if self.down is not None: return math.sqrt(self.north**2 + self.east**2 + self.down**2) else: return math.sqrt(self.north**2 + self.east**2)
def function[distance_home, parameter[self]]: constant[ Distance away from home, in meters. Returns 3D distance if `down` is known, otherwise 2D distance. ] if <ast.BoolOp object at 0x7da18dc06650> begin[:] if compare[name[self].down is_not constant[None]] begin[:] return[call[name[math].sqrt, parameter[binary_operation[binary_operation[binary_operation[name[self].north ** constant[2]] + binary_operation[name[self].east ** constant[2]]] + binary_operation[name[self].down ** constant[2]]]]]]
keyword[def] identifier[distance_home] ( identifier[self] ): literal[string] keyword[if] identifier[self] . identifier[north] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[east] keyword[is] keyword[not] keyword[None] : keyword[if] identifier[self] . identifier[down] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[math] . identifier[sqrt] ( identifier[self] . identifier[north] ** literal[int] + identifier[self] . identifier[east] ** literal[int] + identifier[self] . identifier[down] ** literal[int] ) keyword[else] : keyword[return] identifier[math] . identifier[sqrt] ( identifier[self] . identifier[north] ** literal[int] + identifier[self] . identifier[east] ** literal[int] )
def distance_home(self): """ Distance away from home, in meters. Returns 3D distance if `down` is known, otherwise 2D distance. """ if self.north is not None and self.east is not None: if self.down is not None: return math.sqrt(self.north ** 2 + self.east ** 2 + self.down ** 2) # depends on [control=['if'], data=[]] else: return math.sqrt(self.north ** 2 + self.east ** 2) # depends on [control=['if'], data=[]]
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ evl = self.input.payload if isinstance(evl, Evaluation): summary = evl.summary(title=self.resolve_option("title"), complexity=bool(self.resolve_option("complexity"))) if bool(self.resolve_option("matrix")): summary += "\n" + evl.matrix(title=self.resolve_option("title")) else: summary = evl.cluster_results self._output.append(Token(summary)) return None
def function[do_execute, parameter[self]]: constant[ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str ] variable[evl] assign[=] name[self].input.payload if call[name[isinstance], parameter[name[evl], name[Evaluation]]] begin[:] variable[summary] assign[=] call[name[evl].summary, parameter[]] if call[name[bool], parameter[call[name[self].resolve_option, parameter[constant[matrix]]]]] begin[:] <ast.AugAssign object at 0x7da1b23454b0> call[name[self]._output.append, parameter[call[name[Token], parameter[name[summary]]]]] return[constant[None]]
keyword[def] identifier[do_execute] ( identifier[self] ): literal[string] identifier[evl] = identifier[self] . identifier[input] . identifier[payload] keyword[if] identifier[isinstance] ( identifier[evl] , identifier[Evaluation] ): identifier[summary] = identifier[evl] . identifier[summary] ( identifier[title] = identifier[self] . identifier[resolve_option] ( literal[string] ), identifier[complexity] = identifier[bool] ( identifier[self] . identifier[resolve_option] ( literal[string] ))) keyword[if] identifier[bool] ( identifier[self] . identifier[resolve_option] ( literal[string] )): identifier[summary] += literal[string] + identifier[evl] . identifier[matrix] ( identifier[title] = identifier[self] . identifier[resolve_option] ( literal[string] )) keyword[else] : identifier[summary] = identifier[evl] . identifier[cluster_results] identifier[self] . identifier[_output] . identifier[append] ( identifier[Token] ( identifier[summary] )) keyword[return] keyword[None]
def do_execute(self): """ The actual execution of the actor. :return: None if successful, otherwise error message :rtype: str """ evl = self.input.payload if isinstance(evl, Evaluation): summary = evl.summary(title=self.resolve_option('title'), complexity=bool(self.resolve_option('complexity'))) if bool(self.resolve_option('matrix')): summary += '\n' + evl.matrix(title=self.resolve_option('title')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] else: summary = evl.cluster_results self._output.append(Token(summary)) return None
def prepare_post_parameters(self, post_params=None, files=None): """ Builds form parameters. :param post_params: Normal form parameters. :param files: File parameters. :return: Form parameters with files. """ params = {} if post_params: params.update(post_params) if files: for k, v in iteritems(files): if not v: continue with open(v, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = mimetypes.\ guess_type(filename)[0] or 'application/octet-stream' params[k] = tuple([filename, filedata, mimetype]) return params
def function[prepare_post_parameters, parameter[self, post_params, files]]: constant[ Builds form parameters. :param post_params: Normal form parameters. :param files: File parameters. :return: Form parameters with files. ] variable[params] assign[=] dictionary[[], []] if name[post_params] begin[:] call[name[params].update, parameter[name[post_params]]] if name[files] begin[:] for taget[tuple[[<ast.Name object at 0x7da20c6e58a0>, <ast.Name object at 0x7da20c6e5780>]]] in starred[call[name[iteritems], parameter[name[files]]]] begin[:] if <ast.UnaryOp object at 0x7da20c6e6200> begin[:] continue with call[name[open], parameter[name[v], constant[rb]]] begin[:] variable[filename] assign[=] call[name[os].path.basename, parameter[name[f].name]] variable[filedata] assign[=] call[name[f].read, parameter[]] variable[mimetype] assign[=] <ast.BoolOp object at 0x7da20c6e7760> call[name[params]][name[k]] assign[=] call[name[tuple], parameter[list[[<ast.Name object at 0x7da20c6e75e0>, <ast.Name object at 0x7da20c6e4d60>, <ast.Name object at 0x7da20c6e64a0>]]]] return[name[params]]
keyword[def] identifier[prepare_post_parameters] ( identifier[self] , identifier[post_params] = keyword[None] , identifier[files] = keyword[None] ): literal[string] identifier[params] ={} keyword[if] identifier[post_params] : identifier[params] . identifier[update] ( identifier[post_params] ) keyword[if] identifier[files] : keyword[for] identifier[k] , identifier[v] keyword[in] identifier[iteritems] ( identifier[files] ): keyword[if] keyword[not] identifier[v] : keyword[continue] keyword[with] identifier[open] ( identifier[v] , literal[string] ) keyword[as] identifier[f] : identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[f] . identifier[name] ) identifier[filedata] = identifier[f] . identifier[read] () identifier[mimetype] = identifier[mimetypes] . identifier[guess_type] ( identifier[filename] )[ literal[int] ] keyword[or] literal[string] identifier[params] [ identifier[k] ]= identifier[tuple] ([ identifier[filename] , identifier[filedata] , identifier[mimetype] ]) keyword[return] identifier[params]
def prepare_post_parameters(self, post_params=None, files=None): """ Builds form parameters. :param post_params: Normal form parameters. :param files: File parameters. :return: Form parameters with files. """ params = {} if post_params: params.update(post_params) # depends on [control=['if'], data=[]] if files: for (k, v) in iteritems(files): if not v: continue # depends on [control=['if'], data=[]] with open(v, 'rb') as f: filename = os.path.basename(f.name) filedata = f.read() mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream' params[k] = tuple([filename, filedata, mimetype]) # depends on [control=['with'], data=['f']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] return params
def set_value(self, value): """Set value to action.""" self._value = value for proxy in self.get_proxies(): proxy.handler_block(self._changed_handlers[proxy]) proxy.set_value(self._value) proxy.handler_unblock(self._changed_handlers[proxy]) pass self.emit('changed') return
def function[set_value, parameter[self, value]]: constant[Set value to action.] name[self]._value assign[=] name[value] for taget[name[proxy]] in starred[call[name[self].get_proxies, parameter[]]] begin[:] call[name[proxy].handler_block, parameter[call[name[self]._changed_handlers][name[proxy]]]] call[name[proxy].set_value, parameter[name[self]._value]] call[name[proxy].handler_unblock, parameter[call[name[self]._changed_handlers][name[proxy]]]] pass call[name[self].emit, parameter[constant[changed]]] return[None]
keyword[def] identifier[set_value] ( identifier[self] , identifier[value] ): literal[string] identifier[self] . identifier[_value] = identifier[value] keyword[for] identifier[proxy] keyword[in] identifier[self] . identifier[get_proxies] (): identifier[proxy] . identifier[handler_block] ( identifier[self] . identifier[_changed_handlers] [ identifier[proxy] ]) identifier[proxy] . identifier[set_value] ( identifier[self] . identifier[_value] ) identifier[proxy] . identifier[handler_unblock] ( identifier[self] . identifier[_changed_handlers] [ identifier[proxy] ]) keyword[pass] identifier[self] . identifier[emit] ( literal[string] ) keyword[return]
def set_value(self, value): """Set value to action.""" self._value = value for proxy in self.get_proxies(): proxy.handler_block(self._changed_handlers[proxy]) proxy.set_value(self._value) proxy.handler_unblock(self._changed_handlers[proxy]) pass # depends on [control=['for'], data=['proxy']] self.emit('changed') return
def parse_duration(line): """ Return a timedelta object from a string in the DURATION property format """ DAYS, SECS = {'D': 1, 'W': 7}, {'S': 1, 'M': 60, 'H': 3600} sign, i = 1, 0 if line[i] in '-+': if line[i] == '-': sign = -1 i += 1 if line[i] != 'P': raise parse.ParseError() i += 1 days, secs = 0, 0 while i < len(line): if line[i] == 'T': i += 1 if i == len(line): break j = i while line[j].isdigit(): j += 1 if i == j: raise parse.ParseError() val = int(line[i:j]) if line[j] in DAYS: days += val * DAYS[line[j]] DAYS.pop(line[j]) elif line[j] in SECS: secs += val * SECS[line[j]] SECS.pop(line[j]) else: raise parse.ParseError() i = j + 1 return timedelta(sign * days, sign * secs)
def function[parse_duration, parameter[line]]: constant[ Return a timedelta object from a string in the DURATION property format ] <ast.Tuple object at 0x7da18f00d810> assign[=] tuple[[<ast.Dict object at 0x7da18f00ed40>, <ast.Dict object at 0x7da18f00d7b0>]] <ast.Tuple object at 0x7da18f00c550> assign[=] tuple[[<ast.Constant object at 0x7da18f00c790>, <ast.Constant object at 0x7da18f00da80>]] if compare[call[name[line]][name[i]] in constant[-+]] begin[:] if compare[call[name[line]][name[i]] equal[==] constant[-]] begin[:] variable[sign] assign[=] <ast.UnaryOp object at 0x7da18f00c8e0> <ast.AugAssign object at 0x7da18f00ddb0> if compare[call[name[line]][name[i]] not_equal[!=] constant[P]] begin[:] <ast.Raise object at 0x7da18f00d180> <ast.AugAssign object at 0x7da18f00db70> <ast.Tuple object at 0x7da18f00e5c0> assign[=] tuple[[<ast.Constant object at 0x7da18f00efe0>, <ast.Constant object at 0x7da18f00e500>]] while compare[name[i] less[<] call[name[len], parameter[name[line]]]] begin[:] if compare[call[name[line]][name[i]] equal[==] constant[T]] begin[:] <ast.AugAssign object at 0x7da18f00cb20> if compare[name[i] equal[==] call[name[len], parameter[name[line]]]] begin[:] break variable[j] assign[=] name[i] while call[call[name[line]][name[j]].isdigit, parameter[]] begin[:] <ast.AugAssign object at 0x7da18f00fd30> if compare[name[i] equal[==] name[j]] begin[:] <ast.Raise object at 0x7da18f00d000> variable[val] assign[=] call[name[int], parameter[call[name[line]][<ast.Slice object at 0x7da18f00ece0>]]] if compare[call[name[line]][name[j]] in name[DAYS]] begin[:] <ast.AugAssign object at 0x7da18f00f7c0> call[name[DAYS].pop, parameter[call[name[line]][name[j]]]] variable[i] assign[=] binary_operation[name[j] + constant[1]] return[call[name[timedelta], parameter[binary_operation[name[sign] * name[days]], binary_operation[name[sign] * name[secs]]]]]
keyword[def] identifier[parse_duration] ( identifier[line] ): literal[string] identifier[DAYS] , identifier[SECS] ={ literal[string] : literal[int] , literal[string] : literal[int] },{ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] } identifier[sign] , identifier[i] = literal[int] , literal[int] keyword[if] identifier[line] [ identifier[i] ] keyword[in] literal[string] : keyword[if] identifier[line] [ identifier[i] ]== literal[string] : identifier[sign] =- literal[int] identifier[i] += literal[int] keyword[if] identifier[line] [ identifier[i] ]!= literal[string] : keyword[raise] identifier[parse] . identifier[ParseError] () identifier[i] += literal[int] identifier[days] , identifier[secs] = literal[int] , literal[int] keyword[while] identifier[i] < identifier[len] ( identifier[line] ): keyword[if] identifier[line] [ identifier[i] ]== literal[string] : identifier[i] += literal[int] keyword[if] identifier[i] == identifier[len] ( identifier[line] ): keyword[break] identifier[j] = identifier[i] keyword[while] identifier[line] [ identifier[j] ]. identifier[isdigit] (): identifier[j] += literal[int] keyword[if] identifier[i] == identifier[j] : keyword[raise] identifier[parse] . identifier[ParseError] () identifier[val] = identifier[int] ( identifier[line] [ identifier[i] : identifier[j] ]) keyword[if] identifier[line] [ identifier[j] ] keyword[in] identifier[DAYS] : identifier[days] += identifier[val] * identifier[DAYS] [ identifier[line] [ identifier[j] ]] identifier[DAYS] . identifier[pop] ( identifier[line] [ identifier[j] ]) keyword[elif] identifier[line] [ identifier[j] ] keyword[in] identifier[SECS] : identifier[secs] += identifier[val] * identifier[SECS] [ identifier[line] [ identifier[j] ]] identifier[SECS] . identifier[pop] ( identifier[line] [ identifier[j] ]) keyword[else] : keyword[raise] identifier[parse] . identifier[ParseError] () identifier[i] = identifier[j] + literal[int] keyword[return] identifier[timedelta] ( identifier[sign] * identifier[days] , identifier[sign] * identifier[secs] )
def parse_duration(line): """ Return a timedelta object from a string in the DURATION property format """ (DAYS, SECS) = ({'D': 1, 'W': 7}, {'S': 1, 'M': 60, 'H': 3600}) (sign, i) = (1, 0) if line[i] in '-+': if line[i] == '-': sign = -1 # depends on [control=['if'], data=[]] i += 1 # depends on [control=['if'], data=[]] if line[i] != 'P': raise parse.ParseError() # depends on [control=['if'], data=[]] i += 1 (days, secs) = (0, 0) while i < len(line): if line[i] == 'T': i += 1 if i == len(line): break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] j = i while line[j].isdigit(): j += 1 # depends on [control=['while'], data=[]] if i == j: raise parse.ParseError() # depends on [control=['if'], data=[]] val = int(line[i:j]) if line[j] in DAYS: days += val * DAYS[line[j]] DAYS.pop(line[j]) # depends on [control=['if'], data=['DAYS']] elif line[j] in SECS: secs += val * SECS[line[j]] SECS.pop(line[j]) # depends on [control=['if'], data=['SECS']] else: raise parse.ParseError() i = j + 1 # depends on [control=['while'], data=['i']] return timedelta(sign * days, sign * secs)
def to_native(self, value): """Load a value as a list, converting items if necessary""" if isinstance(value, six.string_types): value_list = value.split(self.string_delim) else: value_list = value to_native = self.member_type.to_native if self.member_type is not None else lambda x: x return [to_native(item) for item in value_list]
def function[to_native, parameter[self, value]]: constant[Load a value as a list, converting items if necessary] if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:] variable[value_list] assign[=] call[name[value].split, parameter[name[self].string_delim]] variable[to_native] assign[=] <ast.IfExp object at 0x7da1b2436140> return[<ast.ListComp object at 0x7da1b2435d20>]
keyword[def] identifier[to_native] ( identifier[self] , identifier[value] ): literal[string] keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ): identifier[value_list] = identifier[value] . identifier[split] ( identifier[self] . identifier[string_delim] ) keyword[else] : identifier[value_list] = identifier[value] identifier[to_native] = identifier[self] . identifier[member_type] . identifier[to_native] keyword[if] identifier[self] . identifier[member_type] keyword[is] keyword[not] keyword[None] keyword[else] keyword[lambda] identifier[x] : identifier[x] keyword[return] [ identifier[to_native] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[value_list] ]
def to_native(self, value): """Load a value as a list, converting items if necessary""" if isinstance(value, six.string_types): value_list = value.split(self.string_delim) # depends on [control=['if'], data=[]] else: value_list = value to_native = self.member_type.to_native if self.member_type is not None else lambda x: x return [to_native(item) for item in value_list]
def handle_set_services(self, req): """Handles the POST v2/<account>/.services call for setting services information. Can only be called by a reseller .admin. In the :func:`handle_get_account` (GET v2/<account>) call, a section of the returned JSON dict is `services`. This section looks something like this:: "services": {"storage": {"default": "local", "local": "http://127.0.0.1:8080/v1/AUTH_018c3946"}} Making use of this section is described in :func:`handle_get_token`. This function allows setting values within this section for the <account>, allowing the addition of new service end points or updating existing ones. The body of the POST request should contain a JSON dict with the following format:: {"service_name": {"end_point_name": "end_point_value"}} There can be multiple services and multiple end points in the same call. Any new services or end points will be added to the existing set of services and end points. Any existing services with the same service name will be merged with the new end points. Any existing end points with the same end point name will have their values updated. The updated services dictionary will be returned on success. :param req: The swob.Request to process. :returns: swob.Response, 2xx on success with the udpated services JSON dict as described above """ if not self.is_reseller_admin(req): return self.denied_response(req) account = req.path_info_pop() if req.path_info != '/.services' or not account or account[0] == '.': return HTTPBadRequest(request=req) try: new_services = json.loads(req.body) except ValueError as err: return HTTPBadRequest(body=str(err)) # Get the current services information path = quote('/v1/%s/%s/.services' % (self.auth_account, account)) resp = self.make_pre_authed_request( req.environ, 'GET', path).get_response(self.app) if resp.status_int == 404: return HTTPNotFound(request=req) if resp.status_int // 100 != 2: raise Exception('Could not obtain services info: %s %s' % (path, resp.status)) services = json.loads(resp.body) for new_service, value in new_services.iteritems(): if new_service in services: services[new_service].update(value) else: services[new_service] = value # Save the new services information services = json.dumps(services) resp = self.make_pre_authed_request( req.environ, 'PUT', path, services).get_response(self.app) if resp.status_int // 100 != 2: raise Exception('Could not save .services object: %s %s' % (path, resp.status)) return Response(request=req, body=services, content_type=CONTENT_TYPE_JSON)
def function[handle_set_services, parameter[self, req]]: constant[Handles the POST v2/<account>/.services call for setting services information. Can only be called by a reseller .admin. In the :func:`handle_get_account` (GET v2/<account>) call, a section of the returned JSON dict is `services`. This section looks something like this:: "services": {"storage": {"default": "local", "local": "http://127.0.0.1:8080/v1/AUTH_018c3946"}} Making use of this section is described in :func:`handle_get_token`. This function allows setting values within this section for the <account>, allowing the addition of new service end points or updating existing ones. The body of the POST request should contain a JSON dict with the following format:: {"service_name": {"end_point_name": "end_point_value"}} There can be multiple services and multiple end points in the same call. Any new services or end points will be added to the existing set of services and end points. Any existing services with the same service name will be merged with the new end points. Any existing end points with the same end point name will have their values updated. The updated services dictionary will be returned on success. :param req: The swob.Request to process. :returns: swob.Response, 2xx on success with the udpated services JSON dict as described above ] if <ast.UnaryOp object at 0x7da1b0558430> begin[:] return[call[name[self].denied_response, parameter[name[req]]]] variable[account] assign[=] call[name[req].path_info_pop, parameter[]] if <ast.BoolOp object at 0x7da1b055b2b0> begin[:] return[call[name[HTTPBadRequest], parameter[]]] <ast.Try object at 0x7da1b055b4c0> variable[path] assign[=] call[name[quote], parameter[binary_operation[constant[/v1/%s/%s/.services] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b0559d50>, <ast.Name object at 0x7da1b0559cc0>]]]]] variable[resp] assign[=] call[call[name[self].make_pre_authed_request, parameter[name[req].environ, constant[GET], name[path]]].get_response, parameter[name[self].app]] if compare[name[resp].status_int equal[==] constant[404]] begin[:] return[call[name[HTTPNotFound], parameter[]]] if compare[binary_operation[name[resp].status_int <ast.FloorDiv object at 0x7da2590d6bc0> constant[100]] not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da1b05590f0> variable[services] assign[=] call[name[json].loads, parameter[name[resp].body]] for taget[tuple[[<ast.Name object at 0x7da1b05594b0>, <ast.Name object at 0x7da1b05593c0>]]] in starred[call[name[new_services].iteritems, parameter[]]] begin[:] if compare[name[new_service] in name[services]] begin[:] call[call[name[services]][name[new_service]].update, parameter[name[value]]] variable[services] assign[=] call[name[json].dumps, parameter[name[services]]] variable[resp] assign[=] call[call[name[self].make_pre_authed_request, parameter[name[req].environ, constant[PUT], name[path], name[services]]].get_response, parameter[name[self].app]] if compare[binary_operation[name[resp].status_int <ast.FloorDiv object at 0x7da2590d6bc0> constant[100]] not_equal[!=] constant[2]] begin[:] <ast.Raise object at 0x7da1b0558760> return[call[name[Response], parameter[]]]
keyword[def] identifier[handle_set_services] ( identifier[self] , identifier[req] ): literal[string] keyword[if] keyword[not] identifier[self] . identifier[is_reseller_admin] ( identifier[req] ): keyword[return] identifier[self] . identifier[denied_response] ( identifier[req] ) identifier[account] = identifier[req] . identifier[path_info_pop] () keyword[if] identifier[req] . identifier[path_info] != literal[string] keyword[or] keyword[not] identifier[account] keyword[or] identifier[account] [ literal[int] ]== literal[string] : keyword[return] identifier[HTTPBadRequest] ( identifier[request] = identifier[req] ) keyword[try] : identifier[new_services] = identifier[json] . identifier[loads] ( identifier[req] . identifier[body] ) keyword[except] identifier[ValueError] keyword[as] identifier[err] : keyword[return] identifier[HTTPBadRequest] ( identifier[body] = identifier[str] ( identifier[err] )) identifier[path] = identifier[quote] ( literal[string] %( identifier[self] . identifier[auth_account] , identifier[account] )) identifier[resp] = identifier[self] . identifier[make_pre_authed_request] ( identifier[req] . identifier[environ] , literal[string] , identifier[path] ). identifier[get_response] ( identifier[self] . identifier[app] ) keyword[if] identifier[resp] . identifier[status_int] == literal[int] : keyword[return] identifier[HTTPNotFound] ( identifier[request] = identifier[req] ) keyword[if] identifier[resp] . identifier[status_int] // literal[int] != literal[int] : keyword[raise] identifier[Exception] ( literal[string] % ( identifier[path] , identifier[resp] . identifier[status] )) identifier[services] = identifier[json] . identifier[loads] ( identifier[resp] . identifier[body] ) keyword[for] identifier[new_service] , identifier[value] keyword[in] identifier[new_services] . identifier[iteritems] (): keyword[if] identifier[new_service] keyword[in] identifier[services] : identifier[services] [ identifier[new_service] ]. identifier[update] ( identifier[value] ) keyword[else] : identifier[services] [ identifier[new_service] ]= identifier[value] identifier[services] = identifier[json] . identifier[dumps] ( identifier[services] ) identifier[resp] = identifier[self] . identifier[make_pre_authed_request] ( identifier[req] . identifier[environ] , literal[string] , identifier[path] , identifier[services] ). identifier[get_response] ( identifier[self] . identifier[app] ) keyword[if] identifier[resp] . identifier[status_int] // literal[int] != literal[int] : keyword[raise] identifier[Exception] ( literal[string] % ( identifier[path] , identifier[resp] . identifier[status] )) keyword[return] identifier[Response] ( identifier[request] = identifier[req] , identifier[body] = identifier[services] , identifier[content_type] = identifier[CONTENT_TYPE_JSON] )
def handle_set_services(self, req): """Handles the POST v2/<account>/.services call for setting services information. Can only be called by a reseller .admin. In the :func:`handle_get_account` (GET v2/<account>) call, a section of the returned JSON dict is `services`. This section looks something like this:: "services": {"storage": {"default": "local", "local": "http://127.0.0.1:8080/v1/AUTH_018c3946"}} Making use of this section is described in :func:`handle_get_token`. This function allows setting values within this section for the <account>, allowing the addition of new service end points or updating existing ones. The body of the POST request should contain a JSON dict with the following format:: {"service_name": {"end_point_name": "end_point_value"}} There can be multiple services and multiple end points in the same call. Any new services or end points will be added to the existing set of services and end points. Any existing services with the same service name will be merged with the new end points. Any existing end points with the same end point name will have their values updated. The updated services dictionary will be returned on success. :param req: The swob.Request to process. :returns: swob.Response, 2xx on success with the udpated services JSON dict as described above """ if not self.is_reseller_admin(req): return self.denied_response(req) # depends on [control=['if'], data=[]] account = req.path_info_pop() if req.path_info != '/.services' or not account or account[0] == '.': return HTTPBadRequest(request=req) # depends on [control=['if'], data=[]] try: new_services = json.loads(req.body) # depends on [control=['try'], data=[]] except ValueError as err: return HTTPBadRequest(body=str(err)) # depends on [control=['except'], data=['err']] # Get the current services information path = quote('/v1/%s/%s/.services' % (self.auth_account, account)) resp = self.make_pre_authed_request(req.environ, 'GET', path).get_response(self.app) if resp.status_int == 404: return HTTPNotFound(request=req) # depends on [control=['if'], data=[]] if resp.status_int // 100 != 2: raise Exception('Could not obtain services info: %s %s' % (path, resp.status)) # depends on [control=['if'], data=[]] services = json.loads(resp.body) for (new_service, value) in new_services.iteritems(): if new_service in services: services[new_service].update(value) # depends on [control=['if'], data=['new_service', 'services']] else: services[new_service] = value # depends on [control=['for'], data=[]] # Save the new services information services = json.dumps(services) resp = self.make_pre_authed_request(req.environ, 'PUT', path, services).get_response(self.app) if resp.status_int // 100 != 2: raise Exception('Could not save .services object: %s %s' % (path, resp.status)) # depends on [control=['if'], data=[]] return Response(request=req, body=services, content_type=CONTENT_TYPE_JSON)
def refreshFromTarget(self, level=0): """ Refreshes the configuration tree from the target it monitors (if present). Recursively call _refreshNodeFromTarget for itself and all children. Subclasses should typically override _refreshNodeFromTarget instead of this function. During updateTarget's execution refreshFromTarget is blocked to avoid loops. """ if self.getRefreshBlocked(): logger.debug("_refreshNodeFromTarget blocked") return if False and level == 0: logger.debug("refreshFromTarget: {}".format(self.nodePath)) self._refreshNodeFromTarget() for child in self.childItems: child.refreshFromTarget(level=level + 1)
def function[refreshFromTarget, parameter[self, level]]: constant[ Refreshes the configuration tree from the target it monitors (if present). Recursively call _refreshNodeFromTarget for itself and all children. Subclasses should typically override _refreshNodeFromTarget instead of this function. During updateTarget's execution refreshFromTarget is blocked to avoid loops. ] if call[name[self].getRefreshBlocked, parameter[]] begin[:] call[name[logger].debug, parameter[constant[_refreshNodeFromTarget blocked]]] return[None] if <ast.BoolOp object at 0x7da1b0539150> begin[:] call[name[logger].debug, parameter[call[constant[refreshFromTarget: {}].format, parameter[name[self].nodePath]]]] call[name[self]._refreshNodeFromTarget, parameter[]] for taget[name[child]] in starred[name[self].childItems] begin[:] call[name[child].refreshFromTarget, parameter[]]
keyword[def] identifier[refreshFromTarget] ( identifier[self] , identifier[level] = literal[int] ): literal[string] keyword[if] identifier[self] . identifier[getRefreshBlocked] (): identifier[logger] . identifier[debug] ( literal[string] ) keyword[return] keyword[if] keyword[False] keyword[and] identifier[level] == literal[int] : identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[self] . identifier[nodePath] )) identifier[self] . identifier[_refreshNodeFromTarget] () keyword[for] identifier[child] keyword[in] identifier[self] . identifier[childItems] : identifier[child] . identifier[refreshFromTarget] ( identifier[level] = identifier[level] + literal[int] )
def refreshFromTarget(self, level=0): """ Refreshes the configuration tree from the target it monitors (if present). Recursively call _refreshNodeFromTarget for itself and all children. Subclasses should typically override _refreshNodeFromTarget instead of this function. During updateTarget's execution refreshFromTarget is blocked to avoid loops. """ if self.getRefreshBlocked(): logger.debug('_refreshNodeFromTarget blocked') return # depends on [control=['if'], data=[]] if False and level == 0: logger.debug('refreshFromTarget: {}'.format(self.nodePath)) # depends on [control=['if'], data=[]] self._refreshNodeFromTarget() for child in self.childItems: child.refreshFromTarget(level=level + 1) # depends on [control=['for'], data=['child']]
def update(self, role_sid=values.unset, last_consumed_message_index=values.unset): """ Update the MemberInstance :param unicode role_sid: The Role assigned to this member. :param unicode last_consumed_message_index: An Integer representing index of the last Message this Member has read within this Channel :returns: Updated MemberInstance :rtype: twilio.rest.chat.v1.service.channel.member.MemberInstance """ data = values.of({'RoleSid': role_sid, 'LastConsumedMessageIndex': last_consumed_message_index, }) payload = self._version.update( 'POST', self._uri, data=data, ) return MemberInstance( self._version, payload, service_sid=self._solution['service_sid'], channel_sid=self._solution['channel_sid'], sid=self._solution['sid'], )
def function[update, parameter[self, role_sid, last_consumed_message_index]]: constant[ Update the MemberInstance :param unicode role_sid: The Role assigned to this member. :param unicode last_consumed_message_index: An Integer representing index of the last Message this Member has read within this Channel :returns: Updated MemberInstance :rtype: twilio.rest.chat.v1.service.channel.member.MemberInstance ] variable[data] assign[=] call[name[values].of, parameter[dictionary[[<ast.Constant object at 0x7da1b23471f0>, <ast.Constant object at 0x7da1b2346e30>], [<ast.Name object at 0x7da1b2345cf0>, <ast.Name object at 0x7da1b2344b80>]]]] variable[payload] assign[=] call[name[self]._version.update, parameter[constant[POST], name[self]._uri]] return[call[name[MemberInstance], parameter[name[self]._version, name[payload]]]]
keyword[def] identifier[update] ( identifier[self] , identifier[role_sid] = identifier[values] . identifier[unset] , identifier[last_consumed_message_index] = identifier[values] . identifier[unset] ): literal[string] identifier[data] = identifier[values] . identifier[of] ({ literal[string] : identifier[role_sid] , literal[string] : identifier[last_consumed_message_index] ,}) identifier[payload] = identifier[self] . identifier[_version] . identifier[update] ( literal[string] , identifier[self] . identifier[_uri] , identifier[data] = identifier[data] , ) keyword[return] identifier[MemberInstance] ( identifier[self] . identifier[_version] , identifier[payload] , identifier[service_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[channel_sid] = identifier[self] . identifier[_solution] [ literal[string] ], identifier[sid] = identifier[self] . identifier[_solution] [ literal[string] ], )
def update(self, role_sid=values.unset, last_consumed_message_index=values.unset): """ Update the MemberInstance :param unicode role_sid: The Role assigned to this member. :param unicode last_consumed_message_index: An Integer representing index of the last Message this Member has read within this Channel :returns: Updated MemberInstance :rtype: twilio.rest.chat.v1.service.channel.member.MemberInstance """ data = values.of({'RoleSid': role_sid, 'LastConsumedMessageIndex': last_consumed_message_index}) payload = self._version.update('POST', self._uri, data=data) return MemberInstance(self._version, payload, service_sid=self._solution['service_sid'], channel_sid=self._solution['channel_sid'], sid=self._solution['sid'])
def expose(func): """ Decorator to be used with :class:`SDKWrapper`. This decorator indicates that the wrapped method or class should be exposed in the proxied object. Args: func(types.FunctionType/types.MethodType): function to decorate Returns: None """ @functools.wraps(func) def wrapped(*args, **kwargs): return func(*args, **kwargs) if inspect.isclass(func): wrapped._sdkmetaclass = SDKMethod(func.__name__) else: wrapped._sdkmeta = SDKMethod(func.__name__) return wrapped
def function[expose, parameter[func]]: constant[ Decorator to be used with :class:`SDKWrapper`. This decorator indicates that the wrapped method or class should be exposed in the proxied object. Args: func(types.FunctionType/types.MethodType): function to decorate Returns: None ] def function[wrapped, parameter[]]: return[call[name[func], parameter[<ast.Starred object at 0x7da18f723df0>]]] if call[name[inspect].isclass, parameter[name[func]]] begin[:] name[wrapped]._sdkmetaclass assign[=] call[name[SDKMethod], parameter[name[func].__name__]] return[name[wrapped]]
keyword[def] identifier[expose] ( identifier[func] ): literal[string] @ identifier[functools] . identifier[wraps] ( identifier[func] ) keyword[def] identifier[wrapped] (* identifier[args] ,** identifier[kwargs] ): keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] ) keyword[if] identifier[inspect] . identifier[isclass] ( identifier[func] ): identifier[wrapped] . identifier[_sdkmetaclass] = identifier[SDKMethod] ( identifier[func] . identifier[__name__] ) keyword[else] : identifier[wrapped] . identifier[_sdkmeta] = identifier[SDKMethod] ( identifier[func] . identifier[__name__] ) keyword[return] identifier[wrapped]
def expose(func): """ Decorator to be used with :class:`SDKWrapper`. This decorator indicates that the wrapped method or class should be exposed in the proxied object. Args: func(types.FunctionType/types.MethodType): function to decorate Returns: None """ @functools.wraps(func) def wrapped(*args, **kwargs): return func(*args, **kwargs) if inspect.isclass(func): wrapped._sdkmetaclass = SDKMethod(func.__name__) # depends on [control=['if'], data=[]] else: wrapped._sdkmeta = SDKMethod(func.__name__) return wrapped
def url(self, _url, **kwargs): """ This will return the url for a Request instead of actually performing the request, and then store it in self['label']. * Note the API call isn't actually made but it is setup to call save or open_as_file :param _url: str of the sub url of the api call (ex. g/device/list) :param kwargs: dict of additional arguments :return: str of the url for the api_call """ api_call = self._create_api_call('get', _url, kwargs) data = self._clean_arguments(kwargs.pop('data', None)) params = self._clean_arguments(kwargs) api_call.set_request(method='GET', data=data, params=params, sub_url=_url) api_call._stage = 'URL' return api_call.url
def function[url, parameter[self, _url]]: constant[ This will return the url for a Request instead of actually performing the request, and then store it in self['label']. * Note the API call isn't actually made but it is setup to call save or open_as_file :param _url: str of the sub url of the api call (ex. g/device/list) :param kwargs: dict of additional arguments :return: str of the url for the api_call ] variable[api_call] assign[=] call[name[self]._create_api_call, parameter[constant[get], name[_url], name[kwargs]]] variable[data] assign[=] call[name[self]._clean_arguments, parameter[call[name[kwargs].pop, parameter[constant[data], constant[None]]]]] variable[params] assign[=] call[name[self]._clean_arguments, parameter[name[kwargs]]] call[name[api_call].set_request, parameter[]] name[api_call]._stage assign[=] constant[URL] return[name[api_call].url]
keyword[def] identifier[url] ( identifier[self] , identifier[_url] ,** identifier[kwargs] ): literal[string] identifier[api_call] = identifier[self] . identifier[_create_api_call] ( literal[string] , identifier[_url] , identifier[kwargs] ) identifier[data] = identifier[self] . identifier[_clean_arguments] ( identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )) identifier[params] = identifier[self] . identifier[_clean_arguments] ( identifier[kwargs] ) identifier[api_call] . identifier[set_request] ( identifier[method] = literal[string] , identifier[data] = identifier[data] , identifier[params] = identifier[params] , identifier[sub_url] = identifier[_url] ) identifier[api_call] . identifier[_stage] = literal[string] keyword[return] identifier[api_call] . identifier[url]
def url(self, _url, **kwargs): """ This will return the url for a Request instead of actually performing the request, and then store it in self['label']. * Note the API call isn't actually made but it is setup to call save or open_as_file :param _url: str of the sub url of the api call (ex. g/device/list) :param kwargs: dict of additional arguments :return: str of the url for the api_call """ api_call = self._create_api_call('get', _url, kwargs) data = self._clean_arguments(kwargs.pop('data', None)) params = self._clean_arguments(kwargs) api_call.set_request(method='GET', data=data, params=params, sub_url=_url) api_call._stage = 'URL' return api_call.url
def _init_polling(self): """ Bootstrap polling for throttler. To avoid spiky traffic from throttler clients, we use a random delay before the first poll. """ with self.lock: if not self.running: return r = random.Random() delay = r.random() * self.refresh_interval self.channel.io_loop.call_later( delay=delay, callback=self._delayed_polling) self.logger.info( 'Delaying throttling credit polling by %d sec', delay)
def function[_init_polling, parameter[self]]: constant[ Bootstrap polling for throttler. To avoid spiky traffic from throttler clients, we use a random delay before the first poll. ] with name[self].lock begin[:] if <ast.UnaryOp object at 0x7da20e9569b0> begin[:] return[None] variable[r] assign[=] call[name[random].Random, parameter[]] variable[delay] assign[=] binary_operation[call[name[r].random, parameter[]] * name[self].refresh_interval] call[name[self].channel.io_loop.call_later, parameter[]] call[name[self].logger.info, parameter[constant[Delaying throttling credit polling by %d sec], name[delay]]]
keyword[def] identifier[_init_polling] ( identifier[self] ): literal[string] keyword[with] identifier[self] . identifier[lock] : keyword[if] keyword[not] identifier[self] . identifier[running] : keyword[return] identifier[r] = identifier[random] . identifier[Random] () identifier[delay] = identifier[r] . identifier[random] ()* identifier[self] . identifier[refresh_interval] identifier[self] . identifier[channel] . identifier[io_loop] . identifier[call_later] ( identifier[delay] = identifier[delay] , identifier[callback] = identifier[self] . identifier[_delayed_polling] ) identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[delay] )
def _init_polling(self): """ Bootstrap polling for throttler. To avoid spiky traffic from throttler clients, we use a random delay before the first poll. """ with self.lock: if not self.running: return # depends on [control=['if'], data=[]] r = random.Random() delay = r.random() * self.refresh_interval self.channel.io_loop.call_later(delay=delay, callback=self._delayed_polling) self.logger.info('Delaying throttling credit polling by %d sec', delay) # depends on [control=['with'], data=[]]
def primitive(self, dictionary): """Item from Python primitive.""" self.__dict__ = {k: v for k, v in dictionary.items() if v}
def function[primitive, parameter[self, dictionary]]: constant[Item from Python primitive.] name[self].__dict__ assign[=] <ast.DictComp object at 0x7da1b1930af0>
keyword[def] identifier[primitive] ( identifier[self] , identifier[dictionary] ): literal[string] identifier[self] . identifier[__dict__] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dictionary] . identifier[items] () keyword[if] identifier[v] }
def primitive(self, dictionary): """Item from Python primitive.""" self.__dict__ = {k: v for (k, v) in dictionary.items() if v}
def length(self) -> Optional[int]: """ 获取 body 长度 """ len_ = self.get("content-length") if len_ is not None: return int(cast(str, len_)) return None
def function[length, parameter[self]]: constant[ 获取 body 长度 ] variable[len_] assign[=] call[name[self].get, parameter[constant[content-length]]] if compare[name[len_] is_not constant[None]] begin[:] return[call[name[int], parameter[call[name[cast], parameter[name[str], name[len_]]]]]] return[constant[None]]
keyword[def] identifier[length] ( identifier[self] )-> identifier[Optional] [ identifier[int] ]: literal[string] identifier[len_] = identifier[self] . identifier[get] ( literal[string] ) keyword[if] identifier[len_] keyword[is] keyword[not] keyword[None] : keyword[return] identifier[int] ( identifier[cast] ( identifier[str] , identifier[len_] )) keyword[return] keyword[None]
def length(self) -> Optional[int]: """ 获取 body 长度 """ len_ = self.get('content-length') if len_ is not None: return int(cast(str, len_)) # depends on [control=['if'], data=['len_']] return None
def initial_contact(self, enable_ssh=True, time_zone=None, keyboard=None, install_on_server=None, filename=None, as_base64=False): """ Allows to save the initial contact for for the specified node :param bool enable_ssh: flag to know if we allow the ssh daemon on the specified node :param str time_zone: optional time zone to set on the specified node :param str keyboard: optional keyboard to set on the specified node :param bool install_on_server: optional flag to know if the generated configuration needs to be installed on SMC Install server (POS is needed) :param str filename: filename to save initial_contact to :param bool as_base64: return the initial config in base 64 format. Useful for cloud based engine deployments as userdata :raises NodeCommandFailed: IOError handling initial configuration data :return: initial contact text information :rtype: str """ result = self.make_request( NodeCommandFailed, method='create', raw_result=True, resource='initial_contact', params={'enable_ssh': enable_ssh}) if result.content: if as_base64: result.content = b64encode(result.content) if filename: try: save_to_file(filename, result.content) except IOError as e: raise NodeCommandFailed( 'Error occurred when attempting to save initial ' 'contact to file: {}'.format(e)) return result.content
def function[initial_contact, parameter[self, enable_ssh, time_zone, keyboard, install_on_server, filename, as_base64]]: constant[ Allows to save the initial contact for for the specified node :param bool enable_ssh: flag to know if we allow the ssh daemon on the specified node :param str time_zone: optional time zone to set on the specified node :param str keyboard: optional keyboard to set on the specified node :param bool install_on_server: optional flag to know if the generated configuration needs to be installed on SMC Install server (POS is needed) :param str filename: filename to save initial_contact to :param bool as_base64: return the initial config in base 64 format. Useful for cloud based engine deployments as userdata :raises NodeCommandFailed: IOError handling initial configuration data :return: initial contact text information :rtype: str ] variable[result] assign[=] call[name[self].make_request, parameter[name[NodeCommandFailed]]] if name[result].content begin[:] if name[as_base64] begin[:] name[result].content assign[=] call[name[b64encode], parameter[name[result].content]] if name[filename] begin[:] <ast.Try object at 0x7da1b1bc0a00> return[name[result].content]
keyword[def] identifier[initial_contact] ( identifier[self] , identifier[enable_ssh] = keyword[True] , identifier[time_zone] = keyword[None] , identifier[keyboard] = keyword[None] , identifier[install_on_server] = keyword[None] , identifier[filename] = keyword[None] , identifier[as_base64] = keyword[False] ): literal[string] identifier[result] = identifier[self] . identifier[make_request] ( identifier[NodeCommandFailed] , identifier[method] = literal[string] , identifier[raw_result] = keyword[True] , identifier[resource] = literal[string] , identifier[params] ={ literal[string] : identifier[enable_ssh] }) keyword[if] identifier[result] . identifier[content] : keyword[if] identifier[as_base64] : identifier[result] . identifier[content] = identifier[b64encode] ( identifier[result] . identifier[content] ) keyword[if] identifier[filename] : keyword[try] : identifier[save_to_file] ( identifier[filename] , identifier[result] . identifier[content] ) keyword[except] identifier[IOError] keyword[as] identifier[e] : keyword[raise] identifier[NodeCommandFailed] ( literal[string] literal[string] . identifier[format] ( identifier[e] )) keyword[return] identifier[result] . identifier[content]
def initial_contact(self, enable_ssh=True, time_zone=None, keyboard=None, install_on_server=None, filename=None, as_base64=False): """ Allows to save the initial contact for for the specified node :param bool enable_ssh: flag to know if we allow the ssh daemon on the specified node :param str time_zone: optional time zone to set on the specified node :param str keyboard: optional keyboard to set on the specified node :param bool install_on_server: optional flag to know if the generated configuration needs to be installed on SMC Install server (POS is needed) :param str filename: filename to save initial_contact to :param bool as_base64: return the initial config in base 64 format. Useful for cloud based engine deployments as userdata :raises NodeCommandFailed: IOError handling initial configuration data :return: initial contact text information :rtype: str """ result = self.make_request(NodeCommandFailed, method='create', raw_result=True, resource='initial_contact', params={'enable_ssh': enable_ssh}) if result.content: if as_base64: result.content = b64encode(result.content) # depends on [control=['if'], data=[]] if filename: try: save_to_file(filename, result.content) # depends on [control=['try'], data=[]] except IOError as e: raise NodeCommandFailed('Error occurred when attempting to save initial contact to file: {}'.format(e)) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] return result.content
def distort(self, img, rotX=0, rotY=0, quad=None): ''' Apply perspective distortion ion self.img angles are in DEG and need to be positive to fit into image ''' self.img = imread(img) # fit old image to self.quad: corr = self.correct(self.img) s = self.img.shape if quad is None: wquad = (self.quad - self.quad.mean(axis=0)).astype(float) win_width = s[1] win_height = s[0] # project quad: for n, q in enumerate(wquad): p = Point3D(q[0], q[1], 0).rotateX(-rotX).rotateY(-rotY) p = p.project(win_width, win_height, s[1], s[1]) wquad[n] = (p.x, p.y) wquad = sortCorners(wquad) # scale result so that longest side of quad and wquad are equal w = wquad[:, 0].max() - wquad[:, 0].min() h = wquad[:, 1].max() - wquad[:, 1].min() scale = min(s[1] / w, s[0] / h) # scale: wquad = (wquad * scale).astype(int) else: wquad = sortCorners(quad) wquad -= wquad.min(axis=0) lx = corr.shape[1] ly = corr.shape[0] objP = np.array([ [0, 0], [lx, 0], [lx, ly], [0, ly], ], dtype=np.float32) homography = cv2.getPerspectiveTransform( wquad.astype(np.float32), objP) # distort corr: w = wquad[:, 0].max() - wquad[:, 0].min() h = wquad[:, 1].max() - wquad[:, 1].min() #(int(w),int(h)) dist = cv2.warpPerspective(corr, homography, (int(w), int(h)), flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP) # move middle of dist to middle of the old quad: bg = np.zeros(shape=s) rmn = (bg.shape[0] / 2, bg.shape[1] / 2) ss = dist.shape mn = (ss[0] / 2, ss[1] / 2) # wquad.mean(axis=0) ref = (int(rmn[0] - mn[0]), int(rmn[1] - mn[1])) bg[ref[0]:ss[0] + ref[0], ref[1]:ss[1] + ref[1]] = dist # finally move quad into right position: self.quad = wquad self.quad += (ref[1], ref[0]) self.img = bg self._homography = None self._poseFromQuad() if self.opts['do_correctIntensity']: tf = self.tiltFactor() if self.img.ndim == 3: for col in range(self.img.shape[2]): self.img[..., col] *= tf else: # tf = np.tile(tf, (1,1,self.img.shape[2])) self.img = self.img * tf return self.img
def function[distort, parameter[self, img, rotX, rotY, quad]]: constant[ Apply perspective distortion ion self.img angles are in DEG and need to be positive to fit into image ] name[self].img assign[=] call[name[imread], parameter[name[img]]] variable[corr] assign[=] call[name[self].correct, parameter[name[self].img]] variable[s] assign[=] name[self].img.shape if compare[name[quad] is constant[None]] begin[:] variable[wquad] assign[=] call[binary_operation[name[self].quad - call[name[self].quad.mean, parameter[]]].astype, parameter[name[float]]] variable[win_width] assign[=] call[name[s]][constant[1]] variable[win_height] assign[=] call[name[s]][constant[0]] for taget[tuple[[<ast.Name object at 0x7da1b1107850>, <ast.Name object at 0x7da1b1104370>]]] in starred[call[name[enumerate], parameter[name[wquad]]]] begin[:] variable[p] assign[=] call[call[call[name[Point3D], parameter[call[name[q]][constant[0]], call[name[q]][constant[1]], constant[0]]].rotateX, parameter[<ast.UnaryOp object at 0x7da1b1107580>]].rotateY, parameter[<ast.UnaryOp object at 0x7da1b1105c00>]] variable[p] assign[=] call[name[p].project, parameter[name[win_width], name[win_height], call[name[s]][constant[1]], call[name[s]][constant[1]]]] call[name[wquad]][name[n]] assign[=] tuple[[<ast.Attribute object at 0x7da1b11078b0>, <ast.Attribute object at 0x7da1b11077f0>]] variable[wquad] assign[=] call[name[sortCorners], parameter[name[wquad]]] variable[w] assign[=] binary_operation[call[call[name[wquad]][tuple[[<ast.Slice object at 0x7da1b1104e20>, <ast.Constant object at 0x7da1b11060e0>]]].max, parameter[]] - call[call[name[wquad]][tuple[[<ast.Slice object at 0x7da1b11061d0>, <ast.Constant object at 0x7da1b1106320>]]].min, parameter[]]] variable[h] assign[=] binary_operation[call[call[name[wquad]][tuple[[<ast.Slice object at 0x7da1b1105600>, <ast.Constant object at 0x7da1b1105000>]]].max, parameter[]] - call[call[name[wquad]][tuple[[<ast.Slice object at 0x7da1b1106530>, <ast.Constant object at 0x7da1b1106020>]]].min, parameter[]]] variable[scale] assign[=] call[name[min], parameter[binary_operation[call[name[s]][constant[1]] / name[w]], binary_operation[call[name[s]][constant[0]] / name[h]]]] variable[wquad] assign[=] call[binary_operation[name[wquad] * name[scale]].astype, parameter[name[int]]] <ast.AugAssign object at 0x7da1b1106110> variable[lx] assign[=] call[name[corr].shape][constant[1]] variable[ly] assign[=] call[name[corr].shape][constant[0]] variable[objP] assign[=] call[name[np].array, parameter[list[[<ast.List object at 0x7da1b1107b50>, <ast.List object at 0x7da1b1105420>, <ast.List object at 0x7da1b1107760>, <ast.List object at 0x7da1b1107970>]]]] variable[homography] assign[=] call[name[cv2].getPerspectiveTransform, parameter[call[name[wquad].astype, parameter[name[np].float32]], name[objP]]] variable[w] assign[=] binary_operation[call[call[name[wquad]][tuple[[<ast.Slice object at 0x7da1b1106920>, <ast.Constant object at 0x7da1b1107130>]]].max, parameter[]] - call[call[name[wquad]][tuple[[<ast.Slice object at 0x7da1b1106380>, <ast.Constant object at 0x7da1b11057e0>]]].min, parameter[]]] variable[h] assign[=] binary_operation[call[call[name[wquad]][tuple[[<ast.Slice object at 0x7da1b11053c0>, <ast.Constant object at 0x7da1b1106a40>]]].max, parameter[]] - call[call[name[wquad]][tuple[[<ast.Slice object at 0x7da1b1105300>, <ast.Constant object at 0x7da1b1106ec0>]]].min, parameter[]]] variable[dist] assign[=] call[name[cv2].warpPerspective, parameter[name[corr], name[homography], tuple[[<ast.Call object at 0x7da1b1104af0>, <ast.Call object at 0x7da1b11062c0>]]]] variable[bg] assign[=] call[name[np].zeros, parameter[]] variable[rmn] assign[=] tuple[[<ast.BinOp object at 0x7da1b1117fd0>, <ast.BinOp object at 0x7da1b1117d00>]] variable[ss] assign[=] name[dist].shape variable[mn] assign[=] tuple[[<ast.BinOp object at 0x7da1b1114370>, <ast.BinOp object at 0x7da1b11142e0>]] variable[ref] assign[=] tuple[[<ast.Call object at 0x7da1b11143a0>, <ast.Call object at 0x7da1b1115570>]] call[name[bg]][tuple[[<ast.Slice object at 0x7da1b11152a0>, <ast.Slice object at 0x7da1b11146a0>]]] assign[=] name[dist] name[self].quad assign[=] name[wquad] <ast.AugAssign object at 0x7da1b1116aa0> name[self].img assign[=] name[bg] name[self]._homography assign[=] constant[None] call[name[self]._poseFromQuad, parameter[]] if call[name[self].opts][constant[do_correctIntensity]] begin[:] variable[tf] assign[=] call[name[self].tiltFactor, parameter[]] if compare[name[self].img.ndim equal[==] constant[3]] begin[:] for taget[name[col]] in starred[call[name[range], parameter[call[name[self].img.shape][constant[2]]]]] begin[:] <ast.AugAssign object at 0x7da1b1116590> return[name[self].img]
keyword[def] identifier[distort] ( identifier[self] , identifier[img] , identifier[rotX] = literal[int] , identifier[rotY] = literal[int] , identifier[quad] = keyword[None] ): literal[string] identifier[self] . identifier[img] = identifier[imread] ( identifier[img] ) identifier[corr] = identifier[self] . identifier[correct] ( identifier[self] . identifier[img] ) identifier[s] = identifier[self] . identifier[img] . identifier[shape] keyword[if] identifier[quad] keyword[is] keyword[None] : identifier[wquad] =( identifier[self] . identifier[quad] - identifier[self] . identifier[quad] . identifier[mean] ( identifier[axis] = literal[int] )). identifier[astype] ( identifier[float] ) identifier[win_width] = identifier[s] [ literal[int] ] identifier[win_height] = identifier[s] [ literal[int] ] keyword[for] identifier[n] , identifier[q] keyword[in] identifier[enumerate] ( identifier[wquad] ): identifier[p] = identifier[Point3D] ( identifier[q] [ literal[int] ], identifier[q] [ literal[int] ], literal[int] ). identifier[rotateX] (- identifier[rotX] ). identifier[rotateY] (- identifier[rotY] ) identifier[p] = identifier[p] . identifier[project] ( identifier[win_width] , identifier[win_height] , identifier[s] [ literal[int] ], identifier[s] [ literal[int] ]) identifier[wquad] [ identifier[n] ]=( identifier[p] . identifier[x] , identifier[p] . identifier[y] ) identifier[wquad] = identifier[sortCorners] ( identifier[wquad] ) identifier[w] = identifier[wquad] [:, literal[int] ]. identifier[max] ()- identifier[wquad] [:, literal[int] ]. identifier[min] () identifier[h] = identifier[wquad] [:, literal[int] ]. identifier[max] ()- identifier[wquad] [:, literal[int] ]. identifier[min] () identifier[scale] = identifier[min] ( identifier[s] [ literal[int] ]/ identifier[w] , identifier[s] [ literal[int] ]/ identifier[h] ) identifier[wquad] =( identifier[wquad] * identifier[scale] ). identifier[astype] ( identifier[int] ) keyword[else] : identifier[wquad] = identifier[sortCorners] ( identifier[quad] ) identifier[wquad] -= identifier[wquad] . identifier[min] ( identifier[axis] = literal[int] ) identifier[lx] = identifier[corr] . identifier[shape] [ literal[int] ] identifier[ly] = identifier[corr] . identifier[shape] [ literal[int] ] identifier[objP] = identifier[np] . identifier[array] ([ [ literal[int] , literal[int] ], [ identifier[lx] , literal[int] ], [ identifier[lx] , identifier[ly] ], [ literal[int] , identifier[ly] ], ], identifier[dtype] = identifier[np] . identifier[float32] ) identifier[homography] = identifier[cv2] . identifier[getPerspectiveTransform] ( identifier[wquad] . identifier[astype] ( identifier[np] . identifier[float32] ), identifier[objP] ) identifier[w] = identifier[wquad] [:, literal[int] ]. identifier[max] ()- identifier[wquad] [:, literal[int] ]. identifier[min] () identifier[h] = identifier[wquad] [:, literal[int] ]. identifier[max] ()- identifier[wquad] [:, literal[int] ]. identifier[min] () identifier[dist] = identifier[cv2] . identifier[warpPerspective] ( identifier[corr] , identifier[homography] ,( identifier[int] ( identifier[w] ), identifier[int] ( identifier[h] )), identifier[flags] = identifier[cv2] . identifier[INTER_CUBIC] | identifier[cv2] . identifier[WARP_INVERSE_MAP] ) identifier[bg] = identifier[np] . identifier[zeros] ( identifier[shape] = identifier[s] ) identifier[rmn] =( identifier[bg] . identifier[shape] [ literal[int] ]/ literal[int] , identifier[bg] . identifier[shape] [ literal[int] ]/ literal[int] ) identifier[ss] = identifier[dist] . identifier[shape] identifier[mn] =( identifier[ss] [ literal[int] ]/ literal[int] , identifier[ss] [ literal[int] ]/ literal[int] ) identifier[ref] =( identifier[int] ( identifier[rmn] [ literal[int] ]- identifier[mn] [ literal[int] ]), identifier[int] ( identifier[rmn] [ literal[int] ]- identifier[mn] [ literal[int] ])) identifier[bg] [ identifier[ref] [ literal[int] ]: identifier[ss] [ literal[int] ]+ identifier[ref] [ literal[int] ], identifier[ref] [ literal[int] ]: identifier[ss] [ literal[int] ]+ identifier[ref] [ literal[int] ]]= identifier[dist] identifier[self] . identifier[quad] = identifier[wquad] identifier[self] . identifier[quad] +=( identifier[ref] [ literal[int] ], identifier[ref] [ literal[int] ]) identifier[self] . identifier[img] = identifier[bg] identifier[self] . identifier[_homography] = keyword[None] identifier[self] . identifier[_poseFromQuad] () keyword[if] identifier[self] . identifier[opts] [ literal[string] ]: identifier[tf] = identifier[self] . identifier[tiltFactor] () keyword[if] identifier[self] . identifier[img] . identifier[ndim] == literal[int] : keyword[for] identifier[col] keyword[in] identifier[range] ( identifier[self] . identifier[img] . identifier[shape] [ literal[int] ]): identifier[self] . identifier[img] [..., identifier[col] ]*= identifier[tf] keyword[else] : identifier[self] . identifier[img] = identifier[self] . identifier[img] * identifier[tf] keyword[return] identifier[self] . identifier[img]
def distort(self, img, rotX=0, rotY=0, quad=None): """ Apply perspective distortion ion self.img angles are in DEG and need to be positive to fit into image """ self.img = imread(img) # fit old image to self.quad: corr = self.correct(self.img) s = self.img.shape if quad is None: wquad = (self.quad - self.quad.mean(axis=0)).astype(float) win_width = s[1] win_height = s[0] # project quad: for (n, q) in enumerate(wquad): p = Point3D(q[0], q[1], 0).rotateX(-rotX).rotateY(-rotY) p = p.project(win_width, win_height, s[1], s[1]) wquad[n] = (p.x, p.y) # depends on [control=['for'], data=[]] wquad = sortCorners(wquad) # scale result so that longest side of quad and wquad are equal w = wquad[:, 0].max() - wquad[:, 0].min() h = wquad[:, 1].max() - wquad[:, 1].min() scale = min(s[1] / w, s[0] / h) # scale: wquad = (wquad * scale).astype(int) # depends on [control=['if'], data=[]] else: wquad = sortCorners(quad) wquad -= wquad.min(axis=0) lx = corr.shape[1] ly = corr.shape[0] objP = np.array([[0, 0], [lx, 0], [lx, ly], [0, ly]], dtype=np.float32) homography = cv2.getPerspectiveTransform(wquad.astype(np.float32), objP) # distort corr: w = wquad[:, 0].max() - wquad[:, 0].min() h = wquad[:, 1].max() - wquad[:, 1].min() #(int(w),int(h)) dist = cv2.warpPerspective(corr, homography, (int(w), int(h)), flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP) # move middle of dist to middle of the old quad: bg = np.zeros(shape=s) rmn = (bg.shape[0] / 2, bg.shape[1] / 2) ss = dist.shape mn = (ss[0] / 2, ss[1] / 2) # wquad.mean(axis=0) ref = (int(rmn[0] - mn[0]), int(rmn[1] - mn[1])) bg[ref[0]:ss[0] + ref[0], ref[1]:ss[1] + ref[1]] = dist # finally move quad into right position: self.quad = wquad self.quad += (ref[1], ref[0]) self.img = bg self._homography = None self._poseFromQuad() if self.opts['do_correctIntensity']: tf = self.tiltFactor() if self.img.ndim == 3: for col in range(self.img.shape[2]): self.img[..., col] *= tf # depends on [control=['for'], data=['col']] # depends on [control=['if'], data=[]] else: # tf = np.tile(tf, (1,1,self.img.shape[2])) self.img = self.img * tf # depends on [control=['if'], data=[]] return self.img
def sample(self, bqm, init_solution=None, tenure=None, scale_factor=1, timeout=20, num_reads=1): """Run a tabu search on a given binary quadratic model. Args: bqm (:obj:`~dimod.BinaryQuadraticModel`): The binary quadratic model (BQM) to be sampled. init_solution (:obj:`~dimod.SampleSet`, optional): Single sample that sets an initial state for all the problem variables. Default is a random initial state. tenure (int, optional): Tabu tenure, which is the length of the tabu list, or number of recently explored solutions kept in memory. Default is a quarter of the number of problem variables up to a maximum value of 20. scale_factor (number, optional): Scaling factor for linear and quadratic biases in the BQM. Internally, the BQM is converted to a QUBO matrix, and elements are stored as long ints using ``internal_q = long int (q * scale_factor)``. timeout (int, optional): Total running time in milliseconds. num_reads (int, optional): Number of reads. Each run of the tabu algorithm generates a sample. Returns: :obj:`~dimod.SampleSet`: A `dimod` :obj:`.~dimod.SampleSet` object. Examples: This example provides samples for a two-variable QUBO model. >>> from tabu import TabuSampler >>> import dimod >>> sampler = TabuSampler() >>> Q = {(0, 0): -1, (1, 1): -1, (0, 1): 2} >>> bqm = dimod.BinaryQuadraticModel.from_qubo(Q, offset=0.0) >>> samples = sampler.sample(bqm) >>> samples.record[0].energy -1.0 """ # input checking and defaults calculation # TODO: one "read" per sample in init_solution sampleset if init_solution is not None: if not isinstance(init_solution, dimod.SampleSet): raise TypeError("'init_solution' should be a 'dimod.SampleSet' instance") if len(init_solution.record) < 1: raise ValueError("'init_solution' should contain at least one sample") if len(init_solution.record[0].sample) != len(bqm): raise ValueError("'init_solution' sample dimension different from BQM") init_sample = self._bqm_sample_to_tabu_sample( init_solution.change_vartype(dimod.BINARY, inplace=False).record[0].sample, bqm.binary) else: init_sample = None if not bqm: return dimod.SampleSet.from_samples([], energy=0, vartype=bqm.vartype) if tenure is None: tenure = max(min(20, len(bqm) // 4), 0) if not isinstance(tenure, int): raise TypeError("'tenure' should be an integer in range [0, num_vars - 1]") if not 0 <= tenure < len(bqm): raise ValueError("'tenure' should be an integer in range [0, num_vars - 1]") if not isinstance(num_reads, int): raise TypeError("'num_reads' should be a positive integer") if num_reads < 1: raise ValueError("'num_reads' should be a positive integer") qubo = self._bqm_to_tabu_qubo(bqm.binary) # run Tabu search samples = [] energies = [] for _ in range(num_reads): if init_sample is None: init_sample = self._bqm_sample_to_tabu_sample(self._random_sample(bqm.binary), bqm.binary) r = TabuSearch(qubo, init_sample, tenure, scale_factor, timeout) sample = self._tabu_sample_to_bqm_sample(list(r.bestSolution()), bqm.binary) energy = bqm.binary.energy(sample) samples.append(sample) energies.append(energy) response = dimod.SampleSet.from_samples( samples, energy=energies, vartype=dimod.BINARY) response.change_vartype(bqm.vartype, inplace=True) return response
def function[sample, parameter[self, bqm, init_solution, tenure, scale_factor, timeout, num_reads]]: constant[Run a tabu search on a given binary quadratic model. Args: bqm (:obj:`~dimod.BinaryQuadraticModel`): The binary quadratic model (BQM) to be sampled. init_solution (:obj:`~dimod.SampleSet`, optional): Single sample that sets an initial state for all the problem variables. Default is a random initial state. tenure (int, optional): Tabu tenure, which is the length of the tabu list, or number of recently explored solutions kept in memory. Default is a quarter of the number of problem variables up to a maximum value of 20. scale_factor (number, optional): Scaling factor for linear and quadratic biases in the BQM. Internally, the BQM is converted to a QUBO matrix, and elements are stored as long ints using ``internal_q = long int (q * scale_factor)``. timeout (int, optional): Total running time in milliseconds. num_reads (int, optional): Number of reads. Each run of the tabu algorithm generates a sample. Returns: :obj:`~dimod.SampleSet`: A `dimod` :obj:`.~dimod.SampleSet` object. Examples: This example provides samples for a two-variable QUBO model. >>> from tabu import TabuSampler >>> import dimod >>> sampler = TabuSampler() >>> Q = {(0, 0): -1, (1, 1): -1, (0, 1): 2} >>> bqm = dimod.BinaryQuadraticModel.from_qubo(Q, offset=0.0) >>> samples = sampler.sample(bqm) >>> samples.record[0].energy -1.0 ] if compare[name[init_solution] is_not constant[None]] begin[:] if <ast.UnaryOp object at 0x7da1b0c3ccd0> begin[:] <ast.Raise object at 0x7da1b0c3c670> if compare[call[name[len], parameter[name[init_solution].record]] less[<] constant[1]] begin[:] <ast.Raise object at 0x7da1b0c3f4c0> if compare[call[name[len], parameter[call[name[init_solution].record][constant[0]].sample]] not_equal[!=] call[name[len], parameter[name[bqm]]]] begin[:] <ast.Raise object at 0x7da1b0c3e4a0> variable[init_sample] assign[=] call[name[self]._bqm_sample_to_tabu_sample, parameter[call[call[name[init_solution].change_vartype, parameter[name[dimod].BINARY]].record][constant[0]].sample, name[bqm].binary]] if <ast.UnaryOp object at 0x7da1b0c3cb80> begin[:] return[call[name[dimod].SampleSet.from_samples, parameter[list[[]]]]] if compare[name[tenure] is constant[None]] begin[:] variable[tenure] assign[=] call[name[max], parameter[call[name[min], parameter[constant[20], binary_operation[call[name[len], parameter[name[bqm]]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[4]]]], constant[0]]] if <ast.UnaryOp object at 0x7da1b0c3fdc0> begin[:] <ast.Raise object at 0x7da1b0c3c7f0> if <ast.UnaryOp object at 0x7da1b0c3f130> begin[:] <ast.Raise object at 0x7da1b0c3cc70> if <ast.UnaryOp object at 0x7da1b0c3f970> begin[:] <ast.Raise object at 0x7da1b0c3f760> if compare[name[num_reads] less[<] constant[1]] begin[:] <ast.Raise object at 0x7da1b0c3e500> variable[qubo] assign[=] call[name[self]._bqm_to_tabu_qubo, parameter[name[bqm].binary]] variable[samples] assign[=] list[[]] variable[energies] assign[=] list[[]] for taget[name[_]] in starred[call[name[range], parameter[name[num_reads]]]] begin[:] if compare[name[init_sample] is constant[None]] begin[:] variable[init_sample] assign[=] call[name[self]._bqm_sample_to_tabu_sample, parameter[call[name[self]._random_sample, parameter[name[bqm].binary]], name[bqm].binary]] variable[r] assign[=] call[name[TabuSearch], parameter[name[qubo], name[init_sample], name[tenure], name[scale_factor], name[timeout]]] variable[sample] assign[=] call[name[self]._tabu_sample_to_bqm_sample, parameter[call[name[list], parameter[call[name[r].bestSolution, parameter[]]]], name[bqm].binary]] variable[energy] assign[=] call[name[bqm].binary.energy, parameter[name[sample]]] call[name[samples].append, parameter[name[sample]]] call[name[energies].append, parameter[name[energy]]] variable[response] assign[=] call[name[dimod].SampleSet.from_samples, parameter[name[samples]]] call[name[response].change_vartype, parameter[name[bqm].vartype]] return[name[response]]
keyword[def] identifier[sample] ( identifier[self] , identifier[bqm] , identifier[init_solution] = keyword[None] , identifier[tenure] = keyword[None] , identifier[scale_factor] = literal[int] , identifier[timeout] = literal[int] , identifier[num_reads] = literal[int] ): literal[string] keyword[if] identifier[init_solution] keyword[is] keyword[not] keyword[None] : keyword[if] keyword[not] identifier[isinstance] ( identifier[init_solution] , identifier[dimod] . identifier[SampleSet] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] identifier[len] ( identifier[init_solution] . identifier[record] )< literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] identifier[len] ( identifier[init_solution] . identifier[record] [ literal[int] ]. identifier[sample] )!= identifier[len] ( identifier[bqm] ): keyword[raise] identifier[ValueError] ( literal[string] ) identifier[init_sample] = identifier[self] . identifier[_bqm_sample_to_tabu_sample] ( identifier[init_solution] . identifier[change_vartype] ( identifier[dimod] . identifier[BINARY] , identifier[inplace] = keyword[False] ). identifier[record] [ literal[int] ]. identifier[sample] , identifier[bqm] . identifier[binary] ) keyword[else] : identifier[init_sample] = keyword[None] keyword[if] keyword[not] identifier[bqm] : keyword[return] identifier[dimod] . identifier[SampleSet] . identifier[from_samples] ([], identifier[energy] = literal[int] , identifier[vartype] = identifier[bqm] . identifier[vartype] ) keyword[if] identifier[tenure] keyword[is] keyword[None] : identifier[tenure] = identifier[max] ( identifier[min] ( literal[int] , identifier[len] ( identifier[bqm] )// literal[int] ), literal[int] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[tenure] , identifier[int] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] keyword[not] literal[int] <= identifier[tenure] < identifier[len] ( identifier[bqm] ): keyword[raise] identifier[ValueError] ( literal[string] ) keyword[if] keyword[not] identifier[isinstance] ( identifier[num_reads] , identifier[int] ): keyword[raise] identifier[TypeError] ( literal[string] ) keyword[if] identifier[num_reads] < literal[int] : keyword[raise] identifier[ValueError] ( literal[string] ) identifier[qubo] = identifier[self] . identifier[_bqm_to_tabu_qubo] ( identifier[bqm] . identifier[binary] ) identifier[samples] =[] identifier[energies] =[] keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[num_reads] ): keyword[if] identifier[init_sample] keyword[is] keyword[None] : identifier[init_sample] = identifier[self] . identifier[_bqm_sample_to_tabu_sample] ( identifier[self] . identifier[_random_sample] ( identifier[bqm] . identifier[binary] ), identifier[bqm] . identifier[binary] ) identifier[r] = identifier[TabuSearch] ( identifier[qubo] , identifier[init_sample] , identifier[tenure] , identifier[scale_factor] , identifier[timeout] ) identifier[sample] = identifier[self] . identifier[_tabu_sample_to_bqm_sample] ( identifier[list] ( identifier[r] . identifier[bestSolution] ()), identifier[bqm] . identifier[binary] ) identifier[energy] = identifier[bqm] . identifier[binary] . identifier[energy] ( identifier[sample] ) identifier[samples] . identifier[append] ( identifier[sample] ) identifier[energies] . identifier[append] ( identifier[energy] ) identifier[response] = identifier[dimod] . identifier[SampleSet] . identifier[from_samples] ( identifier[samples] , identifier[energy] = identifier[energies] , identifier[vartype] = identifier[dimod] . identifier[BINARY] ) identifier[response] . identifier[change_vartype] ( identifier[bqm] . identifier[vartype] , identifier[inplace] = keyword[True] ) keyword[return] identifier[response]
def sample(self, bqm, init_solution=None, tenure=None, scale_factor=1, timeout=20, num_reads=1): """Run a tabu search on a given binary quadratic model. Args: bqm (:obj:`~dimod.BinaryQuadraticModel`): The binary quadratic model (BQM) to be sampled. init_solution (:obj:`~dimod.SampleSet`, optional): Single sample that sets an initial state for all the problem variables. Default is a random initial state. tenure (int, optional): Tabu tenure, which is the length of the tabu list, or number of recently explored solutions kept in memory. Default is a quarter of the number of problem variables up to a maximum value of 20. scale_factor (number, optional): Scaling factor for linear and quadratic biases in the BQM. Internally, the BQM is converted to a QUBO matrix, and elements are stored as long ints using ``internal_q = long int (q * scale_factor)``. timeout (int, optional): Total running time in milliseconds. num_reads (int, optional): Number of reads. Each run of the tabu algorithm generates a sample. Returns: :obj:`~dimod.SampleSet`: A `dimod` :obj:`.~dimod.SampleSet` object. Examples: This example provides samples for a two-variable QUBO model. >>> from tabu import TabuSampler >>> import dimod >>> sampler = TabuSampler() >>> Q = {(0, 0): -1, (1, 1): -1, (0, 1): 2} >>> bqm = dimod.BinaryQuadraticModel.from_qubo(Q, offset=0.0) >>> samples = sampler.sample(bqm) >>> samples.record[0].energy -1.0 """ # input checking and defaults calculation # TODO: one "read" per sample in init_solution sampleset if init_solution is not None: if not isinstance(init_solution, dimod.SampleSet): raise TypeError("'init_solution' should be a 'dimod.SampleSet' instance") # depends on [control=['if'], data=[]] if len(init_solution.record) < 1: raise ValueError("'init_solution' should contain at least one sample") # depends on [control=['if'], data=[]] if len(init_solution.record[0].sample) != len(bqm): raise ValueError("'init_solution' sample dimension different from BQM") # depends on [control=['if'], data=[]] init_sample = self._bqm_sample_to_tabu_sample(init_solution.change_vartype(dimod.BINARY, inplace=False).record[0].sample, bqm.binary) # depends on [control=['if'], data=['init_solution']] else: init_sample = None if not bqm: return dimod.SampleSet.from_samples([], energy=0, vartype=bqm.vartype) # depends on [control=['if'], data=[]] if tenure is None: tenure = max(min(20, len(bqm) // 4), 0) # depends on [control=['if'], data=['tenure']] if not isinstance(tenure, int): raise TypeError("'tenure' should be an integer in range [0, num_vars - 1]") # depends on [control=['if'], data=[]] if not 0 <= tenure < len(bqm): raise ValueError("'tenure' should be an integer in range [0, num_vars - 1]") # depends on [control=['if'], data=[]] if not isinstance(num_reads, int): raise TypeError("'num_reads' should be a positive integer") # depends on [control=['if'], data=[]] if num_reads < 1: raise ValueError("'num_reads' should be a positive integer") # depends on [control=['if'], data=[]] qubo = self._bqm_to_tabu_qubo(bqm.binary) # run Tabu search samples = [] energies = [] for _ in range(num_reads): if init_sample is None: init_sample = self._bqm_sample_to_tabu_sample(self._random_sample(bqm.binary), bqm.binary) # depends on [control=['if'], data=['init_sample']] r = TabuSearch(qubo, init_sample, tenure, scale_factor, timeout) sample = self._tabu_sample_to_bqm_sample(list(r.bestSolution()), bqm.binary) energy = bqm.binary.energy(sample) samples.append(sample) energies.append(energy) # depends on [control=['for'], data=[]] response = dimod.SampleSet.from_samples(samples, energy=energies, vartype=dimod.BINARY) response.change_vartype(bqm.vartype, inplace=True) return response
def _read_modeling_results(self, directory, silent=False): """Read modeling results from a given mod/ directory. Possible values to read in are: * voltages * potentials * sensitivities """ voltage_file = directory + os.sep + 'volt.dat' if os.path.isfile(voltage_file): if not silent: print('reading voltages') self.read_voltages(voltage_file) sens_files = sorted(glob( directory + os.sep + 'sens' + os.sep + 'sens*.dat') ) # check if there are sensitivity files, and that the nr corresponds to # the nr of configs if(len(sens_files) > 0 and len(sens_files) == self.configs.nr_of_configs): print('reading sensitivities') self._read_sensitivities(directory + os.sep + 'sens') # same for potentials pot_files = sorted(glob( directory + os.sep + 'pot' + os.sep + 'pot*.dat') ) # check if there are sensitivity files, and that the nr corresponds to # the nr of configs if(len(pot_files) > 0 and len(pot_files) == self.configs.nr_of_configs): print('reading potentials') self._read_potentials(directory + os.sep + 'pot')
def function[_read_modeling_results, parameter[self, directory, silent]]: constant[Read modeling results from a given mod/ directory. Possible values to read in are: * voltages * potentials * sensitivities ] variable[voltage_file] assign[=] binary_operation[binary_operation[name[directory] + name[os].sep] + constant[volt.dat]] if call[name[os].path.isfile, parameter[name[voltage_file]]] begin[:] if <ast.UnaryOp object at 0x7da1b24e24a0> begin[:] call[name[print], parameter[constant[reading voltages]]] call[name[self].read_voltages, parameter[name[voltage_file]]] variable[sens_files] assign[=] call[name[sorted], parameter[call[name[glob], parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[directory] + name[os].sep] + constant[sens]] + name[os].sep] + constant[sens*.dat]]]]]] if <ast.BoolOp object at 0x7da1b2298580> begin[:] call[name[print], parameter[constant[reading sensitivities]]] call[name[self]._read_sensitivities, parameter[binary_operation[binary_operation[name[directory] + name[os].sep] + constant[sens]]]] variable[pot_files] assign[=] call[name[sorted], parameter[call[name[glob], parameter[binary_operation[binary_operation[binary_operation[binary_operation[name[directory] + name[os].sep] + constant[pot]] + name[os].sep] + constant[pot*.dat]]]]]] if <ast.BoolOp object at 0x7da1b2298610> begin[:] call[name[print], parameter[constant[reading potentials]]] call[name[self]._read_potentials, parameter[binary_operation[binary_operation[name[directory] + name[os].sep] + constant[pot]]]]
keyword[def] identifier[_read_modeling_results] ( identifier[self] , identifier[directory] , identifier[silent] = keyword[False] ): literal[string] identifier[voltage_file] = identifier[directory] + identifier[os] . identifier[sep] + literal[string] keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[voltage_file] ): keyword[if] keyword[not] identifier[silent] : identifier[print] ( literal[string] ) identifier[self] . identifier[read_voltages] ( identifier[voltage_file] ) identifier[sens_files] = identifier[sorted] ( identifier[glob] ( identifier[directory] + identifier[os] . identifier[sep] + literal[string] + identifier[os] . identifier[sep] + literal[string] ) ) keyword[if] ( identifier[len] ( identifier[sens_files] )> literal[int] keyword[and] identifier[len] ( identifier[sens_files] )== identifier[self] . identifier[configs] . identifier[nr_of_configs] ): identifier[print] ( literal[string] ) identifier[self] . identifier[_read_sensitivities] ( identifier[directory] + identifier[os] . identifier[sep] + literal[string] ) identifier[pot_files] = identifier[sorted] ( identifier[glob] ( identifier[directory] + identifier[os] . identifier[sep] + literal[string] + identifier[os] . identifier[sep] + literal[string] ) ) keyword[if] ( identifier[len] ( identifier[pot_files] )> literal[int] keyword[and] identifier[len] ( identifier[pot_files] )== identifier[self] . identifier[configs] . identifier[nr_of_configs] ): identifier[print] ( literal[string] ) identifier[self] . identifier[_read_potentials] ( identifier[directory] + identifier[os] . identifier[sep] + literal[string] )
def _read_modeling_results(self, directory, silent=False): """Read modeling results from a given mod/ directory. Possible values to read in are: * voltages * potentials * sensitivities """ voltage_file = directory + os.sep + 'volt.dat' if os.path.isfile(voltage_file): if not silent: print('reading voltages') # depends on [control=['if'], data=[]] self.read_voltages(voltage_file) # depends on [control=['if'], data=[]] sens_files = sorted(glob(directory + os.sep + 'sens' + os.sep + 'sens*.dat')) # check if there are sensitivity files, and that the nr corresponds to # the nr of configs if len(sens_files) > 0 and len(sens_files) == self.configs.nr_of_configs: print('reading sensitivities') self._read_sensitivities(directory + os.sep + 'sens') # depends on [control=['if'], data=[]] # same for potentials pot_files = sorted(glob(directory + os.sep + 'pot' + os.sep + 'pot*.dat')) # check if there are sensitivity files, and that the nr corresponds to # the nr of configs if len(pot_files) > 0 and len(pot_files) == self.configs.nr_of_configs: print('reading potentials') self._read_potentials(directory + os.sep + 'pot') # depends on [control=['if'], data=[]]
def destroy(instances, opts=None): ''' Destroy the named vm(s) ''' client = _get_client() if isinstance(opts, dict): client.opts.update(opts) info = client.destroy(instances) return info
def function[destroy, parameter[instances, opts]]: constant[ Destroy the named vm(s) ] variable[client] assign[=] call[name[_get_client], parameter[]] if call[name[isinstance], parameter[name[opts], name[dict]]] begin[:] call[name[client].opts.update, parameter[name[opts]]] variable[info] assign[=] call[name[client].destroy, parameter[name[instances]]] return[name[info]]
keyword[def] identifier[destroy] ( identifier[instances] , identifier[opts] = keyword[None] ): literal[string] identifier[client] = identifier[_get_client] () keyword[if] identifier[isinstance] ( identifier[opts] , identifier[dict] ): identifier[client] . identifier[opts] . identifier[update] ( identifier[opts] ) identifier[info] = identifier[client] . identifier[destroy] ( identifier[instances] ) keyword[return] identifier[info]
def destroy(instances, opts=None): """ Destroy the named vm(s) """ client = _get_client() if isinstance(opts, dict): client.opts.update(opts) # depends on [control=['if'], data=[]] info = client.destroy(instances) return info
def cd(self, *subpaths): """ Change the current working directory and update all the paths in the workspace. This is useful for commands that have to be run from a certain directory. """ target = os.path.join(*subpaths) os.chdir(target)
def function[cd, parameter[self]]: constant[ Change the current working directory and update all the paths in the workspace. This is useful for commands that have to be run from a certain directory. ] variable[target] assign[=] call[name[os].path.join, parameter[<ast.Starred object at 0x7da1b09e8d00>]] call[name[os].chdir, parameter[name[target]]]
keyword[def] identifier[cd] ( identifier[self] ,* identifier[subpaths] ): literal[string] identifier[target] = identifier[os] . identifier[path] . identifier[join] (* identifier[subpaths] ) identifier[os] . identifier[chdir] ( identifier[target] )
def cd(self, *subpaths): """ Change the current working directory and update all the paths in the workspace. This is useful for commands that have to be run from a certain directory. """ target = os.path.join(*subpaths) os.chdir(target)
def _get_edge_dict(self): """Return a dict of edges. Keyed tuples of (i, source, target, polarity) with lists of edge ids [id1, id2, ...] """ edge_dict = collections.defaultdict(lambda: []) if len(self._edges) > 0: for e in self._edges: data = e['data'] key = tuple([data['i'], data['source'], data['target'], data['polarity']]) edge_dict[key] = data['id'] return edge_dict
def function[_get_edge_dict, parameter[self]]: constant[Return a dict of edges. Keyed tuples of (i, source, target, polarity) with lists of edge ids [id1, id2, ...] ] variable[edge_dict] assign[=] call[name[collections].defaultdict, parameter[<ast.Lambda object at 0x7da18bcc98d0>]] if compare[call[name[len], parameter[name[self]._edges]] greater[>] constant[0]] begin[:] for taget[name[e]] in starred[name[self]._edges] begin[:] variable[data] assign[=] call[name[e]][constant[data]] variable[key] assign[=] call[name[tuple], parameter[list[[<ast.Subscript object at 0x7da20c76f760>, <ast.Subscript object at 0x7da20c76da80>, <ast.Subscript object at 0x7da20c76e860>, <ast.Subscript object at 0x7da20c76dab0>]]]] call[name[edge_dict]][name[key]] assign[=] call[name[data]][constant[id]] return[name[edge_dict]]
keyword[def] identifier[_get_edge_dict] ( identifier[self] ): literal[string] identifier[edge_dict] = identifier[collections] . identifier[defaultdict] ( keyword[lambda] :[]) keyword[if] identifier[len] ( identifier[self] . identifier[_edges] )> literal[int] : keyword[for] identifier[e] keyword[in] identifier[self] . identifier[_edges] : identifier[data] = identifier[e] [ literal[string] ] identifier[key] = identifier[tuple] ([ identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[data] [ literal[string] ], identifier[data] [ literal[string] ]]) identifier[edge_dict] [ identifier[key] ]= identifier[data] [ literal[string] ] keyword[return] identifier[edge_dict]
def _get_edge_dict(self): """Return a dict of edges. Keyed tuples of (i, source, target, polarity) with lists of edge ids [id1, id2, ...] """ edge_dict = collections.defaultdict(lambda : []) if len(self._edges) > 0: for e in self._edges: data = e['data'] key = tuple([data['i'], data['source'], data['target'], data['polarity']]) edge_dict[key] = data['id'] # depends on [control=['for'], data=['e']] # depends on [control=['if'], data=[]] return edge_dict
def ParseConfigCommandLine(): """Parse all the command line options which control the config system.""" # The user may specify the primary config file on the command line. if flags.FLAGS.config: _CONFIG.Initialize(filename=flags.FLAGS.config, must_exist=True) else: raise RuntimeError("A config file is not specified.") # Allow secondary configuration files to be specified. if flags.FLAGS.secondary_configs: for config_file in flags.FLAGS.secondary_configs: _CONFIG.LoadSecondaryConfig(config_file) # Allow individual options to be specified as global overrides. for statement in flags.FLAGS.parameter: if "=" not in statement: raise RuntimeError("statement %s on command line not valid." % statement) name, value = statement.split("=", 1) _CONFIG.global_override[name] = value # Load additional contexts from the command line. for context in flags.FLAGS.context: if context: _CONFIG.AddContext(context) if _CONFIG["Config.writeback"]: _CONFIG.SetWriteBack(_CONFIG["Config.writeback"]) # Does the user want to dump help? We do this after the config system is # initialized so the user can examine what we think the value of all the # parameters are. if flags.FLAGS.config_help: print("Configuration overview.") _CONFIG.PrintHelp() sys.exit(0)
def function[ParseConfigCommandLine, parameter[]]: constant[Parse all the command line options which control the config system.] if name[flags].FLAGS.config begin[:] call[name[_CONFIG].Initialize, parameter[]] if name[flags].FLAGS.secondary_configs begin[:] for taget[name[config_file]] in starred[name[flags].FLAGS.secondary_configs] begin[:] call[name[_CONFIG].LoadSecondaryConfig, parameter[name[config_file]]] for taget[name[statement]] in starred[name[flags].FLAGS.parameter] begin[:] if compare[constant[=] <ast.NotIn object at 0x7da2590d7190> name[statement]] begin[:] <ast.Raise object at 0x7da1b1b44580> <ast.Tuple object at 0x7da1b1b47c40> assign[=] call[name[statement].split, parameter[constant[=], constant[1]]] call[name[_CONFIG].global_override][name[name]] assign[=] name[value] for taget[name[context]] in starred[name[flags].FLAGS.context] begin[:] if name[context] begin[:] call[name[_CONFIG].AddContext, parameter[name[context]]] if call[name[_CONFIG]][constant[Config.writeback]] begin[:] call[name[_CONFIG].SetWriteBack, parameter[call[name[_CONFIG]][constant[Config.writeback]]]] if name[flags].FLAGS.config_help begin[:] call[name[print], parameter[constant[Configuration overview.]]] call[name[_CONFIG].PrintHelp, parameter[]] call[name[sys].exit, parameter[constant[0]]]
keyword[def] identifier[ParseConfigCommandLine] (): literal[string] keyword[if] identifier[flags] . identifier[FLAGS] . identifier[config] : identifier[_CONFIG] . identifier[Initialize] ( identifier[filename] = identifier[flags] . identifier[FLAGS] . identifier[config] , identifier[must_exist] = keyword[True] ) keyword[else] : keyword[raise] identifier[RuntimeError] ( literal[string] ) keyword[if] identifier[flags] . identifier[FLAGS] . identifier[secondary_configs] : keyword[for] identifier[config_file] keyword[in] identifier[flags] . identifier[FLAGS] . identifier[secondary_configs] : identifier[_CONFIG] . identifier[LoadSecondaryConfig] ( identifier[config_file] ) keyword[for] identifier[statement] keyword[in] identifier[flags] . identifier[FLAGS] . identifier[parameter] : keyword[if] literal[string] keyword[not] keyword[in] identifier[statement] : keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[statement] ) identifier[name] , identifier[value] = identifier[statement] . identifier[split] ( literal[string] , literal[int] ) identifier[_CONFIG] . identifier[global_override] [ identifier[name] ]= identifier[value] keyword[for] identifier[context] keyword[in] identifier[flags] . identifier[FLAGS] . identifier[context] : keyword[if] identifier[context] : identifier[_CONFIG] . identifier[AddContext] ( identifier[context] ) keyword[if] identifier[_CONFIG] [ literal[string] ]: identifier[_CONFIG] . identifier[SetWriteBack] ( identifier[_CONFIG] [ literal[string] ]) keyword[if] identifier[flags] . identifier[FLAGS] . identifier[config_help] : identifier[print] ( literal[string] ) identifier[_CONFIG] . identifier[PrintHelp] () identifier[sys] . identifier[exit] ( literal[int] )
def ParseConfigCommandLine(): """Parse all the command line options which control the config system.""" # The user may specify the primary config file on the command line. if flags.FLAGS.config: _CONFIG.Initialize(filename=flags.FLAGS.config, must_exist=True) # depends on [control=['if'], data=[]] else: raise RuntimeError('A config file is not specified.') # Allow secondary configuration files to be specified. if flags.FLAGS.secondary_configs: for config_file in flags.FLAGS.secondary_configs: _CONFIG.LoadSecondaryConfig(config_file) # depends on [control=['for'], data=['config_file']] # depends on [control=['if'], data=[]] # Allow individual options to be specified as global overrides. for statement in flags.FLAGS.parameter: if '=' not in statement: raise RuntimeError('statement %s on command line not valid.' % statement) # depends on [control=['if'], data=['statement']] (name, value) = statement.split('=', 1) _CONFIG.global_override[name] = value # depends on [control=['for'], data=['statement']] # Load additional contexts from the command line. for context in flags.FLAGS.context: if context: _CONFIG.AddContext(context) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['context']] if _CONFIG['Config.writeback']: _CONFIG.SetWriteBack(_CONFIG['Config.writeback']) # depends on [control=['if'], data=[]] # Does the user want to dump help? We do this after the config system is # initialized so the user can examine what we think the value of all the # parameters are. if flags.FLAGS.config_help: print('Configuration overview.') _CONFIG.PrintHelp() sys.exit(0) # depends on [control=['if'], data=[]]
def check_sp_certs(self): """ Checks if the x509 certs of the SP exists and are valid. :returns: If the x509 certs of the SP exists and are valid :rtype: boolean """ key = self.get_sp_key() cert = self.get_sp_cert() return key is not None and cert is not None
def function[check_sp_certs, parameter[self]]: constant[ Checks if the x509 certs of the SP exists and are valid. :returns: If the x509 certs of the SP exists and are valid :rtype: boolean ] variable[key] assign[=] call[name[self].get_sp_key, parameter[]] variable[cert] assign[=] call[name[self].get_sp_cert, parameter[]] return[<ast.BoolOp object at 0x7da1b18a1db0>]
keyword[def] identifier[check_sp_certs] ( identifier[self] ): literal[string] identifier[key] = identifier[self] . identifier[get_sp_key] () identifier[cert] = identifier[self] . identifier[get_sp_cert] () keyword[return] identifier[key] keyword[is] keyword[not] keyword[None] keyword[and] identifier[cert] keyword[is] keyword[not] keyword[None]
def check_sp_certs(self): """ Checks if the x509 certs of the SP exists and are valid. :returns: If the x509 certs of the SP exists and are valid :rtype: boolean """ key = self.get_sp_key() cert = self.get_sp_cert() return key is not None and cert is not None
def _parse_raw_bytes(raw_bytes): """Convert a string of hexadecimal values to decimal values parameters Example: '0x2E 0xF1 0x80 0x28 0x00 0x1A 0x01 0x00' is converted to: 46, 241, [128, 40, 0, 26, 1, 0] :param raw_bytes: string of hexadecimal values :returns: 3 decimal values """ bytes_list = [int(x, base=16) for x in raw_bytes.split()] return bytes_list[0], bytes_list[1], bytes_list[2:]
def function[_parse_raw_bytes, parameter[raw_bytes]]: constant[Convert a string of hexadecimal values to decimal values parameters Example: '0x2E 0xF1 0x80 0x28 0x00 0x1A 0x01 0x00' is converted to: 46, 241, [128, 40, 0, 26, 1, 0] :param raw_bytes: string of hexadecimal values :returns: 3 decimal values ] variable[bytes_list] assign[=] <ast.ListComp object at 0x7da1b191f340> return[tuple[[<ast.Subscript object at 0x7da1b191c040>, <ast.Subscript object at 0x7da1b191ec20>, <ast.Subscript object at 0x7da1b191f880>]]]
keyword[def] identifier[_parse_raw_bytes] ( identifier[raw_bytes] ): literal[string] identifier[bytes_list] =[ identifier[int] ( identifier[x] , identifier[base] = literal[int] ) keyword[for] identifier[x] keyword[in] identifier[raw_bytes] . identifier[split] ()] keyword[return] identifier[bytes_list] [ literal[int] ], identifier[bytes_list] [ literal[int] ], identifier[bytes_list] [ literal[int] :]
def _parse_raw_bytes(raw_bytes): """Convert a string of hexadecimal values to decimal values parameters Example: '0x2E 0xF1 0x80 0x28 0x00 0x1A 0x01 0x00' is converted to: 46, 241, [128, 40, 0, 26, 1, 0] :param raw_bytes: string of hexadecimal values :returns: 3 decimal values """ bytes_list = [int(x, base=16) for x in raw_bytes.split()] return (bytes_list[0], bytes_list[1], bytes_list[2:])
def handle(self, line_info): """Execute magic functions.""" ifun = line_info.ifun the_rest = line_info.the_rest cmd = '%sget_ipython().magic(%r)' % (line_info.pre_whitespace, (ifun + " " + the_rest)) return cmd
def function[handle, parameter[self, line_info]]: constant[Execute magic functions.] variable[ifun] assign[=] name[line_info].ifun variable[the_rest] assign[=] name[line_info].the_rest variable[cmd] assign[=] binary_operation[constant[%sget_ipython().magic(%r)] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b021ebc0>, <ast.BinOp object at 0x7da1b021d480>]]] return[name[cmd]]
keyword[def] identifier[handle] ( identifier[self] , identifier[line_info] ): literal[string] identifier[ifun] = identifier[line_info] . identifier[ifun] identifier[the_rest] = identifier[line_info] . identifier[the_rest] identifier[cmd] = literal[string] %( identifier[line_info] . identifier[pre_whitespace] , ( identifier[ifun] + literal[string] + identifier[the_rest] )) keyword[return] identifier[cmd]
def handle(self, line_info): """Execute magic functions.""" ifun = line_info.ifun the_rest = line_info.the_rest cmd = '%sget_ipython().magic(%r)' % (line_info.pre_whitespace, ifun + ' ' + the_rest) return cmd