body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
0aa7e6169c6396f2041a3f03266b66daf17a554b61426925bbaecff52c2067ad
def get_data(self): '\n Load the data from the database into a dataframe and do the necessary\n transformations to get the features as a list :return:\n ' self.data = self.load() if self.training_conf.max_samples_per_host: counts = self.data.groupby('target').count() counts = counts.withColumn('fraction', (self.training_conf.max_samples_per_host / F.col('count'))) fractions = dict(counts.select('target', 'fraction').collect()) for (key, value) in fractions.items(): if (value > 1.0): fractions[key] = 1.0 self.data = self.data.sampleBy('target', fractions, 777) schema = self.spark.read.json(self.data.limit(1).rdd.map((lambda row: row.features))).schema for feature in self.model.features: if (feature in schema.fieldNames()): continue feature_class = self.engine_conf.all_features[feature] schema.add(StructField(name=feature, dataType=feature_class.spark_type(), nullable=True)) self.data = self.data.withColumn('features', F.from_json('features', schema)) self.training_row_n = self.data.count() self.logger.debug(f'Loaded #{self.training_row_n} of request sets...')
Load the data from the database into a dataframe and do the necessary transformations to get the features as a list :return:
src/baskerville/models/pipeline_training.py
get_data
deflect-ca/baskerville
2
python
def get_data(self): '\n Load the data from the database into a dataframe and do the necessary\n transformations to get the features as a list :return:\n ' self.data = self.load() if self.training_conf.max_samples_per_host: counts = self.data.groupby('target').count() counts = counts.withColumn('fraction', (self.training_conf.max_samples_per_host / F.col('count'))) fractions = dict(counts.select('target', 'fraction').collect()) for (key, value) in fractions.items(): if (value > 1.0): fractions[key] = 1.0 self.data = self.data.sampleBy('target', fractions, 777) schema = self.spark.read.json(self.data.limit(1).rdd.map((lambda row: row.features))).schema for feature in self.model.features: if (feature in schema.fieldNames()): continue feature_class = self.engine_conf.all_features[feature] schema.add(StructField(name=feature, dataType=feature_class.spark_type(), nullable=True)) self.data = self.data.withColumn('features', F.from_json('features', schema)) self.training_row_n = self.data.count() self.logger.debug(f'Loaded #{self.training_row_n} of request sets...')
def get_data(self): '\n Load the data from the database into a dataframe and do the necessary\n transformations to get the features as a list :return:\n ' self.data = self.load() if self.training_conf.max_samples_per_host: counts = self.data.groupby('target').count() counts = counts.withColumn('fraction', (self.training_conf.max_samples_per_host / F.col('count'))) fractions = dict(counts.select('target', 'fraction').collect()) for (key, value) in fractions.items(): if (value > 1.0): fractions[key] = 1.0 self.data = self.data.sampleBy('target', fractions, 777) schema = self.spark.read.json(self.data.limit(1).rdd.map((lambda row: row.features))).schema for feature in self.model.features: if (feature in schema.fieldNames()): continue feature_class = self.engine_conf.all_features[feature] schema.add(StructField(name=feature, dataType=feature_class.spark_type(), nullable=True)) self.data = self.data.withColumn('features', F.from_json('features', schema)) self.training_row_n = self.data.count() self.logger.debug(f'Loaded #{self.training_row_n} of request sets...')<|docstring|>Load the data from the database into a dataframe and do the necessary transformations to get the features as a list :return:<|endoftext|>
2ec1ef2d8a69cf680424354e117fc047a84ed31fbb42f6416313beac2590c2ed
def train(self): '\n Vectorize the features and train on the loaded data\n Todo: train-test split:\n # self.train_data, self.test_data = self.data.randomSplit(\n # [0.9, 0.1], seed=RANDOM_SEED\n # )\n :return: None\n ' self.model.train(self.data) self.data.unpersist()
Vectorize the features and train on the loaded data Todo: train-test split: # self.train_data, self.test_data = self.data.randomSplit( # [0.9, 0.1], seed=RANDOM_SEED # ) :return: None
src/baskerville/models/pipeline_training.py
train
deflect-ca/baskerville
2
python
def train(self): '\n Vectorize the features and train on the loaded data\n Todo: train-test split:\n # self.train_data, self.test_data = self.data.randomSplit(\n # [0.9, 0.1], seed=RANDOM_SEED\n # )\n :return: None\n ' self.model.train(self.data) self.data.unpersist()
def train(self): '\n Vectorize the features and train on the loaded data\n Todo: train-test split:\n # self.train_data, self.test_data = self.data.randomSplit(\n # [0.9, 0.1], seed=RANDOM_SEED\n # )\n :return: None\n ' self.model.train(self.data) self.data.unpersist()<|docstring|>Vectorize the features and train on the loaded data Todo: train-test split: # self.train_data, self.test_data = self.data.randomSplit( # [0.9, 0.1], seed=RANDOM_SEED # ) :return: None<|endoftext|>
62d6973952df9368ff7464b1fff46a8eb850a783c570d8af8178abfdf812151b
def test(self): '\n # todo\n :return:\n ' self.logger.debug('Testing: Coming soon-ish...')
# todo :return:
src/baskerville/models/pipeline_training.py
test
deflect-ca/baskerville
2
python
def test(self): '\n # todo\n :return:\n ' self.logger.debug('Testing: Coming soon-ish...')
def test(self): '\n # todo\n :return:\n ' self.logger.debug('Testing: Coming soon-ish...')<|docstring|># todo :return:<|endoftext|>
88fa60afb54e114c0f2412cd58b55f17b5961b753383b413cb71850f34ca4674
def evaluate(self): '\n # todo\n :return:\n ' self.logger.debug('Evaluating: Coming soon-ish...')
# todo :return:
src/baskerville/models/pipeline_training.py
evaluate
deflect-ca/baskerville
2
python
def evaluate(self): '\n # todo\n :return:\n ' self.logger.debug('Evaluating: Coming soon-ish...')
def evaluate(self): '\n # todo\n :return:\n ' self.logger.debug('Evaluating: Coming soon-ish...')<|docstring|># todo :return:<|endoftext|>
7c61968237d45ab30801be5fc092d974cfa5c929805ead2b96c86479818deb45
def save(self): '\n Save the models on disc and add a baskerville.db.Model in the database\n :return: None\n ' model_path = get_model_path(self.engine_conf.storage_path, self.model.__class__.__name__) self.model.save(path=model_path, spark_session=self.spark) self.logger.debug(f'The new model has been saved to: {model_path}') db_model = Model() db_model.created_at = datetime.datetime.now(tz=tzutc()) db_model.algorithm = self.training_conf.model db_model.parameters = json.dumps(self.model.get_params()) db_model.classifier = bytearray(model_path.encode('utf8')) self.db_tools.session.add(db_model) self.db_tools.session.commit()
Save the models on disc and add a baskerville.db.Model in the database :return: None
src/baskerville/models/pipeline_training.py
save
deflect-ca/baskerville
2
python
def save(self): '\n Save the models on disc and add a baskerville.db.Model in the database\n :return: None\n ' model_path = get_model_path(self.engine_conf.storage_path, self.model.__class__.__name__) self.model.save(path=model_path, spark_session=self.spark) self.logger.debug(f'The new model has been saved to: {model_path}') db_model = Model() db_model.created_at = datetime.datetime.now(tz=tzutc()) db_model.algorithm = self.training_conf.model db_model.parameters = json.dumps(self.model.get_params()) db_model.classifier = bytearray(model_path.encode('utf8')) self.db_tools.session.add(db_model) self.db_tools.session.commit()
def save(self): '\n Save the models on disc and add a baskerville.db.Model in the database\n :return: None\n ' model_path = get_model_path(self.engine_conf.storage_path, self.model.__class__.__name__) self.model.save(path=model_path, spark_session=self.spark) self.logger.debug(f'The new model has been saved to: {model_path}') db_model = Model() db_model.created_at = datetime.datetime.now(tz=tzutc()) db_model.algorithm = self.training_conf.model db_model.parameters = json.dumps(self.model.get_params()) db_model.classifier = bytearray(model_path.encode('utf8')) self.db_tools.session.add(db_model) self.db_tools.session.commit()<|docstring|>Save the models on disc and add a baskerville.db.Model in the database :return: None<|endoftext|>
9d7341df11ae196e1a11c594261d9d4755bb8b8394d4b5c049c363b4a504566b
def get_bounds(self, from_date, to_date=None, field='stop'): '\n Get the lower and upper limit\n :param str from_date: lower date bound\n :param str to_date: upper date bound\n :param str field: date field\n :return:\n ' where = f"{field}>='{from_date}' " if to_date: where += f"AND {field}<='{to_date}' " q = f'(select min(id) as min_id, max(id) as max_id, count(id) as rows from request_sets where {where}) as bounds' return self.spark.read.jdbc(url=self.db_url, table=q, properties=self.conn_properties)
Get the lower and upper limit :param str from_date: lower date bound :param str to_date: upper date bound :param str field: date field :return:
src/baskerville/models/pipeline_training.py
get_bounds
deflect-ca/baskerville
2
python
def get_bounds(self, from_date, to_date=None, field='stop'): '\n Get the lower and upper limit\n :param str from_date: lower date bound\n :param str to_date: upper date bound\n :param str field: date field\n :return:\n ' where = f"{field}>='{from_date}' " if to_date: where += f"AND {field}<='{to_date}' " q = f'(select min(id) as min_id, max(id) as max_id, count(id) as rows from request_sets where {where}) as bounds' return self.spark.read.jdbc(url=self.db_url, table=q, properties=self.conn_properties)
def get_bounds(self, from_date, to_date=None, field='stop'): '\n Get the lower and upper limit\n :param str from_date: lower date bound\n :param str to_date: upper date bound\n :param str field: date field\n :return:\n ' where = f"{field}>='{from_date}' " if to_date: where += f"AND {field}<='{to_date}' " q = f'(select min(id) as min_id, max(id) as max_id, count(id) as rows from request_sets where {where}) as bounds' return self.spark.read.jdbc(url=self.db_url, table=q, properties=self.conn_properties)<|docstring|>Get the lower and upper limit :param str from_date: lower date bound :param str to_date: upper date bound :param str field: date field :return:<|endoftext|>
bd5b9cffb3561356dba6aa550cd08d468b5b3c14c9a1632912cb0f40e916876a
def load(self) -> pyspark.sql.DataFrame: '\n Loads the request_sets already in the database\n :return:\n :rtype: pyspark.sql.Dataframe\n ' data_params = self.training_conf.data_parameters from_date = data_params.get('from_date') to_date = data_params.get('to_date') training_days = data_params.get('training_days') if ((not from_date) or (not to_date)): if training_days: to_date = datetime.datetime.utcnow() from_date = str((to_date - datetime.timedelta(days=training_days))) to_date = str(to_date) else: raise ValueError('Please specify either from-to dates or training days') bounds = self.get_bounds(from_date, to_date, field='created_at').collect()[0] self.logger.debug(f'Fetching {bounds.rows} rows. min: {bounds.min_id} max: {bounds.max_id}') q = f"(select id, {','.join(self.columns_to_keep)} from request_sets where id >= {bounds.min_id} and id <= {bounds.max_id} and created_at >= '{from_date}' and created_at <='{to_date}') as request_sets" return self.spark.read.jdbc(url=self.db_url, table=q, numPartitions=(int(self.spark.conf.get('spark.sql.shuffle.partitions')) or (os.cpu_count() * 2)), column='id', lowerBound=bounds.min_id, upperBound=(bounds.max_id + 1), properties=self.conn_properties)
Loads the request_sets already in the database :return: :rtype: pyspark.sql.Dataframe
src/baskerville/models/pipeline_training.py
load
deflect-ca/baskerville
2
python
def load(self) -> pyspark.sql.DataFrame: '\n Loads the request_sets already in the database\n :return:\n :rtype: pyspark.sql.Dataframe\n ' data_params = self.training_conf.data_parameters from_date = data_params.get('from_date') to_date = data_params.get('to_date') training_days = data_params.get('training_days') if ((not from_date) or (not to_date)): if training_days: to_date = datetime.datetime.utcnow() from_date = str((to_date - datetime.timedelta(days=training_days))) to_date = str(to_date) else: raise ValueError('Please specify either from-to dates or training days') bounds = self.get_bounds(from_date, to_date, field='created_at').collect()[0] self.logger.debug(f'Fetching {bounds.rows} rows. min: {bounds.min_id} max: {bounds.max_id}') q = f"(select id, {','.join(self.columns_to_keep)} from request_sets where id >= {bounds.min_id} and id <= {bounds.max_id} and created_at >= '{from_date}' and created_at <='{to_date}') as request_sets" return self.spark.read.jdbc(url=self.db_url, table=q, numPartitions=(int(self.spark.conf.get('spark.sql.shuffle.partitions')) or (os.cpu_count() * 2)), column='id', lowerBound=bounds.min_id, upperBound=(bounds.max_id + 1), properties=self.conn_properties)
def load(self) -> pyspark.sql.DataFrame: '\n Loads the request_sets already in the database\n :return:\n :rtype: pyspark.sql.Dataframe\n ' data_params = self.training_conf.data_parameters from_date = data_params.get('from_date') to_date = data_params.get('to_date') training_days = data_params.get('training_days') if ((not from_date) or (not to_date)): if training_days: to_date = datetime.datetime.utcnow() from_date = str((to_date - datetime.timedelta(days=training_days))) to_date = str(to_date) else: raise ValueError('Please specify either from-to dates or training days') bounds = self.get_bounds(from_date, to_date, field='created_at').collect()[0] self.logger.debug(f'Fetching {bounds.rows} rows. min: {bounds.min_id} max: {bounds.max_id}') q = f"(select id, {','.join(self.columns_to_keep)} from request_sets where id >= {bounds.min_id} and id <= {bounds.max_id} and created_at >= '{from_date}' and created_at <='{to_date}') as request_sets" return self.spark.read.jdbc(url=self.db_url, table=q, numPartitions=(int(self.spark.conf.get('spark.sql.shuffle.partitions')) or (os.cpu_count() * 2)), column='id', lowerBound=bounds.min_id, upperBound=(bounds.max_id + 1), properties=self.conn_properties)<|docstring|>Loads the request_sets already in the database :return: :rtype: pyspark.sql.Dataframe<|endoftext|>
7d7a1975c5916bb7c1c28b24f2d17808bd4eb0c54d5fb97c7460d9d4741f500f
def finish_up(self): '\n Unpersist all\n :return:\n ' reset_spark_storage() if self.db_tools: self.db_tools.disconnect_from_db()
Unpersist all :return:
src/baskerville/models/pipeline_training.py
finish_up
deflect-ca/baskerville
2
python
def finish_up(self): '\n Unpersist all\n :return:\n ' reset_spark_storage() if self.db_tools: self.db_tools.disconnect_from_db()
def finish_up(self): '\n Unpersist all\n :return:\n ' reset_spark_storage() if self.db_tools: self.db_tools.disconnect_from_db()<|docstring|>Unpersist all :return:<|endoftext|>
2aa05334cc06b29798352c3a9c94d00075b8e0b721d27b66406c36575e3b2ff0
def get_input(prompt: str, default_value: str, config: Any=None, property_name: str=None) -> str: "Request user input, updating the underlying config if applicable.\n\n Args:\n prompt: On-screen prompt before user input\n default_value: The default (existing) value\n config: BinaryAlertConfig instance, if updating the underlying configuration\n If None, the valid values are assumed to be 'yes' and 'no'\n property_name: Name of the config property to update (applicable only if config != None)\n\n Returns:\n Lowercase user input, stripped of extra spaces, or the default value if no input was given\n " if default_value: prompt = '{} ({}): '.format(prompt, default_value) else: prompt = '{}: '.format(prompt) while True: user_input = (input(prompt).strip().lower() or default_value) if (config and property_name): try: setattr(config, property_name, user_input) break except InvalidConfigError as error: print('ERROR: {}'.format(error)) elif (user_input in {'yes', 'no'}): break else: print('ERROR: Please enter exactly "yes" or "no"') return user_input
Request user input, updating the underlying config if applicable. Args: prompt: On-screen prompt before user input default_value: The default (existing) value config: BinaryAlertConfig instance, if updating the underlying configuration If None, the valid values are assumed to be 'yes' and 'no' property_name: Name of the config property to update (applicable only if config != None) Returns: Lowercase user input, stripped of extra spaces, or the default value if no input was given
cli/config.py
get_input
wkk/binaryalert
1,324
python
def get_input(prompt: str, default_value: str, config: Any=None, property_name: str=None) -> str: "Request user input, updating the underlying config if applicable.\n\n Args:\n prompt: On-screen prompt before user input\n default_value: The default (existing) value\n config: BinaryAlertConfig instance, if updating the underlying configuration\n If None, the valid values are assumed to be 'yes' and 'no'\n property_name: Name of the config property to update (applicable only if config != None)\n\n Returns:\n Lowercase user input, stripped of extra spaces, or the default value if no input was given\n " if default_value: prompt = '{} ({}): '.format(prompt, default_value) else: prompt = '{}: '.format(prompt) while True: user_input = (input(prompt).strip().lower() or default_value) if (config and property_name): try: setattr(config, property_name, user_input) break except InvalidConfigError as error: print('ERROR: {}'.format(error)) elif (user_input in {'yes', 'no'}): break else: print('ERROR: Please enter exactly "yes" or "no"') return user_input
def get_input(prompt: str, default_value: str, config: Any=None, property_name: str=None) -> str: "Request user input, updating the underlying config if applicable.\n\n Args:\n prompt: On-screen prompt before user input\n default_value: The default (existing) value\n config: BinaryAlertConfig instance, if updating the underlying configuration\n If None, the valid values are assumed to be 'yes' and 'no'\n property_name: Name of the config property to update (applicable only if config != None)\n\n Returns:\n Lowercase user input, stripped of extra spaces, or the default value if no input was given\n " if default_value: prompt = '{} ({}): '.format(prompt, default_value) else: prompt = '{}: '.format(prompt) while True: user_input = (input(prompt).strip().lower() or default_value) if (config and property_name): try: setattr(config, property_name, user_input) break except InvalidConfigError as error: print('ERROR: {}'.format(error)) elif (user_input in {'yes', 'no'}): break else: print('ERROR: Please enter exactly "yes" or "no"') return user_input<|docstring|>Request user input, updating the underlying config if applicable. Args: prompt: On-screen prompt before user input default_value: The default (existing) value config: BinaryAlertConfig instance, if updating the underlying configuration If None, the valid values are assumed to be 'yes' and 'no' property_name: Name of the config property to update (applicable only if config != None) Returns: Lowercase user input, stripped of extra spaces, or the default value if no input was given<|endoftext|>
05ad6d2b56f6b1d7555633ee1001ff3828299d8d850cb149db120616f61a1a6d
def __init__(self) -> None: 'Parse the terraform.tfvars config file and make sure it contains every variable.\n\n Raises:\n InvalidConfigError: If any variable is defined in variables.tf but not terraform.tfvars.\n ' with open(CONFIG_FILE) as f: self._config = hcl.load(f) with open(VARIABLES_FILE) as f: variable_names = hcl.load(f)['variable'].keys() for variable in variable_names: if (variable not in self._config): raise InvalidConfigError('variable "{}" is not defined in {}'.format(variable, CONFIG_FILE))
Parse the terraform.tfvars config file and make sure it contains every variable. Raises: InvalidConfigError: If any variable is defined in variables.tf but not terraform.tfvars.
cli/config.py
__init__
wkk/binaryalert
1,324
python
def __init__(self) -> None: 'Parse the terraform.tfvars config file and make sure it contains every variable.\n\n Raises:\n InvalidConfigError: If any variable is defined in variables.tf but not terraform.tfvars.\n ' with open(CONFIG_FILE) as f: self._config = hcl.load(f) with open(VARIABLES_FILE) as f: variable_names = hcl.load(f)['variable'].keys() for variable in variable_names: if (variable not in self._config): raise InvalidConfigError('variable "{}" is not defined in {}'.format(variable, CONFIG_FILE))
def __init__(self) -> None: 'Parse the terraform.tfvars config file and make sure it contains every variable.\n\n Raises:\n InvalidConfigError: If any variable is defined in variables.tf but not terraform.tfvars.\n ' with open(CONFIG_FILE) as f: self._config = hcl.load(f) with open(VARIABLES_FILE) as f: variable_names = hcl.load(f)['variable'].keys() for variable in variable_names: if (variable not in self._config): raise InvalidConfigError('variable "{}" is not defined in {}'.format(variable, CONFIG_FILE))<|docstring|>Parse the terraform.tfvars config file and make sure it contains every variable. Raises: InvalidConfigError: If any variable is defined in variables.tf but not terraform.tfvars.<|endoftext|>
d18505a1e76fe2f4c467277ae98527f313084350f37189b57981070835741e57
def _encrypt_cb_api_token(self) -> None: 'Save an encrypted CarbonBlack API token.\n\n This Terraforms the KMS keys required to encrypt the token.\n ' while True: api_token = getpass.getpass('CarbonBlack API token (only needs binary read access): ').strip().lower() if re.fullmatch(self.VALID_CB_API_TOKEN_FORMAT, api_token, re.ASCII): break else: print('ERROR: {}-character input does not match expected token format {}'.format(len(api_token), self.VALID_CB_API_TOKEN_FORMAT)) print('Terraforming KMS key...') os.chdir(TERRAFORM_DIR) subprocess.check_call(['terraform', 'init']) subprocess.check_call(['terraform', 'apply', '-target=aws_kms_alias.encrypt_credentials_alias']) print('Encrypting API token...') response = boto3.client('kms').encrypt(KeyId='alias/{}_binaryalert_carbonblack_credentials'.format(self.name_prefix), Plaintext=api_token) self.encrypted_carbon_black_api_token = base64.b64encode(response['CiphertextBlob']).decode('utf-8')
Save an encrypted CarbonBlack API token. This Terraforms the KMS keys required to encrypt the token.
cli/config.py
_encrypt_cb_api_token
wkk/binaryalert
1,324
python
def _encrypt_cb_api_token(self) -> None: 'Save an encrypted CarbonBlack API token.\n\n This Terraforms the KMS keys required to encrypt the token.\n ' while True: api_token = getpass.getpass('CarbonBlack API token (only needs binary read access): ').strip().lower() if re.fullmatch(self.VALID_CB_API_TOKEN_FORMAT, api_token, re.ASCII): break else: print('ERROR: {}-character input does not match expected token format {}'.format(len(api_token), self.VALID_CB_API_TOKEN_FORMAT)) print('Terraforming KMS key...') os.chdir(TERRAFORM_DIR) subprocess.check_call(['terraform', 'init']) subprocess.check_call(['terraform', 'apply', '-target=aws_kms_alias.encrypt_credentials_alias']) print('Encrypting API token...') response = boto3.client('kms').encrypt(KeyId='alias/{}_binaryalert_carbonblack_credentials'.format(self.name_prefix), Plaintext=api_token) self.encrypted_carbon_black_api_token = base64.b64encode(response['CiphertextBlob']).decode('utf-8')
def _encrypt_cb_api_token(self) -> None: 'Save an encrypted CarbonBlack API token.\n\n This Terraforms the KMS keys required to encrypt the token.\n ' while True: api_token = getpass.getpass('CarbonBlack API token (only needs binary read access): ').strip().lower() if re.fullmatch(self.VALID_CB_API_TOKEN_FORMAT, api_token, re.ASCII): break else: print('ERROR: {}-character input does not match expected token format {}'.format(len(api_token), self.VALID_CB_API_TOKEN_FORMAT)) print('Terraforming KMS key...') os.chdir(TERRAFORM_DIR) subprocess.check_call(['terraform', 'init']) subprocess.check_call(['terraform', 'apply', '-target=aws_kms_alias.encrypt_credentials_alias']) print('Encrypting API token...') response = boto3.client('kms').encrypt(KeyId='alias/{}_binaryalert_carbonblack_credentials'.format(self.name_prefix), Plaintext=api_token) self.encrypted_carbon_black_api_token = base64.b64encode(response['CiphertextBlob']).decode('utf-8')<|docstring|>Save an encrypted CarbonBlack API token. This Terraforms the KMS keys required to encrypt the token.<|endoftext|>
2a78d24eac3a24c009ce629322934c9d2d9c2f75a9ee445b23d4f63b4f64fae1
def _configure_carbon_black(self) -> None: 'If CarbonBlack downloader is enabled, request URL and credentials' get_input('CarbonBlack URL', self.carbon_black_url, self, 'carbon_black_url') update_api_token = 'yes' if self.encrypted_carbon_black_api_token: update_api_token = get_input('Change the CarbonBlack API token?', 'no') if (update_api_token == 'yes'): self.save() self._encrypt_cb_api_token()
If CarbonBlack downloader is enabled, request URL and credentials
cli/config.py
_configure_carbon_black
wkk/binaryalert
1,324
python
def _configure_carbon_black(self) -> None: get_input('CarbonBlack URL', self.carbon_black_url, self, 'carbon_black_url') update_api_token = 'yes' if self.encrypted_carbon_black_api_token: update_api_token = get_input('Change the CarbonBlack API token?', 'no') if (update_api_token == 'yes'): self.save() self._encrypt_cb_api_token()
def _configure_carbon_black(self) -> None: get_input('CarbonBlack URL', self.carbon_black_url, self, 'carbon_black_url') update_api_token = 'yes' if self.encrypted_carbon_black_api_token: update_api_token = get_input('Change the CarbonBlack API token?', 'no') if (update_api_token == 'yes'): self.save() self._encrypt_cb_api_token()<|docstring|>If CarbonBlack downloader is enabled, request URL and credentials<|endoftext|>
f742333eecba06c7b114ed5593f11880eb55d37d8b42b5e0608e7a15d34024e5
def configure(self) -> None: 'Request basic configuration settings from the user.\n\n Each request will be retried until the answer is in the correct format.\n ' get_input('AWS Account ID', self.aws_account_id, self, 'aws_account_id') get_input('AWS Region', self.aws_region, self, 'aws_region') get_input('Unique name prefix, e.g. "company_team"', self.name_prefix, self, 'name_prefix') enable_downloader = get_input('Enable the CarbonBlack downloader?', ('yes' if self.enable_carbon_black_downloader else 'no')) self.enable_carbon_black_downloader = (enable_downloader == 'yes') if self.enable_carbon_black_downloader: self._configure_carbon_black() self.save()
Request basic configuration settings from the user. Each request will be retried until the answer is in the correct format.
cli/config.py
configure
wkk/binaryalert
1,324
python
def configure(self) -> None: 'Request basic configuration settings from the user.\n\n Each request will be retried until the answer is in the correct format.\n ' get_input('AWS Account ID', self.aws_account_id, self, 'aws_account_id') get_input('AWS Region', self.aws_region, self, 'aws_region') get_input('Unique name prefix, e.g. "company_team"', self.name_prefix, self, 'name_prefix') enable_downloader = get_input('Enable the CarbonBlack downloader?', ('yes' if self.enable_carbon_black_downloader else 'no')) self.enable_carbon_black_downloader = (enable_downloader == 'yes') if self.enable_carbon_black_downloader: self._configure_carbon_black() self.save()
def configure(self) -> None: 'Request basic configuration settings from the user.\n\n Each request will be retried until the answer is in the correct format.\n ' get_input('AWS Account ID', self.aws_account_id, self, 'aws_account_id') get_input('AWS Region', self.aws_region, self, 'aws_region') get_input('Unique name prefix, e.g. "company_team"', self.name_prefix, self, 'name_prefix') enable_downloader = get_input('Enable the CarbonBlack downloader?', ('yes' if self.enable_carbon_black_downloader else 'no')) self.enable_carbon_black_downloader = (enable_downloader == 'yes') if self.enable_carbon_black_downloader: self._configure_carbon_black() self.save()<|docstring|>Request basic configuration settings from the user. Each request will be retried until the answer is in the correct format.<|endoftext|>
9e192af6433eb4522cf869d39d966de47e919a88abb0fc27669f3e7b0da3fc29
def validate(self) -> None: 'Validate config values against their expected formats.\n\n Terraform and AWS have their own validation, but this simple up-front check\n saves the user some headache compared to waiting for a deploy to fail.\n We only explicitly validate variables which the user can change through the CLI:\n aws_region, name_prefix, *carbon_black*\n\n Raises:\n InvalidConfigError: If any config variable has an invalid value.\n ' self.aws_account_id = self.aws_account_id self.aws_region = self.aws_region self.name_prefix = self.name_prefix self.enable_carbon_black_downloader = self.enable_carbon_black_downloader if self.enable_carbon_black_downloader: self.carbon_black_url = self.carbon_black_url self.encrypted_carbon_black_api_token = self.encrypted_carbon_black_api_token
Validate config values against their expected formats. Terraform and AWS have their own validation, but this simple up-front check saves the user some headache compared to waiting for a deploy to fail. We only explicitly validate variables which the user can change through the CLI: aws_region, name_prefix, *carbon_black* Raises: InvalidConfigError: If any config variable has an invalid value.
cli/config.py
validate
wkk/binaryalert
1,324
python
def validate(self) -> None: 'Validate config values against their expected formats.\n\n Terraform and AWS have their own validation, but this simple up-front check\n saves the user some headache compared to waiting for a deploy to fail.\n We only explicitly validate variables which the user can change through the CLI:\n aws_region, name_prefix, *carbon_black*\n\n Raises:\n InvalidConfigError: If any config variable has an invalid value.\n ' self.aws_account_id = self.aws_account_id self.aws_region = self.aws_region self.name_prefix = self.name_prefix self.enable_carbon_black_downloader = self.enable_carbon_black_downloader if self.enable_carbon_black_downloader: self.carbon_black_url = self.carbon_black_url self.encrypted_carbon_black_api_token = self.encrypted_carbon_black_api_token
def validate(self) -> None: 'Validate config values against their expected formats.\n\n Terraform and AWS have their own validation, but this simple up-front check\n saves the user some headache compared to waiting for a deploy to fail.\n We only explicitly validate variables which the user can change through the CLI:\n aws_region, name_prefix, *carbon_black*\n\n Raises:\n InvalidConfigError: If any config variable has an invalid value.\n ' self.aws_account_id = self.aws_account_id self.aws_region = self.aws_region self.name_prefix = self.name_prefix self.enable_carbon_black_downloader = self.enable_carbon_black_downloader if self.enable_carbon_black_downloader: self.carbon_black_url = self.carbon_black_url self.encrypted_carbon_black_api_token = self.encrypted_carbon_black_api_token<|docstring|>Validate config values against their expected formats. Terraform and AWS have their own validation, but this simple up-front check saves the user some headache compared to waiting for a deploy to fail. We only explicitly validate variables which the user can change through the CLI: aws_region, name_prefix, *carbon_black* Raises: InvalidConfigError: If any config variable has an invalid value.<|endoftext|>
2fb76c68a0a6ab21728ba9599f0e6190ac2712b485188226badb55175555eb9b
def save(self) -> None: 'Save the current configuration to the terraform.tfvars config file.' with open(CONFIG_FILE) as config_file: raw_config = config_file.read() for (variable_name, value) in self._config.items(): if isinstance(value, str): formatted_value = '"{}"'.format(value) elif isinstance(value, bool): formatted_value = str(value).lower() else: formatted_value = value raw_config = re.sub('{}\\s*=\\s*\\S+'.format(variable_name), '{} = {}'.format(variable_name, formatted_value), raw_config) with open(CONFIG_FILE, 'w') as config_file: config_file.write(raw_config)
Save the current configuration to the terraform.tfvars config file.
cli/config.py
save
wkk/binaryalert
1,324
python
def save(self) -> None: with open(CONFIG_FILE) as config_file: raw_config = config_file.read() for (variable_name, value) in self._config.items(): if isinstance(value, str): formatted_value = '"{}"'.format(value) elif isinstance(value, bool): formatted_value = str(value).lower() else: formatted_value = value raw_config = re.sub('{}\\s*=\\s*\\S+'.format(variable_name), '{} = {}'.format(variable_name, formatted_value), raw_config) with open(CONFIG_FILE, 'w') as config_file: config_file.write(raw_config)
def save(self) -> None: with open(CONFIG_FILE) as config_file: raw_config = config_file.read() for (variable_name, value) in self._config.items(): if isinstance(value, str): formatted_value = '"{}"'.format(value) elif isinstance(value, bool): formatted_value = str(value).lower() else: formatted_value = value raw_config = re.sub('{}\\s*=\\s*\\S+'.format(variable_name), '{} = {}'.format(variable_name, formatted_value), raw_config) with open(CONFIG_FILE, 'w') as config_file: config_file.write(raw_config)<|docstring|>Save the current configuration to the terraform.tfvars config file.<|endoftext|>
5970ddef8297e5edabbe1be8cfda2b186adfc40bfa456baaeb2ece16786c9fe9
def __init__(self, logs: list) -> None: 'Initialize parser.' self._logs = logs self.metric = Metric()
Initialize parser.
lpot/ux/utils/parser.py
__init__
yqhu/neural-compressor
0
python
def __init__(self, logs: list) -> None: self._logs = logs self.metric = Metric()
def __init__(self, logs: list) -> None: self._logs = logs self.metric = Metric()<|docstring|>Initialize parser.<|endoftext|>
991bb056731241d3c366d2d38b66a2269f955e36a3e580a4517fa4a10f6de695
def process(self) -> Dict[(str, Any)]: 'Process log files.' raise NotImplementedError
Process log files.
lpot/ux/utils/parser.py
process
yqhu/neural-compressor
0
python
def process(self) -> Dict[(str, Any)]: raise NotImplementedError
def process(self) -> Dict[(str, Any)]: raise NotImplementedError<|docstring|>Process log files.<|endoftext|>
54a73807ecd974cadbea1b7bfaefa751679ff1d6067005460de80cff0747b0d0
@property def patterns(self) -> dict: 'Set patterns to get metrics from lines.' raise NotImplementedError
Set patterns to get metrics from lines.
lpot/ux/utils/parser.py
patterns
yqhu/neural-compressor
0
python
@property def patterns(self) -> dict: raise NotImplementedError
@property def patterns(self) -> dict: raise NotImplementedError<|docstring|>Set patterns to get metrics from lines.<|endoftext|>
00d49c25ccf56f95844ee7c53a0da14c20c0e33fea693fdbf87d274f77ce6994
def process(self) -> Dict[(str, Any)]: 'Process files.' for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if (match and match.groupdict().get(key)): requested_value = str(match.groupdict().get(key)) self.metric.insert_data(key, requested_value) parsed_data: Dict[(str, Any)] = self.metric.serialize() return parsed_data
Process files.
lpot/ux/utils/parser.py
process
yqhu/neural-compressor
0
python
def process(self) -> Dict[(str, Any)]: for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if (match and match.groupdict().get(key)): requested_value = str(match.groupdict().get(key)) self.metric.insert_data(key, requested_value) parsed_data: Dict[(str, Any)] = self.metric.serialize() return parsed_data
def process(self) -> Dict[(str, Any)]: for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if (match and match.groupdict().get(key)): requested_value = str(match.groupdict().get(key)) self.metric.insert_data(key, requested_value) parsed_data: Dict[(str, Any)] = self.metric.serialize() return parsed_data<|docstring|>Process files.<|endoftext|>
bf6dae3628f4ba9b21feff465de6af286aac0234212b2999b6703b428cfeac57
@property def patterns(self) -> dict: 'Set patterns to get metrics from lines.' return {'acc_input_model': '.*FP32 baseline is:\\s+\\[((accuracy:\\s+(?P<acc_input_model>(\\d+(\\.\\d+)?)))?(duration\\s+\\(seconds\\):\\s+(?P<duration>(\\d+(\\.\\d+)?)))?(memory footprint\\s+\\(MB\\):\\s+(?P<mem_footprint>(\\d+(\\.\\d+)?)))?(,\\s+)?)*\\]', 'acc_optimized_model': '.*Best tune result is:\\s+\\[((accuracy:\\s+(?P<acc_optimized_model>(\\d+(\\.\\d+)?)))?(duration\\s+\\(seconds\\):\\s+(?P<duration>(\\d+(\\.\\d+)?)))?(memory footprint\\s+\\(MB\\):\\s+(?P<mem_footprint>(\\d+(\\.\\d+)?)))?(,\\s+)?)*\\]', 'path_optimized_model': '.*Save quantized model to (?P<path_optimized_model>.*)\\.'}
Set patterns to get metrics from lines.
lpot/ux/utils/parser.py
patterns
yqhu/neural-compressor
0
python
@property def patterns(self) -> dict: return {'acc_input_model': '.*FP32 baseline is:\\s+\\[((accuracy:\\s+(?P<acc_input_model>(\\d+(\\.\\d+)?)))?(duration\\s+\\(seconds\\):\\s+(?P<duration>(\\d+(\\.\\d+)?)))?(memory footprint\\s+\\(MB\\):\\s+(?P<mem_footprint>(\\d+(\\.\\d+)?)))?(,\\s+)?)*\\]', 'acc_optimized_model': '.*Best tune result is:\\s+\\[((accuracy:\\s+(?P<acc_optimized_model>(\\d+(\\.\\d+)?)))?(duration\\s+\\(seconds\\):\\s+(?P<duration>(\\d+(\\.\\d+)?)))?(memory footprint\\s+\\(MB\\):\\s+(?P<mem_footprint>(\\d+(\\.\\d+)?)))?(,\\s+)?)*\\]', 'path_optimized_model': '.*Save quantized model to (?P<path_optimized_model>.*)\\.'}
@property def patterns(self) -> dict: return {'acc_input_model': '.*FP32 baseline is:\\s+\\[((accuracy:\\s+(?P<acc_input_model>(\\d+(\\.\\d+)?)))?(duration\\s+\\(seconds\\):\\s+(?P<duration>(\\d+(\\.\\d+)?)))?(memory footprint\\s+\\(MB\\):\\s+(?P<mem_footprint>(\\d+(\\.\\d+)?)))?(,\\s+)?)*\\]', 'acc_optimized_model': '.*Best tune result is:\\s+\\[((accuracy:\\s+(?P<acc_optimized_model>(\\d+(\\.\\d+)?)))?(duration\\s+\\(seconds\\):\\s+(?P<duration>(\\d+(\\.\\d+)?)))?(memory footprint\\s+\\(MB\\):\\s+(?P<mem_footprint>(\\d+(\\.\\d+)?)))?(,\\s+)?)*\\]', 'path_optimized_model': '.*Save quantized model to (?P<path_optimized_model>.*)\\.'}<|docstring|>Set patterns to get metrics from lines.<|endoftext|>
8ec05355f0cbbee471e9ba11ac0c34204ea2a3f1115524a3def10b8a1eeb2c48
def process(self) -> Dict[(str, Any)]: 'Process files.' partial: Dict[(str, List)] = {} for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if (not match): continue metric_name = f'perf_{key}_input_model' self.metric.insert_data(metric_name, match.group(1)) converted_value = getattr(self.metric, metric_name) parse_result = {key: converted_value} partial = self.update_partial(partial, parse_result) return self.summarize_partial(partial)
Process files.
lpot/ux/utils/parser.py
process
yqhu/neural-compressor
0
python
def process(self) -> Dict[(str, Any)]: partial: Dict[(str, List)] = {} for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if (not match): continue metric_name = f'perf_{key}_input_model' self.metric.insert_data(metric_name, match.group(1)) converted_value = getattr(self.metric, metric_name) parse_result = {key: converted_value} partial = self.update_partial(partial, parse_result) return self.summarize_partial(partial)
def process(self) -> Dict[(str, Any)]: partial: Dict[(str, List)] = {} for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if (not match): continue metric_name = f'perf_{key}_input_model' self.metric.insert_data(metric_name, match.group(1)) converted_value = getattr(self.metric, metric_name) parse_result = {key: converted_value} partial = self.update_partial(partial, parse_result) return self.summarize_partial(partial)<|docstring|>Process files.<|endoftext|>
19ee95aee6ae78873e061262864e929b9b06ed30188778b47b412fbb9f1dcf38
@staticmethod def update_partial(partial: Dict[(str, List)], parsed_result: Dict[(str, Union[(float, int)])]) -> Dict[(str, List)]: 'Update partial entries.' for (key, value) in parsed_result.items(): if (key not in partial): partial[key] = [] partial[key].append(value) return partial
Update partial entries.
lpot/ux/utils/parser.py
update_partial
yqhu/neural-compressor
0
python
@staticmethod def update_partial(partial: Dict[(str, List)], parsed_result: Dict[(str, Union[(float, int)])]) -> Dict[(str, List)]: for (key, value) in parsed_result.items(): if (key not in partial): partial[key] = [] partial[key].append(value) return partial
@staticmethod def update_partial(partial: Dict[(str, List)], parsed_result: Dict[(str, Union[(float, int)])]) -> Dict[(str, List)]: for (key, value) in parsed_result.items(): if (key not in partial): partial[key] = [] partial[key].append(value) return partial<|docstring|>Update partial entries.<|endoftext|>
a87f12df9cd572a1887b56cc65870c1d99b1ccb01cdc241002c2a023a71cf75c
def summarize_partial(self, partial: dict) -> dict: 'Calculate final values.' summary = {} for (key, value) in partial.items(): summarized_value = self.summarize_value(key, value) for precision in ['input_model', 'optimized_model']: metric_name = f'perf_{key}_{precision}' summary[metric_name] = summarized_value return summary
Calculate final values.
lpot/ux/utils/parser.py
summarize_partial
yqhu/neural-compressor
0
python
def summarize_partial(self, partial: dict) -> dict: summary = {} for (key, value) in partial.items(): summarized_value = self.summarize_value(key, value) for precision in ['input_model', 'optimized_model']: metric_name = f'perf_{key}_{precision}' summary[metric_name] = summarized_value return summary
def summarize_partial(self, partial: dict) -> dict: summary = {} for (key, value) in partial.items(): summarized_value = self.summarize_value(key, value) for precision in ['input_model', 'optimized_model']: metric_name = f'perf_{key}_{precision}' summary[metric_name] = summarized_value return summary<|docstring|>Calculate final values.<|endoftext|>
9be70f2b2f0d2a0cff61f4036f25b830e6a89dbcc75dabe444f7c9022cc6863e
@staticmethod def summarize_value(key: str, value: list) -> Union[(float, int)]: 'Calculate final value.' if (key == 'latency'): return round((sum(value) / len(value)), 4) if (key == 'throughput'): return round(sum(value), 4) return value[0]
Calculate final value.
lpot/ux/utils/parser.py
summarize_value
yqhu/neural-compressor
0
python
@staticmethod def summarize_value(key: str, value: list) -> Union[(float, int)]: if (key == 'latency'): return round((sum(value) / len(value)), 4) if (key == 'throughput'): return round(sum(value), 4) return value[0]
@staticmethod def summarize_value(key: str, value: list) -> Union[(float, int)]: if (key == 'latency'): return round((sum(value) / len(value)), 4) if (key == 'throughput'): return round(sum(value), 4) return value[0]<|docstring|>Calculate final value.<|endoftext|>
df87604f01e14985df9721e228b03cd69f5b0fad7527b06f6ca4143ddd74a377
@property def patterns(self) -> dict: 'Set patterns to get metrics from lines.' return {'throughput': 'Throughput:\\s+(\\d+(\\.\\d+)?)', 'latency': 'Latency:\\s+(\\d+(\\.\\d+)?)'}
Set patterns to get metrics from lines.
lpot/ux/utils/parser.py
patterns
yqhu/neural-compressor
0
python
@property def patterns(self) -> dict: return {'throughput': 'Throughput:\\s+(\\d+(\\.\\d+)?)', 'latency': 'Latency:\\s+(\\d+(\\.\\d+)?)'}
@property def patterns(self) -> dict: return {'throughput': 'Throughput:\\s+(\\d+(\\.\\d+)?)', 'latency': 'Latency:\\s+(\\d+(\\.\\d+)?)'}<|docstring|>Set patterns to get metrics from lines.<|endoftext|>
41c192e24b4d2a105907f90c3d36b289d261821388f86c3c5f08f32eb5a613ad
def process(self) -> Dict[(str, Any)]: 'Process accuracy logs.' for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if match: for precision in ['input_model', 'optimized_model']: metric_name = f'acc_{precision}' self.metric.insert_data(metric_name, match.group(1)) parsed_data: Dict[(str, Any)] = self.metric.serialize() return parsed_data
Process accuracy logs.
lpot/ux/utils/parser.py
process
yqhu/neural-compressor
0
python
def process(self) -> Dict[(str, Any)]: for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if match: for precision in ['input_model', 'optimized_model']: metric_name = f'acc_{precision}' self.metric.insert_data(metric_name, match.group(1)) parsed_data: Dict[(str, Any)] = self.metric.serialize() return parsed_data
def process(self) -> Dict[(str, Any)]: for log_file in self._logs: log.debug(f'Read from {log_file}') with open(log_file) as f: for line in f: for key in self.patterns: prog = re.compile(self.patterns[key]) match = prog.search(line) if match: for precision in ['input_model', 'optimized_model']: metric_name = f'acc_{precision}' self.metric.insert_data(metric_name, match.group(1)) parsed_data: Dict[(str, Any)] = self.metric.serialize() return parsed_data<|docstring|>Process accuracy logs.<|endoftext|>
cfa467d85a01037c06385d263a7bb4da4cb4698f38ea8e6d7c7c0ab8a00b7408
@property def patterns(self) -> dict: 'Set patterns to get metrics from lines.' return {Benchmarks.ACC: 'Accuracy is (\\d+(\\.\\d+)?)'}
Set patterns to get metrics from lines.
lpot/ux/utils/parser.py
patterns
yqhu/neural-compressor
0
python
@property def patterns(self) -> dict: return {Benchmarks.ACC: 'Accuracy is (\\d+(\\.\\d+)?)'}
@property def patterns(self) -> dict: return {Benchmarks.ACC: 'Accuracy is (\\d+(\\.\\d+)?)'}<|docstring|>Set patterns to get metrics from lines.<|endoftext|>
e2d2ae70cb7d715f0fb9d361f1c84034e31da75bb304dcf1a692652bd11132a1
@staticmethod def get_parser(benchmark_mode: str, logs: List[str]) -> Parser: 'Get benchmark parser for specified mode.' parser_map = {Benchmarks.PERF: PerformanceParser, Benchmarks.ACC: AccuracyParser} parser = parser_map.get(benchmark_mode, None) if (parser is None): raise InternalException(f'Could not find optimization class for {benchmark_mode}') return parser(logs)
Get benchmark parser for specified mode.
lpot/ux/utils/parser.py
get_parser
yqhu/neural-compressor
0
python
@staticmethod def get_parser(benchmark_mode: str, logs: List[str]) -> Parser: parser_map = {Benchmarks.PERF: PerformanceParser, Benchmarks.ACC: AccuracyParser} parser = parser_map.get(benchmark_mode, None) if (parser is None): raise InternalException(f'Could not find optimization class for {benchmark_mode}') return parser(logs)
@staticmethod def get_parser(benchmark_mode: str, logs: List[str]) -> Parser: parser_map = {Benchmarks.PERF: PerformanceParser, Benchmarks.ACC: AccuracyParser} parser = parser_map.get(benchmark_mode, None) if (parser is None): raise InternalException(f'Could not find optimization class for {benchmark_mode}') return parser(logs)<|docstring|>Get benchmark parser for specified mode.<|endoftext|>
b2d0eab598f77f0c2b9abd4be5ac2b3f2111f925aeb792d6f2d84d623d27e5f2
def __init__(self): 'Initialize filter.' self.properties = {}
Initialize filter.
beacontools_mod/device_filters.py
__init__
kroa38/RaspLogger
3
python
def __init__(self): self.properties = {}
def __init__(self): self.properties = {}<|docstring|>Initialize filter.<|endoftext|>
543cb1ce4a704f640585fabaf26fa93623388ff93eb2a15cf649d695ce8b7bbe
def matches(self, filter_props): 'Check if the filter matches the supplied properties.' if (filter_props is None): return False found_one = False for (key, value) in filter_props.items(): if ((key in self.properties) and (not value.startswith(self.properties[key]))): return False elif ((key in self.properties) and value.startswith(self.properties[key])): found_one = True return found_one
Check if the filter matches the supplied properties.
beacontools_mod/device_filters.py
matches
kroa38/RaspLogger
3
python
def matches(self, filter_props): if (filter_props is None): return False found_one = False for (key, value) in filter_props.items(): if ((key in self.properties) and (not value.startswith(self.properties[key]))): return False elif ((key in self.properties) and value.startswith(self.properties[key])): found_one = True return found_one
def matches(self, filter_props): if (filter_props is None): return False found_one = False for (key, value) in filter_props.items(): if ((key in self.properties) and (not value.startswith(self.properties[key]))): return False elif ((key in self.properties) and value.startswith(self.properties[key])): found_one = True return found_one<|docstring|>Check if the filter matches the supplied properties.<|endoftext|>
43fc280cdb66c993e394b7bd23631625d917dc64d6587af44af756d07dabc3bc
def __init__(self, uuid=None, major=None, minor=None): 'Initialize filter.' super(IBeaconFilter, self).__init__() if ((uuid is None) and (major is None) and (minor is None)): raise ValueError('IBeaconFilter needs at least one argument set') if (uuid is not None): self.properties['uuid'] = uuid if (major is not None): self.properties['major'] = major if (minor is not None): self.properties['minor'] = minor
Initialize filter.
beacontools_mod/device_filters.py
__init__
kroa38/RaspLogger
3
python
def __init__(self, uuid=None, major=None, minor=None): super(IBeaconFilter, self).__init__() if ((uuid is None) and (major is None) and (minor is None)): raise ValueError('IBeaconFilter needs at least one argument set') if (uuid is not None): self.properties['uuid'] = uuid if (major is not None): self.properties['major'] = major if (minor is not None): self.properties['minor'] = minor
def __init__(self, uuid=None, major=None, minor=None): super(IBeaconFilter, self).__init__() if ((uuid is None) and (major is None) and (minor is None)): raise ValueError('IBeaconFilter needs at least one argument set') if (uuid is not None): self.properties['uuid'] = uuid if (major is not None): self.properties['major'] = major if (minor is not None): self.properties['minor'] = minor<|docstring|>Initialize filter.<|endoftext|>
dfae2671da83c28c964a5aa30fe8fb8d8759288f264d4ef383966f294f6f5e25
def __init__(self, namespace=None, instance=None): 'Initialize filter.' super(EddystoneFilter, self).__init__() if ((namespace is None) and (instance is None)): raise ValueError('EddystoneFilter needs at least one argument set') if (namespace is not None): self.properties['namespace'] = namespace if (instance is not None): self.properties['instance'] = instance
Initialize filter.
beacontools_mod/device_filters.py
__init__
kroa38/RaspLogger
3
python
def __init__(self, namespace=None, instance=None): super(EddystoneFilter, self).__init__() if ((namespace is None) and (instance is None)): raise ValueError('EddystoneFilter needs at least one argument set') if (namespace is not None): self.properties['namespace'] = namespace if (instance is not None): self.properties['instance'] = instance
def __init__(self, namespace=None, instance=None): super(EddystoneFilter, self).__init__() if ((namespace is None) and (instance is None)): raise ValueError('EddystoneFilter needs at least one argument set') if (namespace is not None): self.properties['namespace'] = namespace if (instance is not None): self.properties['instance'] = instance<|docstring|>Initialize filter.<|endoftext|>
e627b346eab08ecdbb47dc3d1b379efb5f199293a3ed6354cfd5f4d3b652817c
def __init__(self, identifier=None, protocol_version=None): 'Initialize filter.' super(EstimoteFilter, self).__init__() if ((identifier is None) and (protocol_version is None)): raise ValueError('EstimoteFilter needs at least one argument set') if (identifier is not None): self.properties['identifier'] = identifier if (protocol_version is not None): self.properties['protocol_version'] = protocol_version
Initialize filter.
beacontools_mod/device_filters.py
__init__
kroa38/RaspLogger
3
python
def __init__(self, identifier=None, protocol_version=None): super(EstimoteFilter, self).__init__() if ((identifier is None) and (protocol_version is None)): raise ValueError('EstimoteFilter needs at least one argument set') if (identifier is not None): self.properties['identifier'] = identifier if (protocol_version is not None): self.properties['protocol_version'] = protocol_version
def __init__(self, identifier=None, protocol_version=None): super(EstimoteFilter, self).__init__() if ((identifier is None) and (protocol_version is None)): raise ValueError('EstimoteFilter needs at least one argument set') if (identifier is not None): self.properties['identifier'] = identifier if (protocol_version is not None): self.properties['protocol_version'] = protocol_version<|docstring|>Initialize filter.<|endoftext|>
c087bd1caded6b838107f6b447f7efd5d69a7c55f2d2ebef3a7c51eb869960de
def __init__(self, bt_addr): 'Initialize filter.' super(BtAddrFilter, self).__init__() try: bt_addr = bt_addr.lower() except AttributeError: raise ValueError("bt_addr({}) wasn't a string".format(bt_addr)) if (not is_valid_mac(bt_addr)): raise ValueError('Invalid bluetooth MAC address given, format should match aa:bb:cc:dd:ee:ff') self.properties['bt_addr'] = bt_addr
Initialize filter.
beacontools_mod/device_filters.py
__init__
kroa38/RaspLogger
3
python
def __init__(self, bt_addr): super(BtAddrFilter, self).__init__() try: bt_addr = bt_addr.lower() except AttributeError: raise ValueError("bt_addr({}) wasn't a string".format(bt_addr)) if (not is_valid_mac(bt_addr)): raise ValueError('Invalid bluetooth MAC address given, format should match aa:bb:cc:dd:ee:ff') self.properties['bt_addr'] = bt_addr
def __init__(self, bt_addr): super(BtAddrFilter, self).__init__() try: bt_addr = bt_addr.lower() except AttributeError: raise ValueError("bt_addr({}) wasn't a string".format(bt_addr)) if (not is_valid_mac(bt_addr)): raise ValueError('Invalid bluetooth MAC address given, format should match aa:bb:cc:dd:ee:ff') self.properties['bt_addr'] = bt_addr<|docstring|>Initialize filter.<|endoftext|>
c43d490ab9a299ba4c1f77a5a7566d4df8c733f87b07430f65c024b92982cbe2
def load(self, path): ' A method that load a JSON file.\n\n Parameters\n ----------\n path: str\n the path to the JOSN file.\n\n Returns\n -------\n data: object\n the loaded data.\n ' with open(path, 'rt') as open_file: data = json.load(open_file) return data
A method that load a JSON file. Parameters ---------- path: str the path to the JOSN file. Returns ------- data: object the loaded data.
caravel/loaders/_json.py
load
neurospin/pycaravel
0
python
def load(self, path): ' A method that load a JSON file.\n\n Parameters\n ----------\n path: str\n the path to the JOSN file.\n\n Returns\n -------\n data: object\n the loaded data.\n ' with open(path, 'rt') as open_file: data = json.load(open_file) return data
def load(self, path): ' A method that load a JSON file.\n\n Parameters\n ----------\n path: str\n the path to the JOSN file.\n\n Returns\n -------\n data: object\n the loaded data.\n ' with open(path, 'rt') as open_file: data = json.load(open_file) return data<|docstring|>A method that load a JSON file. Parameters ---------- path: str the path to the JOSN file. Returns ------- data: object the loaded data.<|endoftext|>
d8d2ac482dac49a6a5bbc7e31fcd398d003d4af3d08e8db0fefefe4acc532a8c
def save(self, data, outpath): ' A method that save the data in a JSON file.\n\n Parameters\n ----------\n data: object\n the data to be saved.\n outpath: str\n the path where the the data will be saved.\n ' with open(path, 'wt') as open_file: json.dump(data, open_file, indent=4)
A method that save the data in a JSON file. Parameters ---------- data: object the data to be saved. outpath: str the path where the the data will be saved.
caravel/loaders/_json.py
save
neurospin/pycaravel
0
python
def save(self, data, outpath): ' A method that save the data in a JSON file.\n\n Parameters\n ----------\n data: object\n the data to be saved.\n outpath: str\n the path where the the data will be saved.\n ' with open(path, 'wt') as open_file: json.dump(data, open_file, indent=4)
def save(self, data, outpath): ' A method that save the data in a JSON file.\n\n Parameters\n ----------\n data: object\n the data to be saved.\n outpath: str\n the path where the the data will be saved.\n ' with open(path, 'wt') as open_file: json.dump(data, open_file, indent=4)<|docstring|>A method that save the data in a JSON file. Parameters ---------- data: object the data to be saved. outpath: str the path where the the data will be saved.<|endoftext|>
47a7872e30b721c09f20059ac2e8f46d0621ad0fae4e99040808ffc4284eee7a
def average_slice(value, size, zero_is_special=True, zero_rel_tol=1e-05): '\n :type value: np.ndarray\n :type size: int\n :type zero_is_special: bool\n :type zero_rel_tol: float\n :rtype: (float,float,np.ndarray)\n ' assert isinstance(value, np.ndarray) assert ((value.dtype == np.int32) or (value.dtype == np.float32)) index = np.ndarray(shape=value.shape, dtype=np.uint32) v_min = np.min(value) v_max = np.max(value) if zero_is_special: cluster_size = (size - 1) step = ((v_max - v_min) / (cluster_size - 1)) for i in range(value.size): if isclose(value.flat[i], 0, rel_tol=zero_rel_tol): index.flat[i] = 0 else: index.flat[i] = (round(((value.flat[i] - v_min) / step)) + 1) return ((v_min - step), step, index) else: cluster_size = size step = ((v_max - v_min) / (cluster_size - 1)) for i in range(value.size): index.flat[i] = round(((value.flat[i] - v_min) / step)) return (v_min, step, index)
:type value: np.ndarray :type size: int :type zero_is_special: bool :type zero_rel_tol: float :rtype: (float,float,np.ndarray)
xinmei/tools/tensorflow_scripts/quantize/slice.py
average_slice
lh-ycx/tensorflow
0
python
def average_slice(value, size, zero_is_special=True, zero_rel_tol=1e-05): '\n :type value: np.ndarray\n :type size: int\n :type zero_is_special: bool\n :type zero_rel_tol: float\n :rtype: (float,float,np.ndarray)\n ' assert isinstance(value, np.ndarray) assert ((value.dtype == np.int32) or (value.dtype == np.float32)) index = np.ndarray(shape=value.shape, dtype=np.uint32) v_min = np.min(value) v_max = np.max(value) if zero_is_special: cluster_size = (size - 1) step = ((v_max - v_min) / (cluster_size - 1)) for i in range(value.size): if isclose(value.flat[i], 0, rel_tol=zero_rel_tol): index.flat[i] = 0 else: index.flat[i] = (round(((value.flat[i] - v_min) / step)) + 1) return ((v_min - step), step, index) else: cluster_size = size step = ((v_max - v_min) / (cluster_size - 1)) for i in range(value.size): index.flat[i] = round(((value.flat[i] - v_min) / step)) return (v_min, step, index)
def average_slice(value, size, zero_is_special=True, zero_rel_tol=1e-05): '\n :type value: np.ndarray\n :type size: int\n :type zero_is_special: bool\n :type zero_rel_tol: float\n :rtype: (float,float,np.ndarray)\n ' assert isinstance(value, np.ndarray) assert ((value.dtype == np.int32) or (value.dtype == np.float32)) index = np.ndarray(shape=value.shape, dtype=np.uint32) v_min = np.min(value) v_max = np.max(value) if zero_is_special: cluster_size = (size - 1) step = ((v_max - v_min) / (cluster_size - 1)) for i in range(value.size): if isclose(value.flat[i], 0, rel_tol=zero_rel_tol): index.flat[i] = 0 else: index.flat[i] = (round(((value.flat[i] - v_min) / step)) + 1) return ((v_min - step), step, index) else: cluster_size = size step = ((v_max - v_min) / (cluster_size - 1)) for i in range(value.size): index.flat[i] = round(((value.flat[i] - v_min) / step)) return (v_min, step, index)<|docstring|>:type value: np.ndarray :type size: int :type zero_is_special: bool :type zero_rel_tol: float :rtype: (float,float,np.ndarray)<|endoftext|>
41d46541f2a574d7748dbb2b375462d94a5eda89729a7d4acddf2fb8d4f11c5c
def k_means_slice(value, size, zero_is_special=True, zero_rel_tol=1e-05, n_jobs=1): '\n :type value: np.ndarray\n :type size: int\n :type zero_is_special: bool\n :type zero_rel_tol: float\n :type n_jobs: int\n :rtype: (np.ndarray,np.ndarray)\n ' assert isinstance(value, np.ndarray) assert ((value.dtype == np.float32) or (value.dtype == np.int32)) v_min = np.min(value) v_max = np.max(value) if (zero_is_special and (v_min < 0 < v_max)): has_zero_center = True else: has_zero_center = False k_means = KMeans(n_clusters=(size - (1 if has_zero_center else 0)), n_jobs=n_jobs) if has_zero_center: fit_data = [] for i in range(value.size): if (not isclose(value.flat[i], 0, rel_tol=zero_rel_tol)): fit_data.append([i, value.flat[i]]) fit_data = np.array(fit_data, dtype=np.float32) k_means.fit(reshaped_view(fit_data[(:, 1)], ((- 1), 1))) index = np.ndarray(shape=value.shape, dtype=np.uint32) index.fill(0) for i in range(len(fit_data)): real_index = int(fit_data[i][0]) if (not isclose(value.flat[real_index], 0, rel_tol=zero_rel_tol)): index.flat[real_index] = k_means.labels_[i] else: k_means.fit(reshaped_view(value, ((- 1), 1))) index = reshaped_view(k_means.labels_) table = reshaped_view(k_means.cluster_centers_) if (value.dtype == np.int32): table = table.round().astype(np.int32) return (table.astype(value.dtype), index.astype(np.uint32))
:type value: np.ndarray :type size: int :type zero_is_special: bool :type zero_rel_tol: float :type n_jobs: int :rtype: (np.ndarray,np.ndarray)
xinmei/tools/tensorflow_scripts/quantize/slice.py
k_means_slice
lh-ycx/tensorflow
0
python
def k_means_slice(value, size, zero_is_special=True, zero_rel_tol=1e-05, n_jobs=1): '\n :type value: np.ndarray\n :type size: int\n :type zero_is_special: bool\n :type zero_rel_tol: float\n :type n_jobs: int\n :rtype: (np.ndarray,np.ndarray)\n ' assert isinstance(value, np.ndarray) assert ((value.dtype == np.float32) or (value.dtype == np.int32)) v_min = np.min(value) v_max = np.max(value) if (zero_is_special and (v_min < 0 < v_max)): has_zero_center = True else: has_zero_center = False k_means = KMeans(n_clusters=(size - (1 if has_zero_center else 0)), n_jobs=n_jobs) if has_zero_center: fit_data = [] for i in range(value.size): if (not isclose(value.flat[i], 0, rel_tol=zero_rel_tol)): fit_data.append([i, value.flat[i]]) fit_data = np.array(fit_data, dtype=np.float32) k_means.fit(reshaped_view(fit_data[(:, 1)], ((- 1), 1))) index = np.ndarray(shape=value.shape, dtype=np.uint32) index.fill(0) for i in range(len(fit_data)): real_index = int(fit_data[i][0]) if (not isclose(value.flat[real_index], 0, rel_tol=zero_rel_tol)): index.flat[real_index] = k_means.labels_[i] else: k_means.fit(reshaped_view(value, ((- 1), 1))) index = reshaped_view(k_means.labels_) table = reshaped_view(k_means.cluster_centers_) if (value.dtype == np.int32): table = table.round().astype(np.int32) return (table.astype(value.dtype), index.astype(np.uint32))
def k_means_slice(value, size, zero_is_special=True, zero_rel_tol=1e-05, n_jobs=1): '\n :type value: np.ndarray\n :type size: int\n :type zero_is_special: bool\n :type zero_rel_tol: float\n :type n_jobs: int\n :rtype: (np.ndarray,np.ndarray)\n ' assert isinstance(value, np.ndarray) assert ((value.dtype == np.float32) or (value.dtype == np.int32)) v_min = np.min(value) v_max = np.max(value) if (zero_is_special and (v_min < 0 < v_max)): has_zero_center = True else: has_zero_center = False k_means = KMeans(n_clusters=(size - (1 if has_zero_center else 0)), n_jobs=n_jobs) if has_zero_center: fit_data = [] for i in range(value.size): if (not isclose(value.flat[i], 0, rel_tol=zero_rel_tol)): fit_data.append([i, value.flat[i]]) fit_data = np.array(fit_data, dtype=np.float32) k_means.fit(reshaped_view(fit_data[(:, 1)], ((- 1), 1))) index = np.ndarray(shape=value.shape, dtype=np.uint32) index.fill(0) for i in range(len(fit_data)): real_index = int(fit_data[i][0]) if (not isclose(value.flat[real_index], 0, rel_tol=zero_rel_tol)): index.flat[real_index] = k_means.labels_[i] else: k_means.fit(reshaped_view(value, ((- 1), 1))) index = reshaped_view(k_means.labels_) table = reshaped_view(k_means.cluster_centers_) if (value.dtype == np.int32): table = table.round().astype(np.int32) return (table.astype(value.dtype), index.astype(np.uint32))<|docstring|>:type value: np.ndarray :type size: int :type zero_is_special: bool :type zero_rel_tol: float :type n_jobs: int :rtype: (np.ndarray,np.ndarray)<|endoftext|>
b0b81977eee64e90cabe56035a053cf21210dd5919f015e49b8f6fae290af277
def setup_function(function): ' executed before each method call\n ' print('\n\nSETUP ==> ') User.delete_all()
executed before each method call
tests/test_locale_support.py
setup_function
thingsplode/appkernel
156
python
def setup_function(function): ' \n ' print('\n\nSETUP ==> ') User.delete_all()
def setup_function(function): ' \n ' print('\n\nSETUP ==> ') User.delete_all()<|docstring|>executed before each method call<|endoftext|>
c73ed01dfa25daf0442817c88c5fe3aaa9700df3b39c906fdf8c2922b68c5f21
def test_basic_translation(client): '\n\n :param client:\n :type client: Client\n :return:\n ' header_types = ['de, de-de;q=0.8, en;q=0.7', 'de-de, de;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5', 'de', 'de-de'] for header in header_types: print('\n==== current header [{}] ===='.format(header)) rsp = client.get('/users/meta', headers={'Accept-Language': header}) result = rsp.json print('\n{}'.format(json.dumps(result, indent=2))) assert (200 <= rsp.status_code < 300) validate_result(result)
:param client: :type client: Client :return:
tests/test_locale_support.py
test_basic_translation
thingsplode/appkernel
156
python
def test_basic_translation(client): '\n\n :param client:\n :type client: Client\n :return:\n ' header_types = ['de, de-de;q=0.8, en;q=0.7', 'de-de, de;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5', 'de', 'de-de'] for header in header_types: print('\n==== current header [{}] ===='.format(header)) rsp = client.get('/users/meta', headers={'Accept-Language': header}) result = rsp.json print('\n{}'.format(json.dumps(result, indent=2))) assert (200 <= rsp.status_code < 300) validate_result(result)
def test_basic_translation(client): '\n\n :param client:\n :type client: Client\n :return:\n ' header_types = ['de, de-de;q=0.8, en;q=0.7', 'de-de, de;q=0.9, en;q=0.8, de;q=0.7, *;q=0.5', 'de', 'de-de'] for header in header_types: print('\n==== current header [{}] ===='.format(header)) rsp = client.get('/users/meta', headers={'Accept-Language': header}) result = rsp.json print('\n{}'.format(json.dumps(result, indent=2))) assert (200 <= rsp.status_code < 300) validate_result(result)<|docstring|>:param client: :type client: Client :return:<|endoftext|>
da45b9df7172b90a780e8fd35b3e130f37452c1b916ded400916b4795f2c68de
@distributed_trace def get_null(self, **kwargs): 'Get null duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_null.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Get null duration value. :keyword callable cls: A custom type or function that will be passed the direct response :return: timedelta, or the result of cls(response) :rtype: ~datetime.timedelta :raises: ~azure.core.exceptions.HttpResponseError
test/vanilla/Expected/AcceptanceTests/BodyDuration/bodyduration/operations/_duration_operations.py
get_null
Azure/autorest.azure-functions-python
4
python
@distributed_trace def get_null(self, **kwargs): 'Get null duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_null.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
@distributed_trace def get_null(self, **kwargs): 'Get null duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_null.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized<|docstring|>Get null duration value. :keyword callable cls: A custom type or function that will be passed the direct response :return: timedelta, or the result of cls(response) :rtype: ~datetime.timedelta :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
8580dda208bb88ff6fbb062ff80edf1af59128420c84c84ebf190563594898c4
@distributed_trace def put_positive_duration(self, duration_body, **kwargs): 'Put a positive duration value.\n\n :param duration_body:\n :type duration_body: ~datetime.timedelta\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', 'application/json') url = self.put_positive_duration.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str') body_content_kwargs = {} body_content = self._serialize.body(duration_body, 'duration') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) if cls: return cls(pipeline_response, None, {})
Put a positive duration value. :param duration_body: :type duration_body: ~datetime.timedelta :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError
test/vanilla/Expected/AcceptanceTests/BodyDuration/bodyduration/operations/_duration_operations.py
put_positive_duration
Azure/autorest.azure-functions-python
4
python
@distributed_trace def put_positive_duration(self, duration_body, **kwargs): 'Put a positive duration value.\n\n :param duration_body:\n :type duration_body: ~datetime.timedelta\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', 'application/json') url = self.put_positive_duration.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str') body_content_kwargs = {} body_content = self._serialize.body(duration_body, 'duration') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) if cls: return cls(pipeline_response, None, {})
@distributed_trace def put_positive_duration(self, duration_body, **kwargs): 'Put a positive duration value.\n\n :param duration_body:\n :type duration_body: ~datetime.timedelta\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: None, or the result of cls(response)\n :rtype: None\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', 'application/json') url = self.put_positive_duration.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Content-Type'] = self._serialize.header('content_type', content_type, 'str') body_content_kwargs = {} body_content = self._serialize.body(duration_body, 'duration') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) if cls: return cls(pipeline_response, None, {})<|docstring|>Put a positive duration value. :param duration_body: :type duration_body: ~datetime.timedelta :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
18139e2c68bee8e6565bce1d293bd4134923480ab2d925b38bbcac0e2a76880a
@distributed_trace def get_positive_duration(self, **kwargs): 'Get a positive duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_positive_duration.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Get a positive duration value. :keyword callable cls: A custom type or function that will be passed the direct response :return: timedelta, or the result of cls(response) :rtype: ~datetime.timedelta :raises: ~azure.core.exceptions.HttpResponseError
test/vanilla/Expected/AcceptanceTests/BodyDuration/bodyduration/operations/_duration_operations.py
get_positive_duration
Azure/autorest.azure-functions-python
4
python
@distributed_trace def get_positive_duration(self, **kwargs): 'Get a positive duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_positive_duration.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
@distributed_trace def get_positive_duration(self, **kwargs): 'Get a positive duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_positive_duration.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized<|docstring|>Get a positive duration value. :keyword callable cls: A custom type or function that will be passed the direct response :return: timedelta, or the result of cls(response) :rtype: ~datetime.timedelta :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
99a530089a427af46d03cdcc2d376e1d115415a27ba218a7ec96de823c7403bc
@distributed_trace def get_invalid(self, **kwargs): 'Get an invalid duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_invalid.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
Get an invalid duration value. :keyword callable cls: A custom type or function that will be passed the direct response :return: timedelta, or the result of cls(response) :rtype: ~datetime.timedelta :raises: ~azure.core.exceptions.HttpResponseError
test/vanilla/Expected/AcceptanceTests/BodyDuration/bodyduration/operations/_duration_operations.py
get_invalid
Azure/autorest.azure-functions-python
4
python
@distributed_trace def get_invalid(self, **kwargs): 'Get an invalid duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_invalid.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized
@distributed_trace def get_invalid(self, **kwargs): 'Get an invalid duration value.\n\n :keyword callable cls: A custom type or function that will be passed the direct response\n :return: timedelta, or the result of cls(response)\n :rtype: ~datetime.timedelta\n :raises: ~azure.core.exceptions.HttpResponseError\n ' cls = kwargs.pop('cls', None) error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} error_map.update(kwargs.pop('error_map', {})) url = self.get_invalid.metadata['url'] query_parameters = {} header_parameters = {} header_parameters['Accept'] = 'application/json' request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if (response.status_code not in [200]): map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize(models.Error, response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('duration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized<|docstring|>Get an invalid duration value. :keyword callable cls: A custom type or function that will be passed the direct response :return: timedelta, or the result of cls(response) :rtype: ~datetime.timedelta :raises: ~azure.core.exceptions.HttpResponseError<|endoftext|>
1a13188d81bbde1c26fca0521d9d852b1053d424bd1773a5a234f0e11caf844f
def get_managed_zone(client_operation_id: Optional[str]=None, managed_zone: Optional[str]=None, project: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetManagedZoneResult: '\n Fetches the representation of an existing ManagedZone.\n ' __args__ = dict() __args__['clientOperationId'] = client_operation_id __args__['managedZone'] = managed_zone __args__['project'] = project if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('google-native:dns/v1beta2:getManagedZone', __args__, opts=opts, typ=GetManagedZoneResult).value return AwaitableGetManagedZoneResult(cloud_logging_config=__ret__.cloud_logging_config, creation_time=__ret__.creation_time, description=__ret__.description, dns_name=__ret__.dns_name, dnssec_config=__ret__.dnssec_config, forwarding_config=__ret__.forwarding_config, kind=__ret__.kind, labels=__ret__.labels, name=__ret__.name, name_server_set=__ret__.name_server_set, name_servers=__ret__.name_servers, peering_config=__ret__.peering_config, private_visibility_config=__ret__.private_visibility_config, reverse_lookup_config=__ret__.reverse_lookup_config, service_directory_config=__ret__.service_directory_config, visibility=__ret__.visibility)
Fetches the representation of an existing ManagedZone.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
get_managed_zone
AaronFriel/pulumi-google-native
44
python
def get_managed_zone(client_operation_id: Optional[str]=None, managed_zone: Optional[str]=None, project: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetManagedZoneResult: '\n \n ' __args__ = dict() __args__['clientOperationId'] = client_operation_id __args__['managedZone'] = managed_zone __args__['project'] = project if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('google-native:dns/v1beta2:getManagedZone', __args__, opts=opts, typ=GetManagedZoneResult).value return AwaitableGetManagedZoneResult(cloud_logging_config=__ret__.cloud_logging_config, creation_time=__ret__.creation_time, description=__ret__.description, dns_name=__ret__.dns_name, dnssec_config=__ret__.dnssec_config, forwarding_config=__ret__.forwarding_config, kind=__ret__.kind, labels=__ret__.labels, name=__ret__.name, name_server_set=__ret__.name_server_set, name_servers=__ret__.name_servers, peering_config=__ret__.peering_config, private_visibility_config=__ret__.private_visibility_config, reverse_lookup_config=__ret__.reverse_lookup_config, service_directory_config=__ret__.service_directory_config, visibility=__ret__.visibility)
def get_managed_zone(client_operation_id: Optional[str]=None, managed_zone: Optional[str]=None, project: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetManagedZoneResult: '\n \n ' __args__ = dict() __args__['clientOperationId'] = client_operation_id __args__['managedZone'] = managed_zone __args__['project'] = project if (opts is None): opts = pulumi.InvokeOptions() if (opts.version is None): opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('google-native:dns/v1beta2:getManagedZone', __args__, opts=opts, typ=GetManagedZoneResult).value return AwaitableGetManagedZoneResult(cloud_logging_config=__ret__.cloud_logging_config, creation_time=__ret__.creation_time, description=__ret__.description, dns_name=__ret__.dns_name, dnssec_config=__ret__.dnssec_config, forwarding_config=__ret__.forwarding_config, kind=__ret__.kind, labels=__ret__.labels, name=__ret__.name, name_server_set=__ret__.name_server_set, name_servers=__ret__.name_servers, peering_config=__ret__.peering_config, private_visibility_config=__ret__.private_visibility_config, reverse_lookup_config=__ret__.reverse_lookup_config, service_directory_config=__ret__.service_directory_config, visibility=__ret__.visibility)<|docstring|>Fetches the representation of an existing ManagedZone.<|endoftext|>
5de0a2ecc016d41e5ef461d91a30f8aa1122817c36b4ceafa03be28c6b485ecb
@_utilities.lift_output_func(get_managed_zone) def get_managed_zone_output(client_operation_id: Optional[pulumi.Input[Optional[str]]]=None, managed_zone: Optional[pulumi.Input[str]]=None, project: Optional[pulumi.Input[Optional[str]]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> pulumi.Output[GetManagedZoneResult]: '\n Fetches the representation of an existing ManagedZone.\n ' ...
Fetches the representation of an existing ManagedZone.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
get_managed_zone_output
AaronFriel/pulumi-google-native
44
python
@_utilities.lift_output_func(get_managed_zone) def get_managed_zone_output(client_operation_id: Optional[pulumi.Input[Optional[str]]]=None, managed_zone: Optional[pulumi.Input[str]]=None, project: Optional[pulumi.Input[Optional[str]]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> pulumi.Output[GetManagedZoneResult]: '\n \n ' ...
@_utilities.lift_output_func(get_managed_zone) def get_managed_zone_output(client_operation_id: Optional[pulumi.Input[Optional[str]]]=None, managed_zone: Optional[pulumi.Input[str]]=None, project: Optional[pulumi.Input[Optional[str]]]=None, opts: Optional[pulumi.InvokeOptions]=None) -> pulumi.Output[GetManagedZoneResult]: '\n \n ' ...<|docstring|>Fetches the representation of an existing ManagedZone.<|endoftext|>
c4d5d949c1a66b9dcbdb3d6f0c7d60a67c36198ebe50ffaf5476fc14cda5c7c6
@property @pulumi.getter(name='creationTime') def creation_time(self) -> str: '\n The time that this resource was created on the server. This is in RFC3339 text format. Output only.\n ' return pulumi.get(self, 'creation_time')
The time that this resource was created on the server. This is in RFC3339 text format. Output only.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
creation_time
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='creationTime') def creation_time(self) -> str: '\n \n ' return pulumi.get(self, 'creation_time')
@property @pulumi.getter(name='creationTime') def creation_time(self) -> str: '\n \n ' return pulumi.get(self, 'creation_time')<|docstring|>The time that this resource was created on the server. This is in RFC3339 text format. Output only.<|endoftext|>
b61a897ecd1bf989b4aca163592a2bea469520dad1e03f623bbcb21218413361
@property @pulumi.getter def description(self) -> str: "\n A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the managed zone's function.\n " return pulumi.get(self, 'description')
A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the managed zone's function.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
description
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter def description(self) -> str: "\n \n " return pulumi.get(self, 'description')
@property @pulumi.getter def description(self) -> str: "\n \n " return pulumi.get(self, 'description')<|docstring|>A mutable string of at most 1024 characters associated with this resource for the user's convenience. Has no effect on the managed zone's function.<|endoftext|>
1870a69688e102514491d2da600cfaeeb5ef421bcbe10416af9bd6fff7f6da42
@property @pulumi.getter(name='dnsName') def dns_name(self) -> str: '\n The DNS name of this managed zone, for instance "example.com.".\n ' return pulumi.get(self, 'dns_name')
The DNS name of this managed zone, for instance "example.com.".
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
dns_name
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='dnsName') def dns_name(self) -> str: '\n \n ' return pulumi.get(self, 'dns_name')
@property @pulumi.getter(name='dnsName') def dns_name(self) -> str: '\n \n ' return pulumi.get(self, 'dns_name')<|docstring|>The DNS name of this managed zone, for instance "example.com.".<|endoftext|>
2016ad337254cd5a1107bbf28f814f7ddd5467b62cc810496ed19df7e55844fd
@property @pulumi.getter(name='dnssecConfig') def dnssec_config(self) -> 'outputs.ManagedZoneDnsSecConfigResponse': '\n DNSSEC configuration.\n ' return pulumi.get(self, 'dnssec_config')
DNSSEC configuration.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
dnssec_config
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='dnssecConfig') def dnssec_config(self) -> 'outputs.ManagedZoneDnsSecConfigResponse': '\n \n ' return pulumi.get(self, 'dnssec_config')
@property @pulumi.getter(name='dnssecConfig') def dnssec_config(self) -> 'outputs.ManagedZoneDnsSecConfigResponse': '\n \n ' return pulumi.get(self, 'dnssec_config')<|docstring|>DNSSEC configuration.<|endoftext|>
0261fd89f80d0c7aa582a6e97e4c74a8ada1406b47f4b7027e5d7c2e78ec616e
@property @pulumi.getter(name='forwardingConfig') def forwarding_config(self) -> 'outputs.ManagedZoneForwardingConfigResponse': '\n The presence for this field indicates that outbound forwarding is enabled for this zone. The value of this field contains the set of destinations to forward to.\n ' return pulumi.get(self, 'forwarding_config')
The presence for this field indicates that outbound forwarding is enabled for this zone. The value of this field contains the set of destinations to forward to.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
forwarding_config
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='forwardingConfig') def forwarding_config(self) -> 'outputs.ManagedZoneForwardingConfigResponse': '\n \n ' return pulumi.get(self, 'forwarding_config')
@property @pulumi.getter(name='forwardingConfig') def forwarding_config(self) -> 'outputs.ManagedZoneForwardingConfigResponse': '\n \n ' return pulumi.get(self, 'forwarding_config')<|docstring|>The presence for this field indicates that outbound forwarding is enabled for this zone. The value of this field contains the set of destinations to forward to.<|endoftext|>
2f6942e6e6502d431cdf8b4ae5d46501b95e4520ecf9a2f98d8e6cd77d3ae063
@property @pulumi.getter def labels(self) -> Mapping[(str, str)]: '\n User labels.\n ' return pulumi.get(self, 'labels')
User labels.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
labels
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter def labels(self) -> Mapping[(str, str)]: '\n \n ' return pulumi.get(self, 'labels')
@property @pulumi.getter def labels(self) -> Mapping[(str, str)]: '\n \n ' return pulumi.get(self, 'labels')<|docstring|>User labels.<|endoftext|>
97035c05ca9cff1c569dc5a712a7f9aeca8082247fdb5ed15b79511c79233bf3
@property @pulumi.getter def name(self) -> str: '\n User assigned name for this resource. Must be unique within the project. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes.\n ' return pulumi.get(self, 'name')
User assigned name for this resource. Must be unique within the project. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
name
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')
@property @pulumi.getter def name(self) -> str: '\n \n ' return pulumi.get(self, 'name')<|docstring|>User assigned name for this resource. Must be unique within the project. The name must be 1-63 characters long, must begin with a letter, end with a letter or digit, and only contain lowercase letters, digits or dashes.<|endoftext|>
dd06dae658b9f4b557cae5e82275d9bd0d70b776688c6e4167e96437ff3e0b4f
@property @pulumi.getter(name='nameServerSet') def name_server_set(self) -> str: '\n Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is a set of DNS name servers that all host the same ManagedZones. Most users leave this field unset. If you need to use this field, contact your account team.\n ' return pulumi.get(self, 'name_server_set')
Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is a set of DNS name servers that all host the same ManagedZones. Most users leave this field unset. If you need to use this field, contact your account team.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
name_server_set
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='nameServerSet') def name_server_set(self) -> str: '\n \n ' return pulumi.get(self, 'name_server_set')
@property @pulumi.getter(name='nameServerSet') def name_server_set(self) -> str: '\n \n ' return pulumi.get(self, 'name_server_set')<|docstring|>Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is a set of DNS name servers that all host the same ManagedZones. Most users leave this field unset. If you need to use this field, contact your account team.<|endoftext|>
62ea1e63f3b34e9fdf56db7337efd0908cb32300414166ab970c0e185ada4ac7
@property @pulumi.getter(name='nameServers') def name_servers(self) -> Sequence[str]: '\n Delegate your managed_zone to these virtual name servers; defined by the server (output only)\n ' return pulumi.get(self, 'name_servers')
Delegate your managed_zone to these virtual name servers; defined by the server (output only)
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
name_servers
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='nameServers') def name_servers(self) -> Sequence[str]: '\n \n ' return pulumi.get(self, 'name_servers')
@property @pulumi.getter(name='nameServers') def name_servers(self) -> Sequence[str]: '\n \n ' return pulumi.get(self, 'name_servers')<|docstring|>Delegate your managed_zone to these virtual name servers; defined by the server (output only)<|endoftext|>
faf613d79e1b3490f00478f4589406b0b5d371e6518cd14118f6d9852a943661
@property @pulumi.getter(name='peeringConfig') def peering_config(self) -> 'outputs.ManagedZonePeeringConfigResponse': '\n The presence of this field indicates that DNS Peering is enabled for this zone. The value of this field contains the network to peer with.\n ' return pulumi.get(self, 'peering_config')
The presence of this field indicates that DNS Peering is enabled for this zone. The value of this field contains the network to peer with.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
peering_config
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='peeringConfig') def peering_config(self) -> 'outputs.ManagedZonePeeringConfigResponse': '\n \n ' return pulumi.get(self, 'peering_config')
@property @pulumi.getter(name='peeringConfig') def peering_config(self) -> 'outputs.ManagedZonePeeringConfigResponse': '\n \n ' return pulumi.get(self, 'peering_config')<|docstring|>The presence of this field indicates that DNS Peering is enabled for this zone. The value of this field contains the network to peer with.<|endoftext|>
ca6b5a0953b87bb34a5488822ccbfe5725677912670162e50961f857b3ad7486
@property @pulumi.getter(name='privateVisibilityConfig') def private_visibility_config(self) -> 'outputs.ManagedZonePrivateVisibilityConfigResponse': '\n For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from.\n ' return pulumi.get(self, 'private_visibility_config')
For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
private_visibility_config
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='privateVisibilityConfig') def private_visibility_config(self) -> 'outputs.ManagedZonePrivateVisibilityConfigResponse': '\n \n ' return pulumi.get(self, 'private_visibility_config')
@property @pulumi.getter(name='privateVisibilityConfig') def private_visibility_config(self) -> 'outputs.ManagedZonePrivateVisibilityConfigResponse': '\n \n ' return pulumi.get(self, 'private_visibility_config')<|docstring|>For privately visible zones, the set of Virtual Private Cloud resources that the zone is visible from.<|endoftext|>
b9fee28e35373ad95599f7feebc4a4cc94f4f0d64786e9085ede2db14bb2717d
@property @pulumi.getter(name='reverseLookupConfig') def reverse_lookup_config(self) -> 'outputs.ManagedZoneReverseLookupConfigResponse': '\n The presence of this field indicates that this is a managed reverse lookup zone and Cloud DNS resolves reverse lookup queries using automatically configured records for VPC resources. This only applies to networks listed under private_visibility_config.\n ' return pulumi.get(self, 'reverse_lookup_config')
The presence of this field indicates that this is a managed reverse lookup zone and Cloud DNS resolves reverse lookup queries using automatically configured records for VPC resources. This only applies to networks listed under private_visibility_config.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
reverse_lookup_config
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='reverseLookupConfig') def reverse_lookup_config(self) -> 'outputs.ManagedZoneReverseLookupConfigResponse': '\n \n ' return pulumi.get(self, 'reverse_lookup_config')
@property @pulumi.getter(name='reverseLookupConfig') def reverse_lookup_config(self) -> 'outputs.ManagedZoneReverseLookupConfigResponse': '\n \n ' return pulumi.get(self, 'reverse_lookup_config')<|docstring|>The presence of this field indicates that this is a managed reverse lookup zone and Cloud DNS resolves reverse lookup queries using automatically configured records for VPC resources. This only applies to networks listed under private_visibility_config.<|endoftext|>
4d10e72c362621218e47d3757caf3bda326c2b9a5da7ba22f46e887c0456f710
@property @pulumi.getter(name='serviceDirectoryConfig') def service_directory_config(self) -> 'outputs.ManagedZoneServiceDirectoryConfigResponse': '\n This field links to the associated service directory namespace. Do not set this field for public zones or forwarding zones.\n ' return pulumi.get(self, 'service_directory_config')
This field links to the associated service directory namespace. Do not set this field for public zones or forwarding zones.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
service_directory_config
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter(name='serviceDirectoryConfig') def service_directory_config(self) -> 'outputs.ManagedZoneServiceDirectoryConfigResponse': '\n \n ' return pulumi.get(self, 'service_directory_config')
@property @pulumi.getter(name='serviceDirectoryConfig') def service_directory_config(self) -> 'outputs.ManagedZoneServiceDirectoryConfigResponse': '\n \n ' return pulumi.get(self, 'service_directory_config')<|docstring|>This field links to the associated service directory namespace. Do not set this field for public zones or forwarding zones.<|endoftext|>
c00d1d01fbcef72ecd879cff8e917d41cadc8b3faed09a89b9dbe08b574ad909
@property @pulumi.getter def visibility(self) -> str: "\n The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.\n " return pulumi.get(self, 'visibility')
The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.
sdk/python/pulumi_google_native/dns/v1beta2/get_managed_zone.py
visibility
AaronFriel/pulumi-google-native
44
python
@property @pulumi.getter def visibility(self) -> str: "\n \n " return pulumi.get(self, 'visibility')
@property @pulumi.getter def visibility(self) -> str: "\n \n " return pulumi.get(self, 'visibility')<|docstring|>The zone's visibility: public zones are exposed to the Internet, while private zones are visible only to Virtual Private Cloud resources.<|endoftext|>
917bab3ce30492d3411d440d925f796c06f50aa7f9df870d941ab4b3b3878279
@erdos.profile_method() def on_control_msg(self, msg): ' Invoked when a ControlMessage is received.\n\n Args:\n msg: A control.messages.ControlMessage message.\n ' self._logger.debug('@{}: received control message'.format(msg.timestamp)) if (self._flags.carla_mode == 'pseudo-asynchronous'): heapq.heappush(self._tick_events, (msg.timestamp.coordinates[0], TickEvent.CONTROL_CMD)) self._control_msgs[msg.timestamp.coordinates[0]] = msg self._consume_next_event() else: if (self._flags.control not in ['carla_auto_pilot', 'manual']): self._apply_control_msg(msg) self._tick_simulator()
Invoked when a ControlMessage is received. Args: msg: A control.messages.ControlMessage message.
pylot/simulation/carla_operator.py
on_control_msg
pschafhalter/pylot
0
python
@erdos.profile_method() def on_control_msg(self, msg): ' Invoked when a ControlMessage is received.\n\n Args:\n msg: A control.messages.ControlMessage message.\n ' self._logger.debug('@{}: received control message'.format(msg.timestamp)) if (self._flags.carla_mode == 'pseudo-asynchronous'): heapq.heappush(self._tick_events, (msg.timestamp.coordinates[0], TickEvent.CONTROL_CMD)) self._control_msgs[msg.timestamp.coordinates[0]] = msg self._consume_next_event() else: if (self._flags.control not in ['carla_auto_pilot', 'manual']): self._apply_control_msg(msg) self._tick_simulator()
@erdos.profile_method() def on_control_msg(self, msg): ' Invoked when a ControlMessage is received.\n\n Args:\n msg: A control.messages.ControlMessage message.\n ' self._logger.debug('@{}: received control message'.format(msg.timestamp)) if (self._flags.carla_mode == 'pseudo-asynchronous'): heapq.heappush(self._tick_events, (msg.timestamp.coordinates[0], TickEvent.CONTROL_CMD)) self._control_msgs[msg.timestamp.coordinates[0]] = msg self._consume_next_event() else: if (self._flags.control not in ['carla_auto_pilot', 'manual']): self._apply_control_msg(msg) self._tick_simulator()<|docstring|>Invoked when a ControlMessage is received. Args: msg: A control.messages.ControlMessage message.<|endoftext|>
df00b156a84faeccbccce90200c9938284cbed923abf191294f76f6baf9a2400
def send_actor_data(self, msg): ' Callback function that gets called when the world is ticked.\n This function sends a WatermarkMessage to the downstream operators as\n a signal that they need to release data to the rest of the pipeline.\n\n Args:\n msg: Data recieved from the simulation at a tick.\n ' with self._lock: game_time = int((msg.elapsed_seconds * 1000)) self._logger.info('The world is at the timestamp {}'.format(game_time)) timestamp = erdos.Timestamp(coordinates=[game_time]) watermark_msg = erdos.WatermarkMessage(timestamp) with erdos.profile((self.config.name + '.send_actor_data'), self, event_data={'timestamp': str(timestamp)}): if ((self._flags.carla_localization_frequency == (- 1)) or (self._next_localization_sensor_reading is None) or (game_time == self._next_localization_sensor_reading)): if (self._flags.carla_mode == 'pseudo-asynchronous'): self._update_next_localization_pseudo_async_ticks(game_time) self.__send_hero_vehicle_data(self.pose_stream, timestamp, watermark_msg) self.__send_ground_actors_data(timestamp, watermark_msg) self.__update_spectactor_pose() if ((self._flags.carla_mode == 'pseudo-asynchronous') and ((self._flags.carla_control_frequency == (- 1)) or (self._next_control_sensor_reading is None) or (game_time == self._next_control_sensor_reading))): self._update_next_control_pseudo_asynchronous_ticks(game_time) self.__send_hero_vehicle_data(self.pose_stream_for_control, timestamp, watermark_msg) self.__update_spectactor_pose()
Callback function that gets called when the world is ticked. This function sends a WatermarkMessage to the downstream operators as a signal that they need to release data to the rest of the pipeline. Args: msg: Data recieved from the simulation at a tick.
pylot/simulation/carla_operator.py
send_actor_data
pschafhalter/pylot
0
python
def send_actor_data(self, msg): ' Callback function that gets called when the world is ticked.\n This function sends a WatermarkMessage to the downstream operators as\n a signal that they need to release data to the rest of the pipeline.\n\n Args:\n msg: Data recieved from the simulation at a tick.\n ' with self._lock: game_time = int((msg.elapsed_seconds * 1000)) self._logger.info('The world is at the timestamp {}'.format(game_time)) timestamp = erdos.Timestamp(coordinates=[game_time]) watermark_msg = erdos.WatermarkMessage(timestamp) with erdos.profile((self.config.name + '.send_actor_data'), self, event_data={'timestamp': str(timestamp)}): if ((self._flags.carla_localization_frequency == (- 1)) or (self._next_localization_sensor_reading is None) or (game_time == self._next_localization_sensor_reading)): if (self._flags.carla_mode == 'pseudo-asynchronous'): self._update_next_localization_pseudo_async_ticks(game_time) self.__send_hero_vehicle_data(self.pose_stream, timestamp, watermark_msg) self.__send_ground_actors_data(timestamp, watermark_msg) self.__update_spectactor_pose() if ((self._flags.carla_mode == 'pseudo-asynchronous') and ((self._flags.carla_control_frequency == (- 1)) or (self._next_control_sensor_reading is None) or (game_time == self._next_control_sensor_reading))): self._update_next_control_pseudo_asynchronous_ticks(game_time) self.__send_hero_vehicle_data(self.pose_stream_for_control, timestamp, watermark_msg) self.__update_spectactor_pose()
def send_actor_data(self, msg): ' Callback function that gets called when the world is ticked.\n This function sends a WatermarkMessage to the downstream operators as\n a signal that they need to release data to the rest of the pipeline.\n\n Args:\n msg: Data recieved from the simulation at a tick.\n ' with self._lock: game_time = int((msg.elapsed_seconds * 1000)) self._logger.info('The world is at the timestamp {}'.format(game_time)) timestamp = erdos.Timestamp(coordinates=[game_time]) watermark_msg = erdos.WatermarkMessage(timestamp) with erdos.profile((self.config.name + '.send_actor_data'), self, event_data={'timestamp': str(timestamp)}): if ((self._flags.carla_localization_frequency == (- 1)) or (self._next_localization_sensor_reading is None) or (game_time == self._next_localization_sensor_reading)): if (self._flags.carla_mode == 'pseudo-asynchronous'): self._update_next_localization_pseudo_async_ticks(game_time) self.__send_hero_vehicle_data(self.pose_stream, timestamp, watermark_msg) self.__send_ground_actors_data(timestamp, watermark_msg) self.__update_spectactor_pose() if ((self._flags.carla_mode == 'pseudo-asynchronous') and ((self._flags.carla_control_frequency == (- 1)) or (self._next_control_sensor_reading is None) or (game_time == self._next_control_sensor_reading))): self._update_next_control_pseudo_asynchronous_ticks(game_time) self.__send_hero_vehicle_data(self.pose_stream_for_control, timestamp, watermark_msg) self.__update_spectactor_pose()<|docstring|>Callback function that gets called when the world is ticked. This function sends a WatermarkMessage to the downstream operators as a signal that they need to release data to the rest of the pipeline. Args: msg: Data recieved from the simulation at a tick.<|endoftext|>
865981785d227c024292fa4eef02f26e3765e9d8a63f1d496965bd2c96a25e0b
def _initialize_world(self): ' Setups the world town, and activates the desired weather.' if (self._carla_version == '0.9.5'): pylot.simulation.utils.reset_world(self._world) else: self._world = self._client.load_world('Town{:02d}'.format(self._flags.carla_town)) self._logger.info('Setting the weather to {}'.format(self._flags.carla_weather)) pylot.simulation.utils.set_weather(self._world, self._flags.carla_weather)
Setups the world town, and activates the desired weather.
pylot/simulation/carla_operator.py
_initialize_world
pschafhalter/pylot
0
python
def _initialize_world(self): ' ' if (self._carla_version == '0.9.5'): pylot.simulation.utils.reset_world(self._world) else: self._world = self._client.load_world('Town{:02d}'.format(self._flags.carla_town)) self._logger.info('Setting the weather to {}'.format(self._flags.carla_weather)) pylot.simulation.utils.set_weather(self._world, self._flags.carla_weather)
def _initialize_world(self): ' ' if (self._carla_version == '0.9.5'): pylot.simulation.utils.reset_world(self._world) else: self._world = self._client.load_world('Town{:02d}'.format(self._flags.carla_town)) self._logger.info('Setting the weather to {}'.format(self._flags.carla_weather)) pylot.simulation.utils.set_weather(self._world, self._flags.carla_weather)<|docstring|>Setups the world town, and activates the desired weather.<|endoftext|>
db67e090737ff6e887ea0789ad5eb522123c951af522dd846f26072d2b8bc671
def __send_world_data(self): ' Sends ego vehicle id, open drive and trajectory messages.' self.vehicle_id_stream.send(erdos.Message(erdos.Timestamp(coordinates=[0]), self._ego_vehicle.id)) self.vehicle_id_stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True))) self.open_drive_stream.send(erdos.Message(erdos.Timestamp(coordinates=[0]), self._world.get_map().to_opendrive())) top_watermark = erdos.WatermarkMessage(erdos.Timestamp(is_top=True)) self.open_drive_stream.send(top_watermark) self.global_trajectory_stream.send(top_watermark)
Sends ego vehicle id, open drive and trajectory messages.
pylot/simulation/carla_operator.py
__send_world_data
pschafhalter/pylot
0
python
def __send_world_data(self): ' ' self.vehicle_id_stream.send(erdos.Message(erdos.Timestamp(coordinates=[0]), self._ego_vehicle.id)) self.vehicle_id_stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True))) self.open_drive_stream.send(erdos.Message(erdos.Timestamp(coordinates=[0]), self._world.get_map().to_opendrive())) top_watermark = erdos.WatermarkMessage(erdos.Timestamp(is_top=True)) self.open_drive_stream.send(top_watermark) self.global_trajectory_stream.send(top_watermark)
def __send_world_data(self): ' ' self.vehicle_id_stream.send(erdos.Message(erdos.Timestamp(coordinates=[0]), self._ego_vehicle.id)) self.vehicle_id_stream.send(erdos.WatermarkMessage(erdos.Timestamp(is_top=True))) self.open_drive_stream.send(erdos.Message(erdos.Timestamp(coordinates=[0]), self._world.get_map().to_opendrive())) top_watermark = erdos.WatermarkMessage(erdos.Timestamp(is_top=True)) self.open_drive_stream.send(top_watermark) self.global_trajectory_stream.send(top_watermark)<|docstring|>Sends ego vehicle id, open drive and trajectory messages.<|endoftext|>
f9cd80677a422c5d6de1ced37654087e232f41716a20a1855629193aae63f256
@classmethod def elements_sequence(cls): 'returning all elements names from ``DeviceUsage`` according specification,\n with preserving original sequence order.\n ' return ['id', 'meta', 'implicitRules', 'language', 'text', 'contained', 'extension', 'modifierExtension', 'identifier', 'basedOn', 'status', 'category', 'subject', 'derivedFrom', 'context', 'timingTiming', 'timingPeriod', 'timingDateTime', 'dateAsserted', 'usageStatus', 'usageReason', 'informationSource', 'device', 'reason', 'bodySite', 'note']
returning all elements names from ``DeviceUsage`` according specification, with preserving original sequence order.
fhir/resources/deviceusage.py
elements_sequence
glichtner/fhir.resources
0
python
@classmethod def elements_sequence(cls): 'returning all elements names from ``DeviceUsage`` according specification,\n with preserving original sequence order.\n ' return ['id', 'meta', 'implicitRules', 'language', 'text', 'contained', 'extension', 'modifierExtension', 'identifier', 'basedOn', 'status', 'category', 'subject', 'derivedFrom', 'context', 'timingTiming', 'timingPeriod', 'timingDateTime', 'dateAsserted', 'usageStatus', 'usageReason', 'informationSource', 'device', 'reason', 'bodySite', 'note']
@classmethod def elements_sequence(cls): 'returning all elements names from ``DeviceUsage`` according specification,\n with preserving original sequence order.\n ' return ['id', 'meta', 'implicitRules', 'language', 'text', 'contained', 'extension', 'modifierExtension', 'identifier', 'basedOn', 'status', 'category', 'subject', 'derivedFrom', 'context', 'timingTiming', 'timingPeriod', 'timingDateTime', 'dateAsserted', 'usageStatus', 'usageReason', 'informationSource', 'device', 'reason', 'bodySite', 'note']<|docstring|>returning all elements names from ``DeviceUsage`` according specification, with preserving original sequence order.<|endoftext|>
be7b7158d459c5a7f40d81c95cd6f04d62455520f6ac27a99cccefc678d0eacf
@root_validator(pre=True, allow_reuse=True) def validate_required_primitive_elements_1262(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]: 'https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n ' required_fields = [('status', 'status__ext')] _missing = object() def _fallback(): return '' errors: typing.List['ErrorWrapper'] = [] for (name, ext) in required_fields: field = cls.__fields__[name] ext_field = cls.__fields__[ext] value = values.get(field.alias, _missing) if (value not in (_missing, None)): continue ext_value = values.get(ext_field.alias, _missing) missing_ext = True if (ext_value not in (_missing, None)): if isinstance(ext_value, dict): missing_ext = (len(ext_value.get('extension', [])) == 0) elif (getattr(ext_value.__class__, 'get_resource_type', _fallback)() == 'FHIRPrimitiveExtension'): if (ext_value.extension and (len(ext_value.extension) > 0)): missing_ext = False else: validate_pass = True for validator in ext_field.type_.__get_validators__(): try: ext_value = validator(v=ext_value) except ValidationError as exc: errors.append(ErrorWrapper(exc, loc=ext_field.alias)) validate_pass = False if (not validate_pass): continue if (ext_value.extension and (len(ext_value.extension) > 0)): missing_ext = False if missing_ext: if (value is _missing): errors.append(ErrorWrapper(MissingError(), loc=field.alias)) else: errors.append(ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)) if (len(errors) > 0): raise ValidationError(errors, cls) return values
https://www.hl7.org/fhir/extensibility.html#Special-Case In some cases, implementers might find that they do not have appropriate data for an element with minimum cardinality = 1. In this case, the element must be present, but unless the resource or a profile on it has made the actual value of the primitive data type mandatory, it is possible to provide an extension that explains why the primitive value is not present.
fhir/resources/deviceusage.py
validate_required_primitive_elements_1262
glichtner/fhir.resources
0
python
@root_validator(pre=True, allow_reuse=True) def validate_required_primitive_elements_1262(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]: 'https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n ' required_fields = [('status', 'status__ext')] _missing = object() def _fallback(): return errors: typing.List['ErrorWrapper'] = [] for (name, ext) in required_fields: field = cls.__fields__[name] ext_field = cls.__fields__[ext] value = values.get(field.alias, _missing) if (value not in (_missing, None)): continue ext_value = values.get(ext_field.alias, _missing) missing_ext = True if (ext_value not in (_missing, None)): if isinstance(ext_value, dict): missing_ext = (len(ext_value.get('extension', [])) == 0) elif (getattr(ext_value.__class__, 'get_resource_type', _fallback)() == 'FHIRPrimitiveExtension'): if (ext_value.extension and (len(ext_value.extension) > 0)): missing_ext = False else: validate_pass = True for validator in ext_field.type_.__get_validators__(): try: ext_value = validator(v=ext_value) except ValidationError as exc: errors.append(ErrorWrapper(exc, loc=ext_field.alias)) validate_pass = False if (not validate_pass): continue if (ext_value.extension and (len(ext_value.extension) > 0)): missing_ext = False if missing_ext: if (value is _missing): errors.append(ErrorWrapper(MissingError(), loc=field.alias)) else: errors.append(ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)) if (len(errors) > 0): raise ValidationError(errors, cls) return values
@root_validator(pre=True, allow_reuse=True) def validate_required_primitive_elements_1262(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]: 'https://www.hl7.org/fhir/extensibility.html#Special-Case\n In some cases, implementers might find that they do not have appropriate data for\n an element with minimum cardinality = 1. In this case, the element must be present,\n but unless the resource or a profile on it has made the actual value of the primitive\n data type mandatory, it is possible to provide an extension that explains why\n the primitive value is not present.\n ' required_fields = [('status', 'status__ext')] _missing = object() def _fallback(): return errors: typing.List['ErrorWrapper'] = [] for (name, ext) in required_fields: field = cls.__fields__[name] ext_field = cls.__fields__[ext] value = values.get(field.alias, _missing) if (value not in (_missing, None)): continue ext_value = values.get(ext_field.alias, _missing) missing_ext = True if (ext_value not in (_missing, None)): if isinstance(ext_value, dict): missing_ext = (len(ext_value.get('extension', [])) == 0) elif (getattr(ext_value.__class__, 'get_resource_type', _fallback)() == 'FHIRPrimitiveExtension'): if (ext_value.extension and (len(ext_value.extension) > 0)): missing_ext = False else: validate_pass = True for validator in ext_field.type_.__get_validators__(): try: ext_value = validator(v=ext_value) except ValidationError as exc: errors.append(ErrorWrapper(exc, loc=ext_field.alias)) validate_pass = False if (not validate_pass): continue if (ext_value.extension and (len(ext_value.extension) > 0)): missing_ext = False if missing_ext: if (value is _missing): errors.append(ErrorWrapper(MissingError(), loc=field.alias)) else: errors.append(ErrorWrapper(NoneIsNotAllowedError(), loc=field.alias)) if (len(errors) > 0): raise ValidationError(errors, cls) return values<|docstring|>https://www.hl7.org/fhir/extensibility.html#Special-Case In some cases, implementers might find 
that they do not have appropriate data for an element with minimum cardinality = 1. In this case, the element must be present, but unless the resource or a profile on it has made the actual value of the primitive data type mandatory, it is possible to provide an extension that explains why the primitive value is not present.<|endoftext|>
0464226b7cf1d1a4a0f99ff088548010c14f5a592b34161344dbaebf8e95000f
@root_validator(pre=True, allow_reuse=True) def validate_one_of_many_1262(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]: 'https://www.hl7.org/fhir/formats.html#choice\n A few elements have a choice of more than one data type for their content.\n All such elements have a name that takes the form nnn[x].\n The "nnn" part of the name is constant, and the "[x]" is replaced with\n the title-cased name of the type that is actually used.\n The table view shows each of these names explicitly.\n\n Elements that have a choice of data type cannot repeat - they must have a\n maximum cardinality of 1. When constructing an instance of an element with a\n choice of types, the authoring system must create a single element with a\n data type chosen from among the list of permitted data types.\n ' one_of_many_fields = {'timing': ['timingDateTime', 'timingPeriod', 'timingTiming']} for (prefix, fields) in one_of_many_fields.items(): assert (cls.__fields__[fields[0]].field_info.extra['one_of_many'] == prefix) required = (cls.__fields__[fields[0]].field_info.extra['one_of_many_required'] is True) found = False for field in fields: if ((field in values) and (values[field] is not None)): if (found is True): raise ValueError(f'Any of one field value is expected from this list {fields}, but got multiple!') else: found = True if ((required is True) and (found is False)): raise ValueError(f'Expect any of field value from this list {fields}.') return values
https://www.hl7.org/fhir/formats.html#choice A few elements have a choice of more than one data type for their content. All such elements have a name that takes the form nnn[x]. The "nnn" part of the name is constant, and the "[x]" is replaced with the title-cased name of the type that is actually used. The table view shows each of these names explicitly. Elements that have a choice of data type cannot repeat - they must have a maximum cardinality of 1. When constructing an instance of an element with a choice of types, the authoring system must create a single element with a data type chosen from among the list of permitted data types.
fhir/resources/deviceusage.py
validate_one_of_many_1262
glichtner/fhir.resources
0
python
@root_validator(pre=True, allow_reuse=True) def validate_one_of_many_1262(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]: 'https://www.hl7.org/fhir/formats.html#choice\n A few elements have a choice of more than one data type for their content.\n All such elements have a name that takes the form nnn[x].\n The "nnn" part of the name is constant, and the "[x]" is replaced with\n the title-cased name of the type that is actually used.\n The table view shows each of these names explicitly.\n\n Elements that have a choice of data type cannot repeat - they must have a\n maximum cardinality of 1. When constructing an instance of an element with a\n choice of types, the authoring system must create a single element with a\n data type chosen from among the list of permitted data types.\n ' one_of_many_fields = {'timing': ['timingDateTime', 'timingPeriod', 'timingTiming']} for (prefix, fields) in one_of_many_fields.items(): assert (cls.__fields__[fields[0]].field_info.extra['one_of_many'] == prefix) required = (cls.__fields__[fields[0]].field_info.extra['one_of_many_required'] is True) found = False for field in fields: if ((field in values) and (values[field] is not None)): if (found is True): raise ValueError(f'Any of one field value is expected from this list {fields}, but got multiple!') else: found = True if ((required is True) and (found is False)): raise ValueError(f'Expect any of field value from this list {fields}.') return values
@root_validator(pre=True, allow_reuse=True) def validate_one_of_many_1262(cls, values: typing.Dict[(str, typing.Any)]) -> typing.Dict[(str, typing.Any)]: 'https://www.hl7.org/fhir/formats.html#choice\n A few elements have a choice of more than one data type for their content.\n All such elements have a name that takes the form nnn[x].\n The "nnn" part of the name is constant, and the "[x]" is replaced with\n the title-cased name of the type that is actually used.\n The table view shows each of these names explicitly.\n\n Elements that have a choice of data type cannot repeat - they must have a\n maximum cardinality of 1. When constructing an instance of an element with a\n choice of types, the authoring system must create a single element with a\n data type chosen from among the list of permitted data types.\n ' one_of_many_fields = {'timing': ['timingDateTime', 'timingPeriod', 'timingTiming']} for (prefix, fields) in one_of_many_fields.items(): assert (cls.__fields__[fields[0]].field_info.extra['one_of_many'] == prefix) required = (cls.__fields__[fields[0]].field_info.extra['one_of_many_required'] is True) found = False for field in fields: if ((field in values) and (values[field] is not None)): if (found is True): raise ValueError(f'Any of one field value is expected from this list {fields}, but got multiple!') else: found = True if ((required is True) and (found is False)): raise ValueError(f'Expect any of field value from this list {fields}.') return values<|docstring|>https://www.hl7.org/fhir/formats.html#choice A few elements have a choice of more than one data type for their content. All such elements have a name that takes the form nnn[x]. The "nnn" part of the name is constant, and the "[x]" is replaced with the title-cased name of the type that is actually used. The table view shows each of these names explicitly. Elements that have a choice of data type cannot repeat - they must have a maximum cardinality of 1. 
When constructing an instance of an element with a choice of types, the authoring system must create a single element with a data type chosen from among the list of permitted data types.<|endoftext|>
6e385a24e9a27f86b1a33ef3124c215802dae2c27fc55c059e0b2bfab774302f
def clear_diacritics(text):
    """Strip every standard Arabic diacritic from *text*, keeping letters only.

    :param text: str, the diacritized text.
    :return: str, the text with all diacritics removed.
    """
    assert isinstance(text, str)
    return ''.join(char for char in text if char not in ARABIC_DIACRITICS)
Remove all standard diacritics from the text, leaving the letters only. :param text: str, the diacritized text. :return: str, the text undiacritized.
pipeline_diacritizer/dataset_preprocessing.py
clear_diacritics
Hamza5/Pipeline-diacritizer
8
python
def clear_diacritics(text): '\n Remove all standard diacritics from the text, leaving the letters only.\n :param text: str, the diacritized text.\n :return: str, the text undiacritized.\n ' assert isinstance(text, str) return .join([l for l in text if (l not in ARABIC_DIACRITICS)])
def clear_diacritics(text): '\n Remove all standard diacritics from the text, leaving the letters only.\n :param text: str, the diacritized text.\n :return: str, the text undiacritized.\n ' assert isinstance(text, str) return .join([l for l in text if (l not in ARABIC_DIACRITICS)])<|docstring|>Remove all standard diacritics from the text, leaving the letters only. :param text: str, the diacritized text. :return: str, the text undiacritized.<|endoftext|>
4f276a34b56d7be7fffb6bc6a7bbf3022f8e363bc441455d27e592597b80acdd
def keep_selected_diacritics(text, diacritics):
    """Remove every standard diacritic that is not among the selected ones.

    :param text: str, the diacritized text.
    :param diacritics: set of str, diacritics to be kept.
    :return: str, the text without the diacritics that should be removed.
    """
    assert isinstance(text, str)
    assert isinstance(diacritics, set) and diacritics.issubset(ARABIC_DIACRITICS)
    # Compute the set of diacritics to drop once, instead of re-deriving the
    # set difference for every single character of the text.
    unwanted = ARABIC_DIACRITICS - diacritics
    return ''.join(l for l in text if l not in unwanted)
Remove only the standard diacritics which are not specified. :param text: str, the diacritized text. :param diacritics: set of str, diacritics to be kept. :return: the text without the diacritics that should be removed.
pipeline_diacritizer/dataset_preprocessing.py
keep_selected_diacritics
Hamza5/Pipeline-diacritizer
8
python
def keep_selected_diacritics(text, diacritics): '\n Remove only the standard diacritics which are not specified.\n :param text: str, the diacritized text.\n :param diacritics: set of str, diacritics to be kept.\n :return: the text without the diacritics that should be removed.\n ' assert isinstance(text, str) assert (isinstance(diacritics, set) and diacritics.issubset(ARABIC_DIACRITICS)) return .join([l for l in text if (l not in (ARABIC_DIACRITICS - diacritics))])
def keep_selected_diacritics(text, diacritics): '\n Remove only the standard diacritics which are not specified.\n :param text: str, the diacritized text.\n :param diacritics: set of str, diacritics to be kept.\n :return: the text without the diacritics that should be removed.\n ' assert isinstance(text, str) assert (isinstance(diacritics, set) and diacritics.issubset(ARABIC_DIACRITICS)) return .join([l for l in text if (l not in (ARABIC_DIACRITICS - diacritics))])<|docstring|>Remove only the standard diacritics which are not specified. :param text: str, the diacritized text. :param diacritics: set of str, diacritics to be kept. :return: the text without the diacritics that should be removed.<|endoftext|>
1ec612731d1abd4722b8fa9021b90cca80db4ce7acbf4052a77888cece2f5a3f
def extract_diacritics(text):
    """Return the diacritics from the text while keeping their original positions.

    Each letter position yields one entry: the diacritic that follows the
    letter, or an empty string when the letter is bare.

    :param text: str, the diacritized text.
    :return: list of str, the diacritics.
    """
    assert isinstance(text, str)
    if not text:
        # Guard: the original indexing of text[-1] below raises on ''.
        return []
    diacritics = []
    for i in range(1, len(text)):
        if text[i] in ARABIC_DIACRITICS:
            diacritics.append(text[i])
        elif text[i - 1] not in ARABIC_DIACRITICS:
            # Previous character is an undiacritized letter.
            diacritics.append('')
    if text[-1] not in ARABIC_DIACRITICS:
        diacritics.append('')
    return diacritics
Return the diacritics from the text while keeping their original positions. :param text: str, the diacritized text. :return: list of str, the diacritics
pipeline_diacritizer/dataset_preprocessing.py
extract_diacritics
Hamza5/Pipeline-diacritizer
8
python
def extract_diacritics(text): '\n Return the diacritics from the text while keeping their original positions.\n :param text: str, the diacritized text.\n :return: list of str, the diacritics\n ' assert isinstance(text, str) diacritics = [] for i in range(1, len(text)): if (text[i] in ARABIC_DIACRITICS): diacritics.append(text[i]) elif (text[(i - 1)] not in ARABIC_DIACRITICS): diacritics.append() if (text[(- 1)] not in ARABIC_DIACRITICS): diacritics.append() return diacritics
def extract_diacritics(text): '\n Return the diacritics from the text while keeping their original positions.\n :param text: str, the diacritized text.\n :return: list of str, the diacritics\n ' assert isinstance(text, str) diacritics = [] for i in range(1, len(text)): if (text[i] in ARABIC_DIACRITICS): diacritics.append(text[i]) elif (text[(i - 1)] not in ARABIC_DIACRITICS): diacritics.append() if (text[(- 1)] not in ARABIC_DIACRITICS): diacritics.append() return diacritics<|docstring|>Return the diacritics from the text while keeping their original positions. :param text: str, the diacritized text. :return: list of str, the diacritics<|endoftext|>
4eed26137dd64570d795a41ff1c755b90666bef137618b648dba267bc3c55b38
def extract_diacritics_2(text):
    """Return the diacritics with their positions, pairing Shadda marks.

    Like ``extract_diacritics``, but when a Shadda is immediately followed by
    another diacritic the two are stored together as a tuple in one position.

    :param text: str, the diacritized text.
    :return: list, the diacritics. Positions with double diacritics have a
        tuple as elements.
    """
    assert isinstance(text, str)
    if not text:
        # Guard: the original indexing of text[-1] below raises on ''.
        return []
    diacritics = []
    for i in range(1, len(text)):
        if text[i] in ARABIC_DIACRITICS:
            if text[i - 1] == NAME2DIACRITIC['Shadda']:
                # Merge with the preceding Shadda into a (Shadda, mark) pair.
                diacritics[-1] = (text[i - 1], text[i])
            else:
                diacritics.append(text[i])
        elif text[i - 1] not in ARABIC_DIACRITICS:
            diacritics.append('')
    if text[-1] not in ARABIC_DIACRITICS:
        diacritics.append('')
    return diacritics
Return the diacritics from the text while keeping their original positions including the Shadda marks. :param text: str, the diacritized text. :return: list, the diacritics. Positions with double diacritics have a tuple as elements.
pipeline_diacritizer/dataset_preprocessing.py
extract_diacritics_2
Hamza5/Pipeline-diacritizer
8
python
def extract_diacritics_2(text): '\n Return the diacritics from the text while keeping their original positions including the Shadda marks.\n :param text: str, the diacritized text.\n :return: list, the diacritics. Positions with double diacritics have a tuple as elements.\n ' assert isinstance(text, str) diacritics = [] for i in range(1, len(text)): if (text[i] in ARABIC_DIACRITICS): if (text[(i - 1)] == NAME2DIACRITIC['Shadda']): diacritics[(- 1)] = (text[(i - 1)], text[i]) else: diacritics.append(text[i]) elif (text[(i - 1)] not in ARABIC_DIACRITICS): diacritics.append() if (text[(- 1)] not in ARABIC_DIACRITICS): diacritics.append() return diacritics
def extract_diacritics_2(text): '\n Return the diacritics from the text while keeping their original positions including the Shadda marks.\n :param text: str, the diacritized text.\n :return: list, the diacritics. Positions with double diacritics have a tuple as elements.\n ' assert isinstance(text, str) diacritics = [] for i in range(1, len(text)): if (text[i] in ARABIC_DIACRITICS): if (text[(i - 1)] == NAME2DIACRITIC['Shadda']): diacritics[(- 1)] = (text[(i - 1)], text[i]) else: diacritics.append(text[i]) elif (text[(i - 1)] not in ARABIC_DIACRITICS): diacritics.append() if (text[(- 1)] not in ARABIC_DIACRITICS): diacritics.append() return diacritics<|docstring|>Return the diacritics from the text while keeping their original positions including the Shadda marks. :param text: str, the diacritized text. :return: list, the diacritics. Positions with double diacritics have a tuple as elements.<|endoftext|>
73da2915de935f432ba579615518371728a151e51cb2dca24b06d05abe7141ac
def merge_diacritics(undiacritized_text, diacritics):
    """Rebuild the diacritized text from bare letters and their diacritics.

    :param undiacritized_text: str, the undiacritized text.
    :param diacritics: list of str, the corresponding diacritics, as returned
        by the ``extract_diacritics`` function.
    :return: str, the diacritized text.
    """
    assert isinstance(undiacritized_text, str)
    assert set(diacritics).issubset(ARABIC_DIACRITICS.union(['']))
    parts = []
    letter_idx = 0
    diac_idx = 0
    while letter_idx < len(undiacritized_text) and diac_idx < len(diacritics):
        parts.append(undiacritized_text[letter_idx])
        letter_idx += 1
        mark = diacritics[diac_idx]
        if mark in ARABIC_DIACRITICS:
            parts.append(mark)
            # A Shadda may carry a second, different diacritic right after it.
            if (DIACRITIC2NAME[mark] == 'Shadda' and diac_idx + 1 < len(diacritics)
                    and diacritics[diac_idx + 1] in (ARABIC_DIACRITICS - {mark})):
                parts.append(diacritics[diac_idx + 1])
                diac_idx += 1
        diac_idx += 1
    return ''.join(parts)
Reconstruct the diacritized text from an undiacritized text and a list of corresponding diacritics. :param undiacritized_text: str, the undiacritized text. :param diacritics: list of str, the corresponding diacritics, as returned by extract_diacritics function. :return: str, the diacritized text.
pipeline_diacritizer/dataset_preprocessing.py
merge_diacritics
Hamza5/Pipeline-diacritizer
8
python
def merge_diacritics(undiacritized_text, diacritics): '\n Reconstruct the diacritized text from an undiacritized text and a list of corresponding diacritics.\n :param undiacritized_text: str, the undiacritized text.\n :param diacritics: list of str, the corresponding diacritics, as returned by extract_diacritics function.\n :return: str, the diacritized text.\n ' assert isinstance(undiacritized_text, str) assert set(diacritics).issubset(ARABIC_DIACRITICS.union([])) i = 0 j = 0 sequence = [] while ((i < len(undiacritized_text)) and (j < len(diacritics))): sequence.append(undiacritized_text[i]) i += 1 if (diacritics[j] in ARABIC_DIACRITICS): sequence.append(diacritics[j]) if ((DIACRITIC2NAME[diacritics[j]] == 'Shadda') and ((j + 1) < len(diacritics)) and (diacritics[(j + 1)] in (ARABIC_DIACRITICS - {diacritics[j]}))): sequence.append(diacritics[(j + 1)]) j += 1 j += 1 return .join(sequence)
def merge_diacritics(undiacritized_text, diacritics): '\n Reconstruct the diacritized text from an undiacritized text and a list of corresponding diacritics.\n :param undiacritized_text: str, the undiacritized text.\n :param diacritics: list of str, the corresponding diacritics, as returned by extract_diacritics function.\n :return: str, the diacritized text.\n ' assert isinstance(undiacritized_text, str) assert set(diacritics).issubset(ARABIC_DIACRITICS.union([])) i = 0 j = 0 sequence = [] while ((i < len(undiacritized_text)) and (j < len(diacritics))): sequence.append(undiacritized_text[i]) i += 1 if (diacritics[j] in ARABIC_DIACRITICS): sequence.append(diacritics[j]) if ((DIACRITIC2NAME[diacritics[j]] == 'Shadda') and ((j + 1) < len(diacritics)) and (diacritics[(j + 1)] in (ARABIC_DIACRITICS - {diacritics[j]}))): sequence.append(diacritics[(j + 1)]) j += 1 j += 1 return .join(sequence)<|docstring|>Reconstruct the diacritized text from an undiacritized text and a list of corresponding diacritics. :param undiacritized_text: str, the undiacritized text. :param diacritics: list of str, the corresponding diacritics, as returned by extract_diacritics function. :return: str, the diacritized text.<|endoftext|>
a9c5d33307752a0792c4f6fa9b9f1c2a5b3415a429dfff462f0eb171df8a4314
def fix_diacritics_errors(diacritized_text):
    """Fix and normalize some diacritization errors in the sentences.

    Drops redundant Sukun marks, normalizes the Tanween/Alef order, moves a
    diacritic written before a Shadda after it, and removes stray or
    duplicated diacritics.

    :param diacritized_text: the text containing the arabic letters with diacritics.
    :return: str, the fixed text.
    """
    assert isinstance(diacritized_text, str)
    diacritized_text = EXTRA_SUKUN_REGEXP.sub('', diacritized_text)
    diacritized_text = diacritized_text.replace('اً', 'ًا')
    diacritized_text = DIACRITIC_SHADDA_REGEXP.sub('\\2\\1', diacritized_text)
    if not diacritized_text:
        # Guard: indexing [0] below would raise IndexError on an empty string.
        return diacritized_text
    fixed_text = diacritized_text[0]
    for x in diacritized_text[1:]:
        if x in ARABIC_DIACRITICS and fixed_text[-1] in ARABIC_DIACRITICS:
            # Two diacritics in a row: drop the first, unless it is a Shadda
            # followed by a different mark (a legal combination).
            if fixed_text[-1] != NAME2DIACRITIC['Shadda'] or x == NAME2DIACRITIC['Shadda']:
                fixed_text = fixed_text[:-1]
        elif x in ARABIC_DIACRITICS and fixed_text[-1] not in ARABIC_LETTERS:
            # A diacritic not attached to an Arabic letter is discarded.
            continue
        fixed_text += x
    return fixed_text
Fix and normalize some diacritization errors in the sentences. :param diacritized_text: the text containing the arabic letters with diacritics. :return: str, the fixed text.
pipeline_diacritizer/dataset_preprocessing.py
fix_diacritics_errors
Hamza5/Pipeline-diacritizer
8
python
def fix_diacritics_errors(diacritized_text): '\n Fix and normalize some diacritization errors in the sentences.\n :param diacritized_text: the text containing the arabic letters with diacritics.\n :return: str, the fixed text.\n ' assert isinstance(diacritized_text, str) diacritized_text = EXTRA_SUKUN_REGEXP.sub(, diacritized_text) diacritized_text = diacritized_text.replace('اً', 'ًا') diacritized_text = DIACRITIC_SHADDA_REGEXP.sub('\\2\\1', diacritized_text) fixed_text = diacritized_text[0] for x in diacritized_text[1:]: if ((x in ARABIC_DIACRITICS) and (fixed_text[(- 1)] in ARABIC_DIACRITICS)): if ((fixed_text[(- 1)] != NAME2DIACRITIC['Shadda']) or (x == NAME2DIACRITIC['Shadda'])): fixed_text = fixed_text[:(- 1)] elif ((x in ARABIC_DIACRITICS) and (fixed_text[(- 1)] not in ARABIC_LETTERS)): continue fixed_text += x return fixed_text
def fix_diacritics_errors(diacritized_text): '\n Fix and normalize some diacritization errors in the sentences.\n :param diacritized_text: the text containing the arabic letters with diacritics.\n :return: str, the fixed text.\n ' assert isinstance(diacritized_text, str) diacritized_text = EXTRA_SUKUN_REGEXP.sub(, diacritized_text) diacritized_text = diacritized_text.replace('اً', 'ًا') diacritized_text = DIACRITIC_SHADDA_REGEXP.sub('\\2\\1', diacritized_text) fixed_text = diacritized_text[0] for x in diacritized_text[1:]: if ((x in ARABIC_DIACRITICS) and (fixed_text[(- 1)] in ARABIC_DIACRITICS)): if ((fixed_text[(- 1)] != NAME2DIACRITIC['Shadda']) or (x == NAME2DIACRITIC['Shadda'])): fixed_text = fixed_text[:(- 1)] elif ((x in ARABIC_DIACRITICS) and (fixed_text[(- 1)] not in ARABIC_LETTERS)): continue fixed_text += x return fixed_text<|docstring|>Fix and normalize some diacritization errors in the sentences. :param diacritized_text: the text containing the arabic letters with diacritics. :return: str, the fixed text.<|endoftext|>
344840c6c1e1d4805bc478d2f1178f37d7494d17f9e86f25af6f97bce0ef6234
def clean_text(text):
    """Remove the unwanted characters from the text.

    Strips the Tatweel character and HTML-encoded quotes, then removes
    date/time patterns.

    :param text: str, the unclean text.
    :return: str, the cleaned text.
    """
    assert isinstance(text, str)
    stripped = text.replace('ـ', '').replace('&quot;', '')
    return DATETIME_REGEXP.sub('', stripped)
Remove the unwanted characters from the text. :param text: str, the unclean text. :return: str, the cleaned text.
pipeline_diacritizer/dataset_preprocessing.py
clean_text
Hamza5/Pipeline-diacritizer
8
python
def clean_text(text): '\n Remove the unwanted characters from the text.\n :param text: str, the unclean text.\n :return: str, the cleaned text.\n ' assert isinstance(text, str) return DATETIME_REGEXP.sub(, text.replace('ـ', ).replace('&quot;', ))
def clean_text(text): '\n Remove the unwanted characters from the text.\n :param text: str, the unclean text.\n :return: str, the cleaned text.\n ' assert isinstance(text, str) return DATETIME_REGEXP.sub(, text.replace('ـ', ).replace('&quot;', ))<|docstring|>Remove the unwanted characters from the text. :param text: str, the unclean text. :return: str, the cleaned text.<|endoftext|>
145bbd287ab9e8c636652a657565e43cd6ae9a7870aa608adcad57509202b2fb
def tokenize(sentence):
    """Tokenize a sentence into a list of words.

    :param sentence: str, the sentence to be tokenized.
    :return: list of str, list containing the words.
    """
    assert isinstance(sentence, str)
    return [token for token in re.split(WORD_TOKENIZATION_REGEXP, sentence)
            if token != '' and token.isprintable()]
Tokenize a sentence into a list of words. :param sentence: str, the sentence to be tokenized. :return: list of str, list containing the words.
pipeline_diacritizer/dataset_preprocessing.py
tokenize
Hamza5/Pipeline-diacritizer
8
python
def tokenize(sentence): '\n Tokenize a sentence into a list of words.\n :param sentence: str, the sentence to be tokenized.\n :return: list of str, list containing the words.\n ' assert isinstance(sentence, str) return list(filter((lambda x: ((x != ) and x.isprintable())), re.split(WORD_TOKENIZATION_REGEXP, sentence)))
def tokenize(sentence): '\n Tokenize a sentence into a list of words.\n :param sentence: str, the sentence to be tokenized.\n :return: list of str, list containing the words.\n ' assert isinstance(sentence, str) return list(filter((lambda x: ((x != ) and x.isprintable())), re.split(WORD_TOKENIZATION_REGEXP, sentence)))<|docstring|>Tokenize a sentence into a list of words. :param sentence: str, the sentence to be tokenized. :return: list of str, list containing the words.<|endoftext|>
2ebef728a8c5902dbb1e25829e961ef6b8582929713ca5a395e66878c705a22a
def filter_tokenized_sentence(sentence, min_words=2, min_word_diac_rate=0.8, min_word_diac_ratio=0.5):
    """Accept or reject a tokenized sentence, returning its cleaned tokens.

    :param sentence: list of str, the sentence to be filtered.
    :param min_words: minimum number of arabic words that must be left in the
        cleaned sentence in order to be accepted.
    :param min_word_diac_rate: minimum rate of the diacritized words to the
        number of arabic words in the sentence.
    :param min_word_diac_ratio: minimum ratio of the diacritized letters to
        the number of letters in the word.
    :return: list of str, the cleaned tokens, or an empty list when rejected.
    """
    assert isinstance(sentence, list) and all(isinstance(w, str) for w in sentence)
    assert min_words >= 0
    assert min_word_diac_rate >= 0
    cleaned = []
    if sentence:
        diacritized_count = 0
        arabic_count = 0
        for raw_token in sentence:
            token = raw_token.strip()
            if not token:
                continue
            if set(token) & ARABIC_LETTERS:
                arabic_count += 1
                marks = extract_diacritics_2(token)
                diacritized_letters = len([m for m in marks if m])
                # A word counts as diacritized when enough of its letters are.
                if diacritized_letters / len(marks) >= min_word_diac_ratio:
                    diacritized_count += 1
            cleaned.append(token)
        if arabic_count > 0 and arabic_count >= min_words:
            if diacritized_count / arabic_count >= min_word_diac_rate:
                return cleaned
    return []
Accept or void a sentence, and clean the tokens. :param sentence: the sentence to be filtered. :param min_words: minimum number of arabic words that must be left in the cleaned sentence in order to be accepted. :param min_word_diac_rate: rate of the diacritized words to the number of arabic words in the sentence. :param min_word_diac_ratio: ratio of the diacritized letters to the number of letters in the word. :return: list of str, the cleaned tokens or an empty list.
pipeline_diacritizer/dataset_preprocessing.py
filter_tokenized_sentence
Hamza5/Pipeline-diacritizer
8
python
def filter_tokenized_sentence(sentence, min_words=2, min_word_diac_rate=0.8, min_word_diac_ratio=0.5): '\n Accept or void a sentence, and clean the tokens.\n :param sentence: the sentence to be filtered.\n :param min_words: minimum number of arabic words that must be left in the cleaned sentence in order to be accepted.\n :param min_word_diac_rate: rate of the diacritized words to the number of arabic words in the sentence.\n :param min_word_diac_ratio: ratio of the diacritized letters to the number of letters in the word.\n :return: list of str, the cleaned tokens or an empty list.\n ' assert (isinstance(sentence, list) and all((isinstance(w, str) for w in sentence))) assert (min_words >= 0) assert (min_word_diac_rate >= 0) new_sentence = [] if (len(sentence) > 0): diac_word_count = 0 arabic_word_count = 0 for token in sentence: token = token.strip() if (not token): continue word_chars = set(token) if ((word_chars & ARABIC_LETTERS) != set()): arabic_word_count += 1 word_diacs = extract_diacritics_2(token) if ((len([x for x in word_diacs if x]) / len(word_diacs)) >= min_word_diac_ratio): diac_word_count += 1 new_sentence.append(token) if ((arabic_word_count > 0) and (arabic_word_count >= min_words)): if ((diac_word_count / arabic_word_count) >= min_word_diac_rate): return new_sentence return []
def filter_tokenized_sentence(sentence, min_words=2, min_word_diac_rate=0.8, min_word_diac_ratio=0.5): '\n Accept or void a sentence, and clean the tokens.\n :param sentence: the sentence to be filtered.\n :param min_words: minimum number of arabic words that must be left in the cleaned sentence in order to be accepted.\n :param min_word_diac_rate: rate of the diacritized words to the number of arabic words in the sentence.\n :param min_word_diac_ratio: ratio of the diacritized letters to the number of letters in the word.\n :return: list of str, the cleaned tokens or an empty list.\n ' assert (isinstance(sentence, list) and all((isinstance(w, str) for w in sentence))) assert (min_words >= 0) assert (min_word_diac_rate >= 0) new_sentence = [] if (len(sentence) > 0): diac_word_count = 0 arabic_word_count = 0 for token in sentence: token = token.strip() if (not token): continue word_chars = set(token) if ((word_chars & ARABIC_LETTERS) != set()): arabic_word_count += 1 word_diacs = extract_diacritics_2(token) if ((len([x for x in word_diacs if x]) / len(word_diacs)) >= min_word_diac_ratio): diac_word_count += 1 new_sentence.append(token) if ((arabic_word_count > 0) and (arabic_word_count >= min_words)): if ((diac_word_count / arabic_word_count) >= min_word_diac_rate): return new_sentence return []<|docstring|>Accept or void a sentence, and clean the tokens. :param sentence: the sentence to be filtered. :param min_words: minimum number of arabic words that must be left in the cleaned sentence in order to be accepted. :param min_word_diac_rate: rate of the diacritized words to the number of arabic words in the sentence. :param min_word_diac_ratio: ratio of the diacritized letters to the number of letters in the word. :return: list of str, the cleaned tokens or an empty list.<|endoftext|>
24504c9d6a56638df64b9ed7bbe2f3dca63fdd7f7eb39de7f895d13ea387f090
def read_text_file(file_path):
    """Read a text file and split its contents into individual sentences.

    :param file_path: str, the path of the file.
    :return: list of str, each str is a sentence.
    """
    assert isinstance(file_path, str)
    sentences = []
    separators = set(SENTENCE_SEPARATORS)
    with open(file_path, 'rt', encoding='utf-8') as dataset_file:
        for raw_line in dataset_file:
            line = clean_text(raw_line.strip(SPACES + '\n'))
            if not line:
                continue
            pieces = [p.strip(SPACES) for p in re.split(SENTENCE_TOKENIZATION_REGEXP, line) if p is not None]
            fragments = [p for p in pieces if p != '']
            if len(fragments) <= 1:
                sentences.extend(fragments)
                continue
            for current, following in zip(fragments[:-1], fragments[1:]):
                if set(following).issubset(separators):
                    # Reattach a trailing separator run to its sentence.
                    sentences.append(current + following)
                elif set(current).issubset(separators):
                    # A bare separator run was already consumed above.
                    continue
                else:
                    sentences.append(current)
    return sentences
Reads a text file and returns a list of individual sentences. :param file_path: The path of the file. :return: list of str, each str is a sentence.
pipeline_diacritizer/dataset_preprocessing.py
read_text_file
Hamza5/Pipeline-diacritizer
8
python
def read_text_file(file_path): '\n Reads a text file and returns a list of individual sentences.\n :param file_path: The path of the file.\n :return: list of str, each str is a sentence.\n ' assert isinstance(file_path, str) sentences = [] with open(file_path, 'rt', encoding='utf-8') as dataset_file: for line in dataset_file: line = clean_text(line.strip((SPACES + '\n'))) if (line == ): continue fragments = list(filter((lambda x: (x != )), [x.strip(SPACES) for x in re.split(SENTENCE_TOKENIZATION_REGEXP, line) if (x is not None)])) if (len(fragments) > 1): for (f1, f2) in zip(fragments[:(- 1)], fragments[1:]): if set(f2).issubset(set(SENTENCE_SEPARATORS)): sentences.append((f1 + f2)) elif set(f1).issubset(set(SENTENCE_SEPARATORS)): continue else: sentences.append(f1) else: sentences.extend(fragments) return sentences
def read_text_file(file_path): '\n Reads a text file and returns a list of individual sentences.\n :param file_path: The path of the file.\n :return: list of str, each str is a sentence.\n ' assert isinstance(file_path, str) sentences = [] with open(file_path, 'rt', encoding='utf-8') as dataset_file: for line in dataset_file: line = clean_text(line.strip((SPACES + '\n'))) if (line == ): continue fragments = list(filter((lambda x: (x != )), [x.strip(SPACES) for x in re.split(SENTENCE_TOKENIZATION_REGEXP, line) if (x is not None)])) if (len(fragments) > 1): for (f1, f2) in zip(fragments[:(- 1)], fragments[1:]): if set(f2).issubset(set(SENTENCE_SEPARATORS)): sentences.append((f1 + f2)) elif set(f1).issubset(set(SENTENCE_SEPARATORS)): continue else: sentences.append(f1) else: sentences.extend(fragments) return sentences<|docstring|>Reads a text file and returns a list of individual sentences. :param file_path: The path of the file. :return: list of str, each str is a sentence.<|endoftext|>
544009f73130bad22280b1641b62e4f47e8d518813bdd73ba45a9af74a937fb8
def text_to_indices(text):
    """Transform an undiacritized sentence into an indices vector.

    Arabic letters map to their CHAR2INDEX code, digits collapse to the index
    of '0', and whitespace maps to the index of ' '.

    :param text: str, the sentence to be transformed.
    :return: ndarray, 1D NumPy array representing the new format.
    """
    assert isinstance(text, str) and (set(text) & ARABIC_DIACRITICS) == set()
    # Use zeros instead of np.empty so characters that match none of the
    # branches below (e.g. punctuation) get a deterministic value rather than
    # uninitialized memory. NOTE(review): 0.0 may coincide with a real
    # CHAR2INDEX code — confirm against the vocabulary definition.
    char_vectors = np.zeros((len(text),))
    for i in range(len(text)):
        if text[i] in ARABIC_LETTERS:
            char_vectors[i] = CHAR2INDEX[text[i]]
        elif text[i].isnumeric():
            char_vectors[i] = CHAR2INDEX['0']
        elif text[i].isspace():
            char_vectors[i] = CHAR2INDEX[' ']
    return char_vectors
Transform a sentence into an indices vector. :param text: str, the sentence to be transformed. :return: ndarray, 1D NumPy array representing the new format.
pipeline_diacritizer/dataset_preprocessing.py
text_to_indices
Hamza5/Pipeline-diacritizer
8
python
def text_to_indices(text): '\n Transform a sentence into an indices vector.\n :param text: str, the sentence to be transformed.\n :return: ndarray, 1D NumPy array representing the new format.\n ' assert (isinstance(text, str) and ((set(text) & ARABIC_DIACRITICS) == set())) char_vectors = np.empty((len(text),)) for i in range(len(text)): if (text[i] in ARABIC_LETTERS): char_vectors[i] = CHAR2INDEX[text[i]] elif text[i].isnumeric(): char_vectors[i] = CHAR2INDEX['0'] elif text[i].isspace(): char_vectors[i] = CHAR2INDEX[' '] return char_vectors
def text_to_indices(text): '\n Transform a sentence into an indices vector.\n :param text: str, the sentence to be transformed.\n :return: ndarray, 1D NumPy array representing the new format.\n ' assert (isinstance(text, str) and ((set(text) & ARABIC_DIACRITICS) == set())) char_vectors = np.empty((len(text),)) for i in range(len(text)): if (text[i] in ARABIC_LETTERS): char_vectors[i] = CHAR2INDEX[text[i]] elif text[i].isnumeric(): char_vectors[i] = CHAR2INDEX['0'] elif text[i].isspace(): char_vectors[i] = CHAR2INDEX[' '] return char_vectors<|docstring|>Transform a sentence into an indices vector. :param text: str, the sentence to be transformed. :return: ndarray, 1D NumPy array representing the new format.<|endoftext|>
dd1374c3cd4b656c49a76c20a103ee64b9300eb7d192c1a7389e4cf5e594c1aa
def add_time_steps(one_hot_matrix, time_steps, word_level):
    """
    Transform a 2D one-hot matrix into a 3D one containing time steps.

    :param one_hot_matrix: ndarray, the one-hot matrix.
    :param time_steps: int, the number of time steps.
    :param word_level: bool, if True then each instance will represent a word.
    :return: ndarray, 3D matrix with time steps as a second dimension.
    """
    assert (isinstance(one_hot_matrix, np.ndarray) and (len(one_hot_matrix.shape) == 2))
    assert (isinstance(time_steps, int) and (time_steps > 1))
    assert isinstance(word_level, bool)
    # Row positions of the space markers, plus one sentinel index past the
    # end of the matrix so the final word is bounded as well.
    space_indices = np.concatenate((np.flatnonzero((np.argmax(one_hot_matrix, axis=1) == CHAR2INDEX[' '])), np.array(one_hot_matrix.shape[0:1])))
    # Character level: one instance per row; word level: one per word boundary.
    X = np.empty(((one_hot_matrix.shape[0] if (not word_level) else space_indices.shape[0]), time_steps, one_hot_matrix.shape[1]))
    # Amount of zero padding prepended so the earliest window(s) are full length.
    offset = ((time_steps - 1) if (not word_level) else max((time_steps - space_indices[0]), 1))
    padded_one_hot = np.concatenate((np.zeros((offset, one_hot_matrix.shape[1])), one_hot_matrix))
    if (not word_level):
        # Sliding window of `time_steps` consecutive rows per character.
        for i in range(X.shape[0]):
            X[i] = padded_one_hot[i:(i + time_steps)]
    else:
        # Shift the space positions into the padded coordinate system, then
        # take the `time_steps` rows that end at each word boundary.
        space_indices += offset
        for i in range(X.shape[0]):
            X[i] = padded_one_hot[(space_indices[i] - time_steps):space_indices[i]]
    return X
Transform a 2D one-hot matrix into a 3D one containing time steps. :param one_hot_matrix: ndarray, the one-hot matrix :param time_steps: int, the number of time steps :param word_level: bool, if True then each instance will represent a word. :return: ndarray, 3D matrix with time steps as a second dimension.
pipeline_diacritizer/dataset_preprocessing.py
add_time_steps
Hamza5/Pipeline-diacritizer
8
python
def add_time_steps(one_hot_matrix, time_steps, word_level): '\n Transform a 2D one-hot matrix into a 3D one containing time steps.\n :param one_hot_matrix: ndarray, the one-hot matrix\n :param time_steps: int, the number of time steps\n :param word_level: bool, if True then each instance will represent a word.\n :return: ndarray, 3D matrix with time steps as a second dimension.\n ' assert (isinstance(one_hot_matrix, np.ndarray) and (len(one_hot_matrix.shape) == 2)) assert (isinstance(time_steps, int) and (time_steps > 1)) assert isinstance(word_level, bool) space_indices = np.concatenate((np.flatnonzero((np.argmax(one_hot_matrix, axis=1) == CHAR2INDEX[' '])), np.array(one_hot_matrix.shape[0:1]))) X = np.empty(((one_hot_matrix.shape[0] if (not word_level) else space_indices.shape[0]), time_steps, one_hot_matrix.shape[1])) offset = ((time_steps - 1) if (not word_level) else max((time_steps - space_indices[0]), 1)) padded_one_hot = np.concatenate((np.zeros((offset, one_hot_matrix.shape[1])), one_hot_matrix)) if (not word_level): for i in range(X.shape[0]): X[i] = padded_one_hot[i:(i + time_steps)] else: space_indices += offset for i in range(X.shape[0]): X[i] = padded_one_hot[(space_indices[i] - time_steps):space_indices[i]] return X
def add_time_steps(one_hot_matrix, time_steps, word_level): '\n Transform a 2D one-hot matrix into a 3D one containing time steps.\n :param one_hot_matrix: ndarray, the one-hot matrix\n :param time_steps: int, the number of time steps\n :param word_level: bool, if True then each instance will represent a word.\n :return: ndarray, 3D matrix with time steps as a second dimension.\n ' assert (isinstance(one_hot_matrix, np.ndarray) and (len(one_hot_matrix.shape) == 2)) assert (isinstance(time_steps, int) and (time_steps > 1)) assert isinstance(word_level, bool) space_indices = np.concatenate((np.flatnonzero((np.argmax(one_hot_matrix, axis=1) == CHAR2INDEX[' '])), np.array(one_hot_matrix.shape[0:1]))) X = np.empty(((one_hot_matrix.shape[0] if (not word_level) else space_indices.shape[0]), time_steps, one_hot_matrix.shape[1])) offset = ((time_steps - 1) if (not word_level) else max((time_steps - space_indices[0]), 1)) padded_one_hot = np.concatenate((np.zeros((offset, one_hot_matrix.shape[1])), one_hot_matrix)) if (not word_level): for i in range(X.shape[0]): X[i] = padded_one_hot[i:(i + time_steps)] else: space_indices += offset for i in range(X.shape[0]): X[i] = padded_one_hot[(space_indices[i] - time_steps):space_indices[i]] return X<|docstring|>Transform a 2D one-hot matrix into a 3D one containing time steps. :param one_hot_matrix: ndarray, the one-hot matrix :param time_steps: int, the number of time steps :param word_level: bool, if True then each instance will represent a word. :return: ndarray, 3D matrix with time steps as a second dimension.<|endoftext|>
0d204abb5549f8a106bf77c88a37fb970e0949175499b69c0d66f28b5b4af526
def input_to_sentence(batch, word_level): '\n Revert an input batch again to text format.\n :param batch: ndarray, an input batch representing a sentence.\n :param word_level: bool, True if an instance corresponds to a word, False otherwise.\n :return: str, the original text.\n ' assert (isinstance(batch, np.ndarray) and (len(batch.shape) == 3)) assert isinstance(word_level, bool) if (not word_level): one_hot = batch[(:, (- 1))] indices = np.argmax(one_hot, axis=1) return ''.join([INDEX2CHAR[i] for i in indices]) else: sentence = '' for row in batch: word = [] i = (row.shape[0] - 1) while ((i > (- 1)) and np.any(row[i]) and (np.argmax(row[i]) != CHAR2INDEX[' '])): word.append(INDEX2CHAR[np.argmax(row[i])]) i -= 1 sentence += (''.join(reversed(word)) + ' ') return sentence[:(- 1)]
Revert an input batch again to text format. :param batch: ndarray, an input batch representing a sentence. :param word_level: bool, True if an instance corresponds to a word, False otherwise. :return: str, the original text.
pipeline_diacritizer/dataset_preprocessing.py
input_to_sentence
Hamza5/Pipeline-diacritizer
8
python
def input_to_sentence(batch, word_level): '\n Revert an input batch again to text format.\n :param batch: ndarray, an input batch representing a sentence.\n :param word_level: bool, True if an instance corresponds to a word, False otherwise.\n :return: str, the original text.\n ' assert (isinstance(batch, np.ndarray) and (len(batch.shape) == 3)) assert isinstance(word_level, bool) if (not word_level): one_hot = batch[(:, (- 1))] indices = np.argmax(one_hot, axis=1) return .join([INDEX2CHAR[i] for i in indices]) else: sentence = for row in batch: word = [] i = (row.shape[0] - 1) while ((i > (- 1)) and np.any(row[i]) and (np.argmax(row[i]) != CHAR2INDEX[' '])): word.append(INDEX2CHAR[np.argmax(row[i])]) i -= 1 sentence += (.join(reversed(word)) + ' ') return sentence[:(- 1)]
def input_to_sentence(batch, word_level): '\n Revert an input batch again to text format.\n :param batch: ndarray, an input batch representing a sentence.\n :param word_level: bool, True if an instance corresponds to a word, False otherwise.\n :return: str, the original text.\n ' assert (isinstance(batch, np.ndarray) and (len(batch.shape) == 3)) assert isinstance(word_level, bool) if (not word_level): one_hot = batch[(:, (- 1))] indices = np.argmax(one_hot, axis=1) return .join([INDEX2CHAR[i] for i in indices]) else: sentence = for row in batch: word = [] i = (row.shape[0] - 1) while ((i > (- 1)) and np.any(row[i]) and (np.argmax(row[i]) != CHAR2INDEX[' '])): word.append(INDEX2CHAR[np.argmax(row[i])]) i -= 1 sentence += (.join(reversed(word)) + ' ') return sentence[:(- 1)]<|docstring|>Revert an input batch again to text format. :param batch: ndarray, an input batch representing a sentence. :param word_level: bool, True if an instance corresponds to a word, False otherwise. :return: str, the original text.<|endoftext|>
0e05fc302be6d4c41363cb9ab31917c7d826e27b2d0e5320e7aead4460efbb24
@torch.no_grad() def calculate_nll(model, test_loader, criterion, args, iw_samples): '\n model:\n test_loader:\n iw_samples: Number of Importance Weighting samples used for approximating log-likelihood.\n ' model.eval() (img, _) = next(iter(test_loader)) img_shape = img.shape[1:] likelihood_test = [] for (i, (x_imgs, _)) in enumerate(test_loader): iw_array = [] for _ in range(iw_samples): x_imgs = x_imgs.to(args.device) output = model(x_imgs) (nelbo, _) = criterion(x_imgs, output, model) iw_array.append(nelbo.item()) nll_x = (- logsumexp(torch.tensor(iw_array))) likelihood_test.append((nll_x + np.log(len(iw_array)))) print(i, '/', len(test_loader)) nll = (- torch.tensor(likelihood_test).mean().item()) bpd = (nll / (np.prod(img_shape) * np.log(2.0))) return bpd
model: test_loader: iw_samples: Number of Importance Weighting samples used for approximating log-likelihood.
adv/srVAE/src/modules/loss.py
calculate_nll
eliphatfs/adversarial
60
python
@torch.no_grad() def calculate_nll(model, test_loader, criterion, args, iw_samples): '\n model:\n test_loader:\n iw_samples: Number of Importance Weighting samples used for approximating log-likelihood.\n ' model.eval() (img, _) = next(iter(test_loader)) img_shape = img.shape[1:] likelihood_test = [] for (i, (x_imgs, _)) in enumerate(test_loader): iw_array = [] for _ in range(iw_samples): x_imgs = x_imgs.to(args.device) output = model(x_imgs) (nelbo, _) = criterion(x_imgs, output, model) iw_array.append(nelbo.item()) nll_x = (- logsumexp(torch.tensor(iw_array))) likelihood_test.append((nll_x + np.log(len(iw_array)))) print(i, '/', len(test_loader)) nll = (- torch.tensor(likelihood_test).mean().item()) bpd = (nll / (np.prod(img_shape) * np.log(2.0))) return bpd
@torch.no_grad() def calculate_nll(model, test_loader, criterion, args, iw_samples): '\n model:\n test_loader:\n iw_samples: Number of Importance Weighting samples used for approximating log-likelihood.\n ' model.eval() (img, _) = next(iter(test_loader)) img_shape = img.shape[1:] likelihood_test = [] for (i, (x_imgs, _)) in enumerate(test_loader): iw_array = [] for _ in range(iw_samples): x_imgs = x_imgs.to(args.device) output = model(x_imgs) (nelbo, _) = criterion(x_imgs, output, model) iw_array.append(nelbo.item()) nll_x = (- logsumexp(torch.tensor(iw_array))) likelihood_test.append((nll_x + np.log(len(iw_array)))) print(i, '/', len(test_loader)) nll = (- torch.tensor(likelihood_test).mean().item()) bpd = (nll / (np.prod(img_shape) * np.log(2.0))) return bpd<|docstring|>model: test_loader: iw_samples: Number of Importance Weighting samples used for approximating log-likelihood.<|endoftext|>
af72d1e441e40007ccb20c43a9727711a144d7a055fdcc5fabd6022c4c7cea9e
def HighThreshold(mode, magnitude, sample_size): "Returns the high threshold for hypothesis testing.\n\n Args:\n mode: 'functional' or 'performance'. We use different significance\n thresholds for each type.\n magnitude: An estimate of the size of differences to look for. We need\n more values to find smaller differences. If mode is 'functional',\n this is the failure rate, a float between 0 and 1. If mode is\n 'performance', this is a multiple of the interquartile range (IQR).\n sample_size: The number of values in each sample.\n\n Returns:\n The high significance threshold.\n " if (mode == 'functional'): thresholds = _HIGH_THRESHOLDS_FUNCTIONAL magnitude_index = (int((magnitude * 10)) - 1) elif (mode == 'performance'): thresholds = _HIGH_THRESHOLDS_PERFORMANCE magnitude_index = (int((magnitude * 10)) - 10) else: raise NotImplementedError() magnitude_index = max(magnitude_index, 0) magnitude_index = min(magnitude_index, (len(thresholds) - 1)) sample_size_index = (min(sample_size, len(thresholds[magnitude_index])) - 1) return thresholds[magnitude_index][sample_size_index]
Returns the high threshold for hypothesis testing. Args: mode: 'functional' or 'performance'. We use different significance thresholds for each type. magnitude: An estimate of the size of differences to look for. We need more values to find smaller differences. If mode is 'functional', this is the failure rate, a float between 0 and 1. If mode is 'performance', this is a multiple of the interquartile range (IQR). sample_size: The number of values in each sample. Returns: The high significance threshold.
dashboard/dashboard/pinpoint/models/compare/thresholds.py
HighThreshold
blezek/catapult
1
python
def HighThreshold(mode, magnitude, sample_size): "Returns the high threshold for hypothesis testing.\n\n Args:\n mode: 'functional' or 'performance'. We use different significance\n thresholds for each type.\n magnitude: An estimate of the size of differences to look for. We need\n more values to find smaller differences. If mode is 'functional',\n this is the failure rate, a float between 0 and 1. If mode is\n 'performance', this is a multiple of the interquartile range (IQR).\n sample_size: The number of values in each sample.\n\n Returns:\n The high significance threshold.\n " if (mode == 'functional'): thresholds = _HIGH_THRESHOLDS_FUNCTIONAL magnitude_index = (int((magnitude * 10)) - 1) elif (mode == 'performance'): thresholds = _HIGH_THRESHOLDS_PERFORMANCE magnitude_index = (int((magnitude * 10)) - 10) else: raise NotImplementedError() magnitude_index = max(magnitude_index, 0) magnitude_index = min(magnitude_index, (len(thresholds) - 1)) sample_size_index = (min(sample_size, len(thresholds[magnitude_index])) - 1) return thresholds[magnitude_index][sample_size_index]
def HighThreshold(mode, magnitude, sample_size): "Returns the high threshold for hypothesis testing.\n\n Args:\n mode: 'functional' or 'performance'. We use different significance\n thresholds for each type.\n magnitude: An estimate of the size of differences to look for. We need\n more values to find smaller differences. If mode is 'functional',\n this is the failure rate, a float between 0 and 1. If mode is\n 'performance', this is a multiple of the interquartile range (IQR).\n sample_size: The number of values in each sample.\n\n Returns:\n The high significance threshold.\n " if (mode == 'functional'): thresholds = _HIGH_THRESHOLDS_FUNCTIONAL magnitude_index = (int((magnitude * 10)) - 1) elif (mode == 'performance'): thresholds = _HIGH_THRESHOLDS_PERFORMANCE magnitude_index = (int((magnitude * 10)) - 10) else: raise NotImplementedError() magnitude_index = max(magnitude_index, 0) magnitude_index = min(magnitude_index, (len(thresholds) - 1)) sample_size_index = (min(sample_size, len(thresholds[magnitude_index])) - 1) return thresholds[magnitude_index][sample_size_index]<|docstring|>Returns the high threshold for hypothesis testing. Args: mode: 'functional' or 'performance'. We use different significance thresholds for each type. magnitude: An estimate of the size of differences to look for. We need more values to find smaller differences. If mode is 'functional', this is the failure rate, a float between 0 and 1. If mode is 'performance', this is a multiple of the interquartile range (IQR). sample_size: The number of values in each sample. Returns: The high significance threshold.<|endoftext|>
8e4102270a30b1dd26a3d84711f1f8ef0078694720d04e2f859dd493112d462b
def LowThreshold(): 'Returns the low threshold for hypothesis testing.' return 0.01
Returns the low threshold for hypothesis testing.
dashboard/dashboard/pinpoint/models/compare/thresholds.py
LowThreshold
blezek/catapult
1
python
def LowThreshold(): return 0.01
def LowThreshold(): return 0.01<|docstring|>Returns the low threshold for hypothesis testing.<|endoftext|>
b97c839916bae4a61464c45cc1f36745775449c7f8759d8109d21d084f6266ce
@app.route('/') def index(): 'Render index template' return flask.render_template('index.html')
Render index template
main/control/index.py
index
smittfaced/AngularMaterialGAE
58
python
@app.route('/') def index(): return flask.render_template('index.html')
@app.route('/') def index(): return flask.render_template('index.html')<|docstring|>Render index template<|endoftext|>
a4bf1c307112b6c966a669f9feb7e82b055e20a3a7d40c2e7440c2828af82be6
@app.context_processor def inject_user(): "Injects 'user' variable into jinja template, so it can be passed into angular. See base.html" user = False if auth.is_logged_in(): user = auth.current_user_db().to_dict(include=User.get_private_properties()) return {'user': user}
Injects 'user' variable into jinja template, so it can be passed into angular. See base.html
main/control/index.py
inject_user
smittfaced/AngularMaterialGAE
58
python
@app.context_processor def inject_user(): user = False if auth.is_logged_in(): user = auth.current_user_db().to_dict(include=User.get_private_properties()) return {'user': user}
@app.context_processor def inject_user(): user = False if auth.is_logged_in(): user = auth.current_user_db().to_dict(include=User.get_private_properties()) return {'user': user}<|docstring|>Injects 'user' variable into jinja template, so it can be passed into angular. See base.html<|endoftext|>
ee52fbe6d97c6b2f37f33000f5f9bb18366df648bb1cd435059ac7f6a9861326
@app.context_processor def inject_config(): "Injects 'app_config' variable into jinja template, so it can be passed into angular. See base.html" config_properties = (Config.get_all_properties() if auth.is_admin() else Config.get_public_properties()) app_config = config.CONFIG_DB.to_dict(include=config_properties) app_config['development'] = config.DEVELOPMENT return {'app_config': app_config}
Injects 'app_config' variable into jinja template, so it can be passed into angular. See base.html
main/control/index.py
inject_config
smittfaced/AngularMaterialGAE
58
python
@app.context_processor def inject_config(): config_properties = (Config.get_all_properties() if auth.is_admin() else Config.get_public_properties()) app_config = config.CONFIG_DB.to_dict(include=config_properties) app_config['development'] = config.DEVELOPMENT return {'app_config': app_config}
@app.context_processor def inject_config(): config_properties = (Config.get_all_properties() if auth.is_admin() else Config.get_public_properties()) app_config = config.CONFIG_DB.to_dict(include=config_properties) app_config['development'] = config.DEVELOPMENT return {'app_config': app_config}<|docstring|>Injects 'app_config' variable into jinja template, so it can be passed into angular. See base.html<|endoftext|>
c2b700378899352066368c7bf6b5cb9b424c3e7db9536ffb2f12292e3cdf1354
@app.context_processor def inject_validators(): "Injects 'validators' variable into jinja template, so it can be passed into angular. See base.html\n Model validators are passed to angular so it can be used for frontend input validation as well\n This prevents code repetition, as we e.g we change property of UserValidator.name to [5, 20]\n and the same validation of user's name (length between 5-20 characters) will be performed in frontend\n as well as in backend\n " return {'validators': {'arg': ArgumentValidator.to_dict(), 'user': UserValidator.to_dict()}}
Injects 'validators' variable into jinja template, so it can be passed into angular. See base.html Model validators are passed to angular so it can be used for frontend input validation as well This prevents code repetition, as we e.g we change property of UserValidator.name to [5, 20] and the same validation of user's name (length between 5-20 characters) will be performed in frontend as well as in backend
main/control/index.py
inject_validators
smittfaced/AngularMaterialGAE
58
python
@app.context_processor def inject_validators(): "Injects 'validators' variable into jinja template, so it can be passed into angular. See base.html\n Model validators are passed to angular so it can be used for frontend input validation as well\n This prevents code repetition, as we e.g we change property of UserValidator.name to [5, 20]\n and the same validation of user's name (length between 5-20 characters) will be performed in frontend\n as well as in backend\n " return {'validators': {'arg': ArgumentValidator.to_dict(), 'user': UserValidator.to_dict()}}
@app.context_processor def inject_validators(): "Injects 'validators' variable into jinja template, so it can be passed into angular. See base.html\n Model validators are passed to angular so it can be used for frontend input validation as well\n This prevents code repetition, as we e.g we change property of UserValidator.name to [5, 20]\n and the same validation of user's name (length between 5-20 characters) will be performed in frontend\n as well as in backend\n " return {'validators': {'arg': ArgumentValidator.to_dict(), 'user': UserValidator.to_dict()}}<|docstring|>Injects 'validators' variable into jinja template, so it can be passed into angular. See base.html Model validators are passed to angular so it can be used for frontend input validation as well This prevents code repetition, as we e.g we change property of UserValidator.name to [5, 20] and the same validation of user's name (length between 5-20 characters) will be performed in frontend as well as in backend<|endoftext|>
a6987b2fe99f1383e5c4ac9c99c29338eea8e5baeb530b60562411c372e2629c
@app.route('/_ah/warmup') def warmup(): 'Warmup requests load application code into a new instance before any live requests reach that instance.\n For more info see GAE docs' return 'success'
Warmup requests load application code into a new instance before any live requests reach that instance. For more info see GAE docs
main/control/index.py
warmup
smittfaced/AngularMaterialGAE
58
python
@app.route('/_ah/warmup') def warmup(): 'Warmup requests load application code into a new instance before any live requests reach that instance.\n For more info see GAE docs' return 'success'
@app.route('/_ah/warmup') def warmup(): 'Warmup requests load application code into a new instance before any live requests reach that instance.\n For more info see GAE docs' return 'success'<|docstring|>Warmup requests load application code into a new instance before any live requests reach that instance. For more info see GAE docs<|endoftext|>
54d7513fe1ae3442f2ef3bfac90cd00fc51a43eff359b1d33c63921efcb3e29f
def __init__(self, **kwargs): '\n Initializes a new CreateContainerScanTargetDetails object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param display_name:\n The value to assign to the display_name property of this CreateContainerScanTargetDetails.\n :type display_name: str\n\n :param compartment_id:\n The value to assign to the compartment_id property of this CreateContainerScanTargetDetails.\n :type compartment_id: str\n\n :param description:\n The value to assign to the description property of this CreateContainerScanTargetDetails.\n :type description: str\n\n :param target_registry:\n The value to assign to the target_registry property of this CreateContainerScanTargetDetails.\n :type target_registry: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n\n :param container_scan_recipe_id:\n The value to assign to the container_scan_recipe_id property of this CreateContainerScanTargetDetails.\n :type container_scan_recipe_id: str\n\n :param freeform_tags:\n The value to assign to the freeform_tags property of this CreateContainerScanTargetDetails.\n :type freeform_tags: dict(str, str)\n\n :param defined_tags:\n The value to assign to the defined_tags property of this CreateContainerScanTargetDetails.\n :type defined_tags: dict(str, dict(str, object))\n\n ' self.swagger_types = {'display_name': 'str', 'compartment_id': 'str', 'description': 'str', 'target_registry': 'CreateContainerScanRegistryDetails', 'container_scan_recipe_id': 'str', 'freeform_tags': 'dict(str, str)', 'defined_tags': 'dict(str, dict(str, object))'} self.attribute_map = {'display_name': 'displayName', 'compartment_id': 'compartmentId', 'description': 'description', 'target_registry': 'targetRegistry', 'container_scan_recipe_id': 'containerScanRecipeId', 'freeform_tags': 'freeformTags', 'defined_tags': 'definedTags'} self._display_name = None self._compartment_id = None 
self._description = None self._target_registry = None self._container_scan_recipe_id = None self._freeform_tags = None self._defined_tags = None
Initializes a new CreateContainerScanTargetDetails object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param display_name: The value to assign to the display_name property of this CreateContainerScanTargetDetails. :type display_name: str :param compartment_id: The value to assign to the compartment_id property of this CreateContainerScanTargetDetails. :type compartment_id: str :param description: The value to assign to the description property of this CreateContainerScanTargetDetails. :type description: str :param target_registry: The value to assign to the target_registry property of this CreateContainerScanTargetDetails. :type target_registry: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails :param container_scan_recipe_id: The value to assign to the container_scan_recipe_id property of this CreateContainerScanTargetDetails. :type container_scan_recipe_id: str :param freeform_tags: The value to assign to the freeform_tags property of this CreateContainerScanTargetDetails. :type freeform_tags: dict(str, str) :param defined_tags: The value to assign to the defined_tags property of this CreateContainerScanTargetDetails. :type defined_tags: dict(str, dict(str, object))
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
__init__
ezequielramos/oci-python-sdk
249
python
def __init__(self, **kwargs): '\n Initializes a new CreateContainerScanTargetDetails object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param display_name:\n The value to assign to the display_name property of this CreateContainerScanTargetDetails.\n :type display_name: str\n\n :param compartment_id:\n The value to assign to the compartment_id property of this CreateContainerScanTargetDetails.\n :type compartment_id: str\n\n :param description:\n The value to assign to the description property of this CreateContainerScanTargetDetails.\n :type description: str\n\n :param target_registry:\n The value to assign to the target_registry property of this CreateContainerScanTargetDetails.\n :type target_registry: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n\n :param container_scan_recipe_id:\n The value to assign to the container_scan_recipe_id property of this CreateContainerScanTargetDetails.\n :type container_scan_recipe_id: str\n\n :param freeform_tags:\n The value to assign to the freeform_tags property of this CreateContainerScanTargetDetails.\n :type freeform_tags: dict(str, str)\n\n :param defined_tags:\n The value to assign to the defined_tags property of this CreateContainerScanTargetDetails.\n :type defined_tags: dict(str, dict(str, object))\n\n ' self.swagger_types = {'display_name': 'str', 'compartment_id': 'str', 'description': 'str', 'target_registry': 'CreateContainerScanRegistryDetails', 'container_scan_recipe_id': 'str', 'freeform_tags': 'dict(str, str)', 'defined_tags': 'dict(str, dict(str, object))'} self.attribute_map = {'display_name': 'displayName', 'compartment_id': 'compartmentId', 'description': 'description', 'target_registry': 'targetRegistry', 'container_scan_recipe_id': 'containerScanRecipeId', 'freeform_tags': 'freeformTags', 'defined_tags': 'definedTags'} self._display_name = None self._compartment_id = None 
self._description = None self._target_registry = None self._container_scan_recipe_id = None self._freeform_tags = None self._defined_tags = None
def __init__(self, **kwargs): '\n Initializes a new CreateContainerScanTargetDetails object with values from keyword arguments.\n The following keyword arguments are supported (corresponding to the getters/setters of this class):\n\n :param display_name:\n The value to assign to the display_name property of this CreateContainerScanTargetDetails.\n :type display_name: str\n\n :param compartment_id:\n The value to assign to the compartment_id property of this CreateContainerScanTargetDetails.\n :type compartment_id: str\n\n :param description:\n The value to assign to the description property of this CreateContainerScanTargetDetails.\n :type description: str\n\n :param target_registry:\n The value to assign to the target_registry property of this CreateContainerScanTargetDetails.\n :type target_registry: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n\n :param container_scan_recipe_id:\n The value to assign to the container_scan_recipe_id property of this CreateContainerScanTargetDetails.\n :type container_scan_recipe_id: str\n\n :param freeform_tags:\n The value to assign to the freeform_tags property of this CreateContainerScanTargetDetails.\n :type freeform_tags: dict(str, str)\n\n :param defined_tags:\n The value to assign to the defined_tags property of this CreateContainerScanTargetDetails.\n :type defined_tags: dict(str, dict(str, object))\n\n ' self.swagger_types = {'display_name': 'str', 'compartment_id': 'str', 'description': 'str', 'target_registry': 'CreateContainerScanRegistryDetails', 'container_scan_recipe_id': 'str', 'freeform_tags': 'dict(str, str)', 'defined_tags': 'dict(str, dict(str, object))'} self.attribute_map = {'display_name': 'displayName', 'compartment_id': 'compartmentId', 'description': 'description', 'target_registry': 'targetRegistry', 'container_scan_recipe_id': 'containerScanRecipeId', 'freeform_tags': 'freeformTags', 'defined_tags': 'definedTags'} self._display_name = None self._compartment_id = None 
self._description = None self._target_registry = None self._container_scan_recipe_id = None self._freeform_tags = None self._defined_tags = None<|docstring|>Initializes a new CreateContainerScanTargetDetails object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param display_name: The value to assign to the display_name property of this CreateContainerScanTargetDetails. :type display_name: str :param compartment_id: The value to assign to the compartment_id property of this CreateContainerScanTargetDetails. :type compartment_id: str :param description: The value to assign to the description property of this CreateContainerScanTargetDetails. :type description: str :param target_registry: The value to assign to the target_registry property of this CreateContainerScanTargetDetails. :type target_registry: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails :param container_scan_recipe_id: The value to assign to the container_scan_recipe_id property of this CreateContainerScanTargetDetails. :type container_scan_recipe_id: str :param freeform_tags: The value to assign to the freeform_tags property of this CreateContainerScanTargetDetails. :type freeform_tags: dict(str, str) :param defined_tags: The value to assign to the defined_tags property of this CreateContainerScanTargetDetails. :type defined_tags: dict(str, dict(str, object))<|endoftext|>
fadf9d42456c4870b8b262b7e4b6b43af6779ed56885fab376fb94cd3f96bd86
@property def display_name(self): '\n Gets the display_name of this CreateContainerScanTargetDetails.\n User friendly name of container scan target. If not present, will be auto-generated.\n\n\n :return: The display_name of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._display_name
Gets the display_name of this CreateContainerScanTargetDetails. User friendly name of container scan target. If not present, will be auto-generated. :return: The display_name of this CreateContainerScanTargetDetails. :rtype: str
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
display_name
ezequielramos/oci-python-sdk
249
python
@property def display_name(self): '\n Gets the display_name of this CreateContainerScanTargetDetails.\n User friendly name of container scan target. If not present, will be auto-generated.\n\n\n :return: The display_name of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._display_name
@property def display_name(self): '\n Gets the display_name of this CreateContainerScanTargetDetails.\n User friendly name of container scan target. If not present, will be auto-generated.\n\n\n :return: The display_name of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._display_name<|docstring|>Gets the display_name of this CreateContainerScanTargetDetails. User friendly name of container scan target. If not present, will be auto-generated. :return: The display_name of this CreateContainerScanTargetDetails. :rtype: str<|endoftext|>
69abf73f6b43efe2291d27f85a670d6405a57ad8c5e04a9843750767a9244484
@display_name.setter def display_name(self, display_name): '\n Sets the display_name of this CreateContainerScanTargetDetails.\n User friendly name of container scan target. If not present, will be auto-generated.\n\n\n :param display_name: The display_name of this CreateContainerScanTargetDetails.\n :type: str\n ' self._display_name = display_name
Sets the display_name of this CreateContainerScanTargetDetails. User friendly name of container scan target. If not present, will be auto-generated. :param display_name: The display_name of this CreateContainerScanTargetDetails. :type: str
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
display_name
ezequielramos/oci-python-sdk
249
python
@display_name.setter def display_name(self, display_name): '\n Sets the display_name of this CreateContainerScanTargetDetails.\n User friendly name of container scan target. If not present, will be auto-generated.\n\n\n :param display_name: The display_name of this CreateContainerScanTargetDetails.\n :type: str\n ' self._display_name = display_name
@display_name.setter def display_name(self, display_name): '\n Sets the display_name of this CreateContainerScanTargetDetails.\n User friendly name of container scan target. If not present, will be auto-generated.\n\n\n :param display_name: The display_name of this CreateContainerScanTargetDetails.\n :type: str\n ' self._display_name = display_name<|docstring|>Sets the display_name of this CreateContainerScanTargetDetails. User friendly name of container scan target. If not present, will be auto-generated. :param display_name: The display_name of this CreateContainerScanTargetDetails. :type: str<|endoftext|>
c5936ff067d84c4846415805662eabe6033ac2a4f8046a47242a94d496474597
@property def compartment_id(self): "\n **[Required]** Gets the compartment_id of this CreateContainerScanTargetDetails.\n The `OCID`__ of the container scan target's compartment\n\n __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm\n\n\n :return: The compartment_id of this CreateContainerScanTargetDetails.\n :rtype: str\n " return self._compartment_id
**[Required]** Gets the compartment_id of this CreateContainerScanTargetDetails. The `OCID`__ of the container scan target's compartment __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :return: The compartment_id of this CreateContainerScanTargetDetails. :rtype: str
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
compartment_id
ezequielramos/oci-python-sdk
249
python
@property def compartment_id(self): "\n **[Required]** Gets the compartment_id of this CreateContainerScanTargetDetails.\n The `OCID`__ of the container scan target's compartment\n\n __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm\n\n\n :return: The compartment_id of this CreateContainerScanTargetDetails.\n :rtype: str\n " return self._compartment_id
@property def compartment_id(self): "\n **[Required]** Gets the compartment_id of this CreateContainerScanTargetDetails.\n The `OCID`__ of the container scan target's compartment\n\n __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm\n\n\n :return: The compartment_id of this CreateContainerScanTargetDetails.\n :rtype: str\n " return self._compartment_id<|docstring|>**[Required]** Gets the compartment_id of this CreateContainerScanTargetDetails. The `OCID`__ of the container scan target's compartment __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :return: The compartment_id of this CreateContainerScanTargetDetails. :rtype: str<|endoftext|>
a9df1d9b324b558add4948035dc960faf2f4df6987adf460a3969b4478337dff
@compartment_id.setter def compartment_id(self, compartment_id): "\n Sets the compartment_id of this CreateContainerScanTargetDetails.\n The `OCID`__ of the container scan target's compartment\n\n __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm\n\n\n :param compartment_id: The compartment_id of this CreateContainerScanTargetDetails.\n :type: str\n " self._compartment_id = compartment_id
Sets the compartment_id of this CreateContainerScanTargetDetails. The `OCID`__ of the container scan target's compartment __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param compartment_id: The compartment_id of this CreateContainerScanTargetDetails. :type: str
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
compartment_id
ezequielramos/oci-python-sdk
249
python
@compartment_id.setter def compartment_id(self, compartment_id): "\n Sets the compartment_id of this CreateContainerScanTargetDetails.\n The `OCID`__ of the container scan target's compartment\n\n __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm\n\n\n :param compartment_id: The compartment_id of this CreateContainerScanTargetDetails.\n :type: str\n " self._compartment_id = compartment_id
@compartment_id.setter def compartment_id(self, compartment_id): "\n Sets the compartment_id of this CreateContainerScanTargetDetails.\n The `OCID`__ of the container scan target's compartment\n\n __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm\n\n\n :param compartment_id: The compartment_id of this CreateContainerScanTargetDetails.\n :type: str\n " self._compartment_id = compartment_id<|docstring|>Sets the compartment_id of this CreateContainerScanTargetDetails. The `OCID`__ of the container scan target's compartment __ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm :param compartment_id: The compartment_id of this CreateContainerScanTargetDetails. :type: str<|endoftext|>
f235bf08c7056745d2d0ab34fc84c1c558257617c972a273ea0de524b8001153
@property def description(self): '\n Gets the description of this CreateContainerScanTargetDetails.\n Target description.\n\n\n :return: The description of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._description
Gets the description of this CreateContainerScanTargetDetails. Target description. :return: The description of this CreateContainerScanTargetDetails. :rtype: str
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
description
ezequielramos/oci-python-sdk
249
python
@property def description(self): '\n Gets the description of this CreateContainerScanTargetDetails.\n Target description.\n\n\n :return: The description of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._description
@property def description(self): '\n Gets the description of this CreateContainerScanTargetDetails.\n Target description.\n\n\n :return: The description of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._description<|docstring|>Gets the description of this CreateContainerScanTargetDetails. Target description. :return: The description of this CreateContainerScanTargetDetails. :rtype: str<|endoftext|>
5c4e49affd7148230d65e763ce9dd95c43942ceb15c4cd55ce405c8ad319ba6f
@description.setter def description(self, description): '\n Sets the description of this CreateContainerScanTargetDetails.\n Target description.\n\n\n :param description: The description of this CreateContainerScanTargetDetails.\n :type: str\n ' self._description = description
Sets the description of this CreateContainerScanTargetDetails. Target description. :param description: The description of this CreateContainerScanTargetDetails. :type: str
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
description
ezequielramos/oci-python-sdk
249
python
@description.setter def description(self, description): '\n Sets the description of this CreateContainerScanTargetDetails.\n Target description.\n\n\n :param description: The description of this CreateContainerScanTargetDetails.\n :type: str\n ' self._description = description
@description.setter def description(self, description): '\n Sets the description of this CreateContainerScanTargetDetails.\n Target description.\n\n\n :param description: The description of this CreateContainerScanTargetDetails.\n :type: str\n ' self._description = description<|docstring|>Sets the description of this CreateContainerScanTargetDetails. Target description. :param description: The description of this CreateContainerScanTargetDetails. :type: str<|endoftext|>
b5f34307853d631fa60bb965642b76a05319bcee71b7f5e1e7b30f681cabe0a9
@property def target_registry(self): '\n **[Required]** Gets the target_registry of this CreateContainerScanTargetDetails.\n\n :return: The target_registry of this CreateContainerScanTargetDetails.\n :rtype: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n ' return self._target_registry
**[Required]** Gets the target_registry of this CreateContainerScanTargetDetails. :return: The target_registry of this CreateContainerScanTargetDetails. :rtype: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
target_registry
ezequielramos/oci-python-sdk
249
python
@property def target_registry(self): '\n **[Required]** Gets the target_registry of this CreateContainerScanTargetDetails.\n\n :return: The target_registry of this CreateContainerScanTargetDetails.\n :rtype: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n ' return self._target_registry
@property def target_registry(self): '\n **[Required]** Gets the target_registry of this CreateContainerScanTargetDetails.\n\n :return: The target_registry of this CreateContainerScanTargetDetails.\n :rtype: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n ' return self._target_registry<|docstring|>**[Required]** Gets the target_registry of this CreateContainerScanTargetDetails. :return: The target_registry of this CreateContainerScanTargetDetails. :rtype: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails<|endoftext|>
83c3c3aa35fcb63f27b79cb5f27833fdf5b66d65d6be4abd78e96c96723ffb29
@target_registry.setter def target_registry(self, target_registry): '\n Sets the target_registry of this CreateContainerScanTargetDetails.\n\n :param target_registry: The target_registry of this CreateContainerScanTargetDetails.\n :type: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n ' self._target_registry = target_registry
Sets the target_registry of this CreateContainerScanTargetDetails. :param target_registry: The target_registry of this CreateContainerScanTargetDetails. :type: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
target_registry
ezequielramos/oci-python-sdk
249
python
@target_registry.setter def target_registry(self, target_registry): '\n Sets the target_registry of this CreateContainerScanTargetDetails.\n\n :param target_registry: The target_registry of this CreateContainerScanTargetDetails.\n :type: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n ' self._target_registry = target_registry
@target_registry.setter def target_registry(self, target_registry): '\n Sets the target_registry of this CreateContainerScanTargetDetails.\n\n :param target_registry: The target_registry of this CreateContainerScanTargetDetails.\n :type: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails\n ' self._target_registry = target_registry<|docstring|>Sets the target_registry of this CreateContainerScanTargetDetails. :param target_registry: The target_registry of this CreateContainerScanTargetDetails. :type: oci.vulnerability_scanning.models.CreateContainerScanRegistryDetails<|endoftext|>
0d5dea4be2ff2de3f3c5a9c025cc64fb716e8c939515a045670d3e72552cfc0f
@property def container_scan_recipe_id(self): '\n **[Required]** Gets the container_scan_recipe_id of this CreateContainerScanTargetDetails.\n ID of the container scan recipe this target applies.\n\n\n :return: The container_scan_recipe_id of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._container_scan_recipe_id
**[Required]** Gets the container_scan_recipe_id of this CreateContainerScanTargetDetails. ID of the container scan recipe this target applies. :return: The container_scan_recipe_id of this CreateContainerScanTargetDetails. :rtype: str
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
container_scan_recipe_id
ezequielramos/oci-python-sdk
249
python
@property def container_scan_recipe_id(self): '\n **[Required]** Gets the container_scan_recipe_id of this CreateContainerScanTargetDetails.\n ID of the container scan recipe this target applies.\n\n\n :return: The container_scan_recipe_id of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._container_scan_recipe_id
@property def container_scan_recipe_id(self): '\n **[Required]** Gets the container_scan_recipe_id of this CreateContainerScanTargetDetails.\n ID of the container scan recipe this target applies.\n\n\n :return: The container_scan_recipe_id of this CreateContainerScanTargetDetails.\n :rtype: str\n ' return self._container_scan_recipe_id<|docstring|>**[Required]** Gets the container_scan_recipe_id of this CreateContainerScanTargetDetails. ID of the container scan recipe this target applies. :return: The container_scan_recipe_id of this CreateContainerScanTargetDetails. :rtype: str<|endoftext|>
4722e965b11cf9e1699cfb53277a8844c3d65bedb15ae9c029fee2b73feb5914
@container_scan_recipe_id.setter def container_scan_recipe_id(self, container_scan_recipe_id): '\n Sets the container_scan_recipe_id of this CreateContainerScanTargetDetails.\n ID of the container scan recipe this target applies.\n\n\n :param container_scan_recipe_id: The container_scan_recipe_id of this CreateContainerScanTargetDetails.\n :type: str\n ' self._container_scan_recipe_id = container_scan_recipe_id
Sets the container_scan_recipe_id of this CreateContainerScanTargetDetails. ID of the container scan recipe this target applies. :param container_scan_recipe_id: The container_scan_recipe_id of this CreateContainerScanTargetDetails. :type: str
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
container_scan_recipe_id
ezequielramos/oci-python-sdk
249
python
@container_scan_recipe_id.setter def container_scan_recipe_id(self, container_scan_recipe_id): '\n Sets the container_scan_recipe_id of this CreateContainerScanTargetDetails.\n ID of the container scan recipe this target applies.\n\n\n :param container_scan_recipe_id: The container_scan_recipe_id of this CreateContainerScanTargetDetails.\n :type: str\n ' self._container_scan_recipe_id = container_scan_recipe_id
@container_scan_recipe_id.setter def container_scan_recipe_id(self, container_scan_recipe_id): '\n Sets the container_scan_recipe_id of this CreateContainerScanTargetDetails.\n ID of the container scan recipe this target applies.\n\n\n :param container_scan_recipe_id: The container_scan_recipe_id of this CreateContainerScanTargetDetails.\n :type: str\n ' self._container_scan_recipe_id = container_scan_recipe_id<|docstring|>Sets the container_scan_recipe_id of this CreateContainerScanTargetDetails. ID of the container scan recipe this target applies. :param container_scan_recipe_id: The container_scan_recipe_id of this CreateContainerScanTargetDetails. :type: str<|endoftext|>
fac0b8d3ab5090984d2bea7093e8df961fc23791469b10f9d86cd935c10422d6
@property def freeform_tags(self): '\n Gets the freeform_tags of this CreateContainerScanTargetDetails.\n Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.\n Example: `{"bar-key": "value"}`\n\n\n :return: The freeform_tags of this CreateContainerScanTargetDetails.\n :rtype: dict(str, str)\n ' return self._freeform_tags
Gets the freeform_tags of this CreateContainerScanTargetDetails. Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}` :return: The freeform_tags of this CreateContainerScanTargetDetails. :rtype: dict(str, str)
src/oci/vulnerability_scanning/models/create_container_scan_target_details.py
freeform_tags
ezequielramos/oci-python-sdk
249
python
@property def freeform_tags(self): '\n Gets the freeform_tags of this CreateContainerScanTargetDetails.\n Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.\n Example: `{"bar-key": "value"}`\n\n\n :return: The freeform_tags of this CreateContainerScanTargetDetails.\n :rtype: dict(str, str)\n ' return self._freeform_tags
@property def freeform_tags(self): '\n Gets the freeform_tags of this CreateContainerScanTargetDetails.\n Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.\n Example: `{"bar-key": "value"}`\n\n\n :return: The freeform_tags of this CreateContainerScanTargetDetails.\n :rtype: dict(str, str)\n ' return self._freeform_tags<|docstring|>Gets the freeform_tags of this CreateContainerScanTargetDetails. Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}` :return: The freeform_tags of this CreateContainerScanTargetDetails. :rtype: dict(str, str)<|endoftext|>