| body (string, lengths 26–98.2k) | body_hash (int64, -9,222,864,604,528,158,000 to 9,221,803,474B) | docstring (string, lengths 1–16.8k) | path (string, lengths 5–230) | name (string, lengths 1–96) | repository_name (string, lengths 7–89) | lang (string, 1 value) | body_without_docstring (string, lengths 20–98.2k) |
|---|---|---|---|---|---|---|---|
def to_memory_units(memory_bytes, round_up):
'Convert from bytes -> memory units.'
value = (memory_bytes / MEMORY_RESOURCE_UNIT_BYTES)
if (value < 1):
raise ValueError('The minimum amount of memory that can be requested is {} bytes, however {} bytes were requested.'.format(MEMORY_RESOURCE_UNIT_BYTES, mem... | -8,472,874,613,192,096,000 | Convert from bytes -> memory units. | python/ray/ray_constants.py | to_memory_units | stephanie-wang/ray | python | def to_memory_units(memory_bytes, round_up):
value = (memory_bytes / MEMORY_RESOURCE_UNIT_BYTES)
if (value < 1):
raise ValueError('The minimum amount of memory that can be requested is {} bytes, however {} bytes were requested.'.format(MEMORY_RESOURCE_UNIT_BYTES, memory_bytes))
if (isinstance(value,... |
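The `to_memory_units` row above is cut off inside both the error branch and the rounding branch. The hedged sketch below reconstructs the conversion it describes; the unit size is an assumption chosen purely for illustration (the real constant is defined elsewhere in `ray_constants.py`), and the rounding branch is inferred from the `round_up` parameter.

```python
# Hypothetical sketch; MEMORY_RESOURCE_UNIT_BYTES is set to 50 MiB here for
# illustration only, the real value lives in ray_constants.py.
MEMORY_RESOURCE_UNIT_BYTES = 50 * 1024 * 1024

def to_memory_units(memory_bytes, round_up):
    """Convert from bytes -> memory units."""
    value = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
    if value < 1:
        raise ValueError(
            'The minimum amount of memory that can be requested is {} bytes, '
            'however {} bytes were requested.'.format(
                MEMORY_RESOURCE_UNIT_BYTES, memory_bytes))
    if isinstance(value, float) and not value.is_integer():
        # Round up to a whole unit or truncate, as requested (this branch is
        # inferred from the round_up flag; the original is truncated above).
        value = int(value) + 1 if round_up else int(value)
    return int(value)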
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n ... | -7,717,226,299,015,962,000 | Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumula... | supar/parsers/dep.py | train | LiBinNLP/HOSDP | python | def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n ... |
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The numbe... | 534,450,388,935,096,700 | Args:
data (str):
The data for evaluation; both a list of instances and a filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, i... | supar/parsers/dep.py | evaluate | LiBinNLP/HOSDP | python | def evaluate(self, data, buckets=8, batch_size=5000, punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The numbe... |
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, tree=True, proj=False, verbose=True, **kwargs):
'\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n ... | 7,355,355,497,957,411,000 | Args:
data (list[list] or str):
The data for prediction; both a list of instances and a filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for ... | supar/parsers/dep.py | predict | LiBinNLP/HOSDP | python | def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, tree=True, proj=False, verbose=True, **kwargs):
'\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n ... |
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load fro... | 5,227,721,562,502,721,000 | Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'biaffine-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``... | supar/parsers/dep.py | load | LiBinNLP/HOSDP | python | @classmethod
def load(cls, path, reload=False, src=None, **kwargs):
"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load fro... |
@classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
'\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The ... | 8,270,736,086,687,907,000 | Build a brand-new Parser, including initialization of all data fields and model parameters.
Args:
path (str):
The path of the model to be saved.
min_freq (int):
The minimum frequency needed to include a token in the vocabulary.
Required if taking words as encoder input.
Default:... | supar/parsers/dep.py | build | LiBinNLP/HOSDP | python | @classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
'\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The ... |
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int... | 4,529,348,555,688,951,000 | Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumula... | supar/parsers/dep.py | train | LiBinNLP/HOSDP | python | def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int... |
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
'\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n ... | -7,514,356,962,041,115,000 | Args:
data (str):
The data for evaluation; both a list of instances and a filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, i... | supar/parsers/dep.py | evaluate | LiBinNLP/HOSDP | python | def evaluate(self, data, buckets=8, batch_size=5000, punct=False, mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
'\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n ... |
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, mbr=True, tree=True, proj=True, verbose=True, **kwargs):
'\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):... | 3,519,811,644,094,867,000 | Args:
data (list[list] or str):
The data for prediction; both a list of instances and a filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for ... | supar/parsers/dep.py | predict | LiBinNLP/HOSDP | python | def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, mbr=True, tree=True, proj=True, verbose=True, **kwargs):
'\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):... |
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load fro... | -8,849,996,489,005,211,000 | Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'crf-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
... | supar/parsers/dep.py | load | LiBinNLP/HOSDP | python | @classmethod
def load(cls, path, reload=False, src=None, **kwargs):
"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load fro... |
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int... | 4,529,348,555,688,951,000 | Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumula... | supar/parsers/dep.py | train | LiBinNLP/HOSDP | python | def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int... |
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
'\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n ... | -7,514,356,962,041,115,000 | Args:
data (str):
The data for evaluation; both a list of instances and a filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, i... | supar/parsers/dep.py | evaluate | LiBinNLP/HOSDP | python | def evaluate(self, data, buckets=8, batch_size=5000, punct=False, mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
'\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n ... |
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, mbr=True, tree=True, proj=True, verbose=True, **kwargs):
'\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):... | 3,519,811,644,094,867,000 | Args:
data (list[list] or str):
The data for prediction; both a list of instances and a filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for ... | supar/parsers/dep.py | predict | LiBinNLP/HOSDP | python | def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, mbr=True, tree=True, proj=True, verbose=True, **kwargs):
'\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):... |
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load fro... | -8,496,512,337,751,790,000 | Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'crf2o-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
... | supar/parsers/dep.py | load | LiBinNLP/HOSDP | python | @classmethod
def load(cls, path, reload=False, src=None, **kwargs):
"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load fro... |
@classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
'\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The ... | 1,164,278,246,627,036,400 | Build a brand-new Parser, including initialization of all data fields and model parameters.
Args:
path (str):
The path of the model to be saved.
min_freq (int):
The minimum frequency needed to include a token in the vocabulary. Default: 2.
fix_len (int):
The max length of all subwor... | supar/parsers/dep.py | build | LiBinNLP/HOSDP | python | @classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
'\n Build a brand-new Parser, including initialization of all data fields and model parameters.\n\n Args:\n path (str):\n The path of the model to be saved.\n min_freq (str):\n The ... |
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n ... | -7,717,226,299,015,962,000 | Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumula... | supar/parsers/dep.py | train | LiBinNLP/HOSDP | python | def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1, punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
'\n Args:\n train/dev/test (list[list] or str):\n Filenames of the train/dev/test datasets.\n buckets (int):\n ... |
def evaluate(self, data, buckets=8, batch_size=5000, punct=False, tree=True, proj=True, partial=False, verbose=True, **kwargs):
'\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number... | -1,705,285,004,826,690,300 | Args:
data (str):
The data for evaluation; both a list of instances and a filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, i... | supar/parsers/dep.py | evaluate | LiBinNLP/HOSDP | python | def evaluate(self, data, buckets=8, batch_size=5000, punct=False, tree=True, proj=True, partial=False, verbose=True, **kwargs):
'\n Args:\n data (str):\n The data for evaluation, both list of instances and filename are allowed.\n buckets (int):\n The number... |
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, tree=True, proj=True, verbose=True, **kwargs):
'\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n ... | 606,635,800,211,581,400 | Args:
data (list[list] or str):
The data for prediction; both a list of instances and a filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for ... | supar/parsers/dep.py | predict | LiBinNLP/HOSDP | python | def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False, tree=True, proj=True, verbose=True, **kwargs):
'\n Args:\n data (list[list] or str):\n The data for prediction, both a list of instances and filename are allowed.\n pred (str):\n ... |
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load fro... | -6,535,850,461,008,373,000 | Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'vi-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
... | supar/parsers/dep.py | load | LiBinNLP/HOSDP | python | @classmethod
def load(cls, path, reload=False, src=None, **kwargs):
"\n Loads a parser with data fields and pretrained model parameters.\n\n Args:\n path (str):\n - a string with the shortcut name of a pretrained model defined in ``supar.MODEL``\n to load fro... |
def parse_arguments():
'Argument parsing for the script'
parser = argparse.ArgumentParser(description='Liftbridge sub script.')
parser.add_argument('subject', metavar='subject')
parser.add_argument('stream', metavar='stream')
parser.add_argument('-s', '--server', metavar='s', nargs='?', default='127... | 2,814,873,715,566,704,000 | Argument parsing for the script | examples/lift-sub.py | parse_arguments | LaPetiteSouris/python-liftbridge | python | def parse_arguments():
parser = argparse.ArgumentParser(description='Liftbridge sub script.')
parser.add_argument('subject', metavar='subject')
parser.add_argument('stream', metavar='stream')
parser.add_argument('-s', '--server', metavar='s', nargs='?', default='127.0.0.1:9292', help='(default: %(d... |
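The `parse_arguments` row is cut off mid-help-string. A minimal runnable sketch of the same parser is below; the truncated help text is completed as a guess consistent with the visible `'(default: %(d...'` fragment.

```python
import argparse

def parse_arguments():
    """Argument parsing for the script."""
    parser = argparse.ArgumentParser(description='Liftbridge sub script.')
    parser.add_argument('subject', metavar='subject')
    parser.add_argument('stream', metavar='stream')
    # Help string below is truncated in the source; this completion is a guess.
    parser.add_argument('-s', '--server', metavar='s', nargs='?',
                        default='127.0.0.1:9292',
                        help='(default: %(default)s)')
    return parser.parse_args()
```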
def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None):
'\n Provides a proxy protocol policy, which allows an ELB to carry client connection information to a backend.\n \n :param str resource_name: The name of the resource.\n ... | 6,222,031,625,073,402,000 | Provides a proxy protocol policy, which allows an ELB to carry client connection information to a backend.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] instance_ports: List of instance ports to which the policy
shou... | sdk/python/pulumi_aws/ec2/proxy_protocol_policy.py | __init__ | lemonade-hq/pulumi-aws | python | def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None):
'\n Provides a proxy protocol policy, which allows an ELB to carry client connection information to a backend.\n \n :param str resource_name: The name of the resource.\n ...
def q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001):
'Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts.'
shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1)
shifted_qs = (... | -2,858,009,355,907,476,500 | Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts. | mlreflect/curve_fitter/minimizer.py | q_shift_variants | schreiber-lab/mlreflect | python | def q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001):
shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1)
shifted_qs = (np.tile(q_values_input, (n_variants, 1)) + shift)
interpolated_curves = np.zeros((n_variants, le... |
def curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1):
'Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors.'
scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1)
scaled_curves = np.zeros((n_variants, len(correcte... | -7,762,106,858,442,012,000 | Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors. | mlreflect/curve_fitter/minimizer.py | curve_scaling_variants | schreiber-lab/mlreflect | python | def curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1):
scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1)
scaled_curves = np.zeros((n_variants, len(corrected_reflectivity)))
for i in range(n_variants):
scaled_curves[i] = (corrected_reflec... |
def curve_variant_log_mse(curve, variant_curves):
'Calculate the log MSE of a curve and a :class:`ndarray` of curves'
errors = (np.log10(curve) - np.log10(variant_curves))
return np.mean((errors ** 2), axis=1) | 8,469,554,744,767,416,000 | Calculate the log MSE of a curve and a :class:`ndarray` of curves | mlreflect/curve_fitter/minimizer.py | curve_variant_log_mse | schreiber-lab/mlreflect | python | def curve_variant_log_mse(curve, variant_curves):
errors = (np.log10(curve) - np.log10(variant_curves))
return np.mean((errors ** 2), axis=1) |
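Since `curve_variant_log_mse` survives the truncation intact, a quick toy run shows what it computes; the curve values are invented for illustration.

```python
import numpy as np

def curve_variant_log_mse(curve, variant_curves):
    """Calculate the log MSE of a curve and an ndarray of curves."""
    errors = np.log10(curve) - np.log10(variant_curves)
    return np.mean(errors ** 2, axis=1)

curve = np.array([1.0, 0.5, 0.25, 0.125])
variants = curve * np.array([[1.0], [1.1], [0.9]])  # three scaled copies
print(curve_variant_log_mse(curve, variants))
# -> approximately [0.      0.0017  0.0021]; the exact copy scores 0
```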
def least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor, fraction_bounds=(0.5, 0.5, 0.1)):
'Fits the data with a model curve via ``scipy.optimize.curve_fit``, using ``predicted_labels`` as start values.'
prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)... | -8,441,526,859,497,473,000 | Fits the data with a model curve via ``scipy.optimize.curve_fit``, using ``predicted_labels`` as start values. | mlreflect/curve_fitter/minimizer.py | least_log_mean_squares_fit | schreiber-lab/mlreflect | python | def least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor, fraction_bounds=(0.5, 0.5, 0.1)):
prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)[0]
start_values = np.array(prep_labels)[0]
bounds = ([(val - (bound * abs(val))) for (val, bound) in zi... |
def log_mse_loss(prep_labels, data, generator, output_preprocessor):
'MSE loss between a reflectivity curve and a model curve generated with the given normalized labels.'
restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))
model = generator.simulate_reflectivity(restored_labels,... | 2,051,504,903,990,957,800 | MSE loss between a reflectivity curve and a model curve generated with the given normalized labels. | mlreflect/curve_fitter/minimizer.py | log_mse_loss | schreiber-lab/mlreflect | python | def log_mse_loss(prep_labels, data, generator, output_preprocessor):
restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))
model = generator.simulate_reflectivity(restored_labels, progress_bar=False)[0]
loss = mean_squared_error(np.log10(data), np.log10(model))
return los... |
def mean_squared_error(array1, array2):
'Returns element-wise mean squared error between two arrays.'
if (len(array1) != len(array2)):
raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})')
else:
error = (np.asarray(array1) - np.asarray(array2))
... | 2,385,236,979,828,822,000 | Returns element-wise mean squared error between two arrays. | mlreflect/curve_fitter/minimizer.py | mean_squared_error | schreiber-lab/mlreflect | python | def mean_squared_error(array1, array2):
if (len(array1) != len(array2)):
raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})')
else:
error = (np.asarray(array1) - np.asarray(array2))
return np.mean(np.atleast_2d((error ** 2)), axis=1) |
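A quick sanity check of `mean_squared_error` on toy arrays, mirroring the body shown above:

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.5, 2.0])
error = a - b
# Mean of squared errors along the last axis, as in the function above.
print(np.mean(np.atleast_2d(error ** 2), axis=1))  # -> [0.41666667]
```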
@hapic.with_api_doc()
@hapic.output_body(AboutSchema())
def about(self):
'\n This endpoint allows you to check that the API is running. This description\n is generated from the docstring of the method.\n '
return {'version': '1.2.3', 'datetime': datetime.now()} | 5,390,935,259,571,241,000 | This endpoint allows you to check that the API is running. This description
is generated from the docstring of the method. | example/usermanagement/serve_flask_marshmallow.py | about | algoo/hapic | python | @hapic.with_api_doc()
@hapic.output_body(AboutSchema())
def about(self):
'\n This endpoint allows you to check that the API is running. This description\n is generated from the docstring of the method.\n '
return {'version': '1.2.3', 'datetime': datetime.now()} |
@hapic.with_api_doc()
@hapic.output_body(UserDigestSchema(many=True))
def get_users(self):
'\n Obtain users list.\n '
return UserLib().get_users() | -3,739,341,336,160,205,000 | Obtain users list. | example/usermanagement/serve_flask_marshmallow.py | get_users | algoo/hapic | python | @hapic.with_api_doc()
@hapic.output_body(UserDigestSchema(many=True))
def get_users(self):
'\n \n '
return UserLib().get_users() |
@hapic.with_api_doc()
@hapic.handle_exception(UserNotFound, HTTPStatus.NOT_FOUND)
@hapic.input_path(UserIdPathSchema())
@hapic.output_body(UserSchema())
def get_user(self, id, hapic_data: HapicData):
'\n Return a user taken from the list or return a 404\n '
return UserLib().get_user(int(hapic_data... | -8,173,223,262,207,807,000 | Return a user taken from the list or return a 404 | example/usermanagement/serve_flask_marshmallow.py | get_user | algoo/hapic | python | @hapic.with_api_doc()
@hapic.handle_exception(UserNotFound, HTTPStatus.NOT_FOUND)
@hapic.input_path(UserIdPathSchema())
@hapic.output_body(UserSchema())
def get_user(self, id, hapic_data: HapicData):
'\n \n '
return UserLib().get_user(int(hapic_data.path['id'])) |
@hapic.with_api_doc()
@hapic.input_body(UserSchema(exclude=('id',)))
@hapic.output_body(UserSchema())
def add_user(self, hapic_data: HapicData):
'\n Add a user to the list\n '
new_user = User(**hapic_data.body)
return UserLib().add_user(new_user) | 2,054,484,460,010,922,200 | Add a user to the list | example/usermanagement/serve_flask_marshmallow.py | add_user | algoo/hapic | python | @hapic.with_api_doc()
@hapic.input_body(UserSchema(exclude=('id',)))
@hapic.output_body(UserSchema())
def add_user(self, hapic_data: HapicData):
'\n \n '
new_user = User(**hapic_data.body)
return UserLib().add_user(new_user) |
def transform_audio(self, audio_segment):
'Add background noise audio.\n\n Note that this is an in-place transformation.\n\n :param audio_segment: Audio segment to add effects to.\n :type audio_segment: AudioSegmenet|SpeechSegment\n '
noise_json = self._rng.choice(self._noise_manifes... | 515,606,146,558,555,200 | Add background noise audio.
Note that this is an in-place transformation.
:param audio_segment: Audio segment to add effects to.
:type audio_segment: AudioSegmenet|SpeechSegment | deepspeech/frontend/augmentor/noise_perturb.py | transform_audio | qq1440837150/DeepSpeech | python | def transform_audio(self, audio_segment):
'Add background noise audio.\n\n Note that this is an in-place transformation.\n\n :param audio_segment: Audio segment to add effects to.\n :type audio_segment: AudioSegmenet|SpeechSegment\n '
noise_json = self._rng.choice(self._noise_manifes... |
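The `transform_audio` row mixes a randomly chosen noise clip into the signal in place. Below is a generic, hedged numpy sketch of SNR-based noise mixing in the same spirit; this is not the DeepSpeech implementation, and the function name and SNR convention are assumptions.

```python
import numpy as np

def add_noise(signal, noise, snr_db):
    """Mix `noise` into `signal` at the requested signal-to-noise ratio (dB)."""
    signal_power = np.mean(signal ** 2)
    noise_power = np.mean(noise ** 2) + 1e-12  # avoid division by zero
    # Scale so that 10*log10(signal_power / scaled_noise_power) == snr_db.
    scale = np.sqrt(signal_power / (noise_power * 10 ** (snr_db / 10.0)))
    return signal + scale * noise[:len(signal)]
```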
def test_oc_get_ocp_server_version():
'\n This method gets the ocp server version\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_ocp_server_version() | 1,846,085,871,210,349,300 | This method gets the ocp server version
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_oc_get_ocp_server_version | RobertKrawitz/benchmark-runner | python | def test_oc_get_ocp_server_version():
'\n This method gets the ocp server version\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_ocp_server_version() |
def test_oc_get_kata_version():
'\n This method gets the sandboxed containers (kata) version\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_kata_version() | 6,685,231,822,196,794,000 | This method gets the sandboxed containers (kata) version
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_oc_get_kata_version | RobertKrawitz/benchmark-runner | python | def test_oc_get_kata_version():
'\n This method gets the sandboxed containers (kata) version\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_kata_version() |
def test_oc_get_cnv_version():
'\n This method gets the cnv version\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_cnv_version() | 924,310,108,413,792,000 | This method gets the cnv version
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_oc_get_cnv_version | RobertKrawitz/benchmark-runner | python | def test_oc_get_cnv_version():
'\n This method gets the cnv version\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_cnv_version() |
def test_oc_get_ocs_version():
'\n This method gets the ocs version\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_ocs_version() | 8,321,662,595,867,105,000 | This method gets the ocs version
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_oc_get_ocs_version | RobertKrawitz/benchmark-runner | python | def test_oc_get_ocs_version():
'\n This method gets the ocs version\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_ocs_version() |
def test_oc_get_master_nodes():
'\n This method tests getting master nodes\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_master_nodes() | -2,892,055,765,142,323,700 | This method tests getting master nodes
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_oc_get_master_nodes | RobertKrawitz/benchmark-runner | python | def test_oc_get_master_nodes():
'\n This method tests getting master nodes\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_master_nodes() |
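The version/getter tests above all share one pattern: construct `OC`, log in, and assert the getter's result is truthy. A hedged `pytest.mark.parametrize` version could collapse them into one test, assuming the same `OC` and `test_environment_variable` imports as the original test module:

```python
import pytest

@pytest.mark.parametrize('getter', [
    'get_ocp_server_version', 'get_kata_version', 'get_cnv_version',
    'get_ocs_version', 'get_master_nodes',
])
def test_oc_getters(getter):
    # OC and test_environment_variable are assumed imported as in the module.
    oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
    oc.login()
    assert getattr(oc, getter)()
```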
def test_login():
'\n This method tests login\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc.login() | 8,158,043,458,917,190,000 | This method tests login
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_login | RobertKrawitz/benchmark-runner | python | def test_login():
'\n This method tests login\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc.login() |
def test_oc_get_pod_name():
'\n This test runs oc get pod by name\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert (oc._get_pod_name(pod_name='erererer', namespace=test_environment_variable['namespace']) == '') | -593,645,872,348,187,500 | This test runs oc get pod by name
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_oc_get_pod_name | RobertKrawitz/benchmark-runner | python | def test_oc_get_pod_name():
'\n This test runs oc get pod by name\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert (oc._get_pod_name(pod_name='erererer', namespace=test_environment_variable['namespace']) == '') |
def test_oc_get_pods():
'\n This test runs oc get pods\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc.get_pods() | -7,074,968,122,280,273,000 | This test runs oc get pods
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_oc_get_pods | RobertKrawitz/benchmark-runner | python | def test_oc_get_pods():
'\n This test runs oc get pods\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
assert oc.get_pods() |
def test_get_prom_token():
'\n This method returns the prom token from the cluster\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_prom_token() | 4,325,391,949,915,039,000 | This method returns the prom token from the cluster
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_get_prom_token | RobertKrawitz/benchmark-runner | python | def test_get_prom_token():
'\n This method returns the prom token from the cluster\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.get_prom_token() |
def test_is_cnv_installed():
'\n This method checks if the cnv operator is installed\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_cnv_installed() | 1,317,381,338,846,734,600 | This method checks if the cnv operator is installed
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_is_cnv_installed | RobertKrawitz/benchmark-runner | python | def test_is_cnv_installed():
'\n This method checks if the cnv operator is installed\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_cnv_installed() |
def test_is_kata_installed():
'\n This method checks if the sandboxed containers (kata) operator is installed\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_kata_installed() | -48,029,872,576,008,216 | This method checks if the sandboxed containers (kata) operator is installed
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_is_kata_installed | RobertKrawitz/benchmark-runner | python | def test_is_kata_installed():
'\n This method checks if the sandboxed containers (kata) operator is installed\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_kata_installed() |
def test_is_ocs_installed():
'\n This method checks if the ocs operator is installed\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_ocs_installed() | -5,860,578,108,085,043,000 | This method checks if the ocs operator is installed
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_is_ocs_installed | RobertKrawitz/benchmark-runner | python | def test_is_ocs_installed():
'\n This method checks if the ocs operator is installed\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_ocs_installed() |
def test_is_kata_installed():
'\n This method checks if the kata operator is installed\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_kata_installed() | -612,831,646,245,680,800 | This method checks if the kata operator is installed
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_is_kata_installed | RobertKrawitz/benchmark-runner | python | def test_is_kata_installed():
'\n This method checks if the kata operator is installed\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
assert oc.is_kata_installed() |
def test_oc_exec():
'\n Test that oc exec works\n :return:\n '
test_message = 'I am here'
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
answer = oc.exec(pod_name='prometheus-k8s-0', namespace='openshift-monitoring', container='prometheus', command=f'... | 3,037,614,774,960,477,700 | Test that oc exec works
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_oc_exec | RobertKrawitz/benchmark-runner | python | def test_oc_exec():
'\n Test that oc exec works\n :return:\n '
test_message = 'I am here'
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
answer = oc.exec(pod_name='prometheus-k8s-0', namespace='openshift-monitoring', container='prometheus', command=f'... |
def test_collect_prometheus():
'\n Test that Prometheus data can be collected. TBD test that data is valid.\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
with tempfile.TemporaryDirectory() as dirname:
snapshot = PrometheusSnapshot... | -6,243,749,812,123,490,000 | Test that Prometheus data can be collected. TBD test that data is valid.
:return: | tests/integration/benchmark_runner/common/oc/test_oc_without_operator.py | test_collect_prometheus | RobertKrawitz/benchmark-runner | python | def test_collect_prometheus():
'\n Test that Prometheus data can be collected. TBD test that data is valid.\n :return:\n '
oc = OC(kubeadmin_password=test_environment_variable['kubeadmin_password'])
oc.login()
with tempfile.TemporaryDirectory() as dirname:
snapshot = PrometheusSnapshot... |
@property
def splits(self):
'Dictionary of split names and probabilities. Must sum to one.'
raise NotImplementedError() | 401,169,447,897,842,200 | Dictionary of split names and probabilities. Must sum to one. | magenta/models/score2perf/score2perf.py | splits | flyingleafe/magenta | python | @property
def splits(self):
raise NotImplementedError() |
@property
def min_hop_size_seconds(self):
'Minimum hop size in seconds at which to split input performances.'
raise NotImplementedError() | -1,182,965,727,413,683,200 | Minimum hop size in seconds at which to split input performances. | magenta/models/score2perf/score2perf.py | min_hop_size_seconds | flyingleafe/magenta | python | @property
def min_hop_size_seconds(self):
raise NotImplementedError() |
@property
def max_hop_size_seconds(self):
'Maximum hop size in seconds at which to split input performances.'
raise NotImplementedError() | -7,320,718,132,117,424,000 | Maximum hop size in seconds at which to split input performances. | magenta/models/score2perf/score2perf.py | max_hop_size_seconds | flyingleafe/magenta | python | @property
def max_hop_size_seconds(self):
raise NotImplementedError() |
@property
def num_replications(self):
'Number of times entire input performances will be split.'
return 1 | 6,038,184,881,289,907,000 | Number of times entire input performances will be split. | magenta/models/score2perf/score2perf.py | num_replications | flyingleafe/magenta | python | @property
def num_replications(self):
return 1 |
@property
def add_eos_symbol(self):
'Whether to append EOS to encoded performances.'
raise NotImplementedError() | -1,922,712,463,153,412,000 | Whether to append EOS to encoded performances. | magenta/models/score2perf/score2perf.py | add_eos_symbol | flyingleafe/magenta | python | @property
def add_eos_symbol(self):
raise NotImplementedError() |
@property
def absolute_timing(self):
'Whether or not score should use absolute (vs. tempo-relative) timing.'
return False | 8,370,973,809,132,255,000 | Whether or not score should use absolute (vs. tempo-relative) timing. | magenta/models/score2perf/score2perf.py | absolute_timing | flyingleafe/magenta | python | @property
def absolute_timing(self):
return False |
@property
def stretch_factors(self):
'Temporal stretch factors for data augmentation (in datagen).'
return [1.0] | 2,906,986,062,144,383,000 | Temporal stretch factors for data augmentation (in datagen). | magenta/models/score2perf/score2perf.py | stretch_factors | flyingleafe/magenta | python | @property
def stretch_factors(self):
return [1.0] |
@property
def transpose_amounts(self):
'Pitch transposition amounts for data augmentation (in datagen).'
return [0] | -979,399,267,056,224,400 | Pitch transposition amounts for data augmentation (in datagen). | magenta/models/score2perf/score2perf.py | transpose_amounts | flyingleafe/magenta | python | @property
def transpose_amounts(self):
return [0] |
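The `stretch_factors` and `transpose_amounts` properties above default to no-op augmentation (`[1.0]` and `[0]`); subclasses typically override them, and datagen then sweeps their cross product. The values below are hypothetical overrides for illustration, not this problem's settings:

```python
import itertools

stretch_factors = [0.95, 0.975, 1.0, 1.025, 1.05]  # hypothetical override
transpose_amounts = [-3, -2, -1, 0, 1, 2, 3]       # hypothetical override
grid = list(itertools.product(stretch_factors, transpose_amounts))
print(len(grid))  # 35 (stretch, transpose) variants per input performance
```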
@property
def random_crop_length_in_datagen(self):
'Randomly crop targets to this length in datagen.'
return None | 9,185,185,018,205,633,000 | Randomly crop targets to this length in datagen. | magenta/models/score2perf/score2perf.py | random_crop_length_in_datagen | flyingleafe/magenta | python | @property
def random_crop_length_in_datagen(self):
return None |
@property
def random_crop_in_train(self):
'Whether to randomly crop each training example when preprocessing.'
return False | -3,151,171,822,926,777,300 | Whether to randomly crop each training example when preprocessing. | magenta/models/score2perf/score2perf.py | random_crop_in_train | flyingleafe/magenta | python | @property
def random_crop_in_train(self):
return False |
@property
def split_in_eval(self):
'Whether to split each eval example when preprocessing.'
return False | -2,600,506,686,284,337,000 | Whether to split each eval example when preprocessing. | magenta/models/score2perf/score2perf.py | split_in_eval | flyingleafe/magenta | python | @property
def split_in_eval(self):
return False |
def performances_input_transform(self, tmp_dir):
'Input performances beam transform (or dictionary thereof) for datagen.'
raise NotImplementedError() | -5,446,088,655,176,826,000 | Input performances beam transform (or dictionary thereof) for datagen. | magenta/models/score2perf/score2perf.py | performances_input_transform | flyingleafe/magenta | python | def performances_input_transform(self, tmp_dir):
raise NotImplementedError() |
def performance_encoder(self):
'Encoder for target performances.'
return music_encoders.MidiPerformanceEncoder(steps_per_second=STEPS_PER_SECOND, num_velocity_bins=NUM_VELOCITY_BINS, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH, add_eos=self.add_eos_symbol) | 7,870,267,202,908,675,000 | Encoder for target performances. | magenta/models/score2perf/score2perf.py | performance_encoder | flyingleafe/magenta | python | def performance_encoder(self):
return music_encoders.MidiPerformanceEncoder(steps_per_second=STEPS_PER_SECOND, num_velocity_bins=NUM_VELOCITY_BINS, min_pitch=MIN_PITCH, max_pitch=MAX_PITCH, add_eos=self.add_eos_symbol) |
def score_encoders(self):
'List of (name, encoder) tuples for input score components.'
return [] | 5,118,624,544,231,853,000 | List of (name, encoder) tuples for input score components. | magenta/models/score2perf/score2perf.py | score_encoders | flyingleafe/magenta | python | def score_encoders(self):
return [] |
def augment_note_sequence(ns, stretch_factor, transpose_amount):
'Augment a NoteSequence by time stretch and pitch transposition.'
augmented_ns = sequences_lib.stretch_note_sequence(ns, stretch_factor, in_place=False)
try:
(_, num_deleted_notes) = sequences_lib.transpose_note_sequence(augmented_ns, ... | 2,368,470,625,032,840,000 | Augment a NoteSequence by time stretch and pitch transposition. | magenta/models/score2perf/score2perf.py | augment_note_sequence | flyingleafe/magenta | python | def augment_note_sequence(ns, stretch_factor, transpose_amount):
augmented_ns = sequences_lib.stretch_note_sequence(ns, stretch_factor, in_place=False)
try:
(_, num_deleted_notes) = sequences_lib.transpose_note_sequence(augmented_ns, transpose_amount, min_allowed_pitch=MIN_PITCH, max_allowed_pitch=... |
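The `augment_note_sequence` row truncates inside the `try` block. A hedged completion follows; the exception types and error handling are assumptions modeled on typical note_seq/magenta usage, not a verbatim restoration of the source.

```python
# Hedged completion; sequences_lib, MIN_PITCH, MAX_PITCH, chord_symbols_lib,
# and datagen_beam are assumed to come from the surrounding score2perf module.
def augment_note_sequence(ns, stretch_factor, transpose_amount):
    """Augment a NoteSequence by time stretch and pitch transposition."""
    augmented_ns = sequences_lib.stretch_note_sequence(
        ns, stretch_factor, in_place=False)
    try:
        _, num_deleted_notes = sequences_lib.transpose_note_sequence(
            augmented_ns, transpose_amount,
            min_allowed_pitch=MIN_PITCH, max_allowed_pitch=MAX_PITCH,
            in_place=True)
    except chord_symbols_lib.ChordSymbolError:
        raise datagen_beam.DataAugmentationError(
            'Transposition of chord symbol(s) failed.')
    if num_deleted_notes:
        raise datagen_beam.DataAugmentationError(
            'Transposition caused out-of-range pitch(es).')
    return augmented_ns
```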
def __init__(self, trainer):
'\n Generates a path for saving model which can also be used for resuming\n from a checkpoint.\n '
self.trainer = trainer
self.config = self.trainer.config
self.save_dir = self.config.training_parameters.save_dir
self.model_name = self.config.model
... | 1,764,111,408,306,437,600 | Generates a path for saving model which can also be used for resuming
from a checkpoint. | pythia/utils/checkpoint.py | __init__ | likenneth/mmgnn_textvqa | python | def __init__(self, trainer):
'\n Generates a path for saving model which can also be used for resuming\n from a checkpoint.\n '
self.trainer = trainer
self.config = self.trainer.config
self.save_dir = self.config.training_parameters.save_dir
self.model_name = self.config.model
... |
def create_user(self, email, password=None, **extra_fields):
'Create and saves a new user'
if (not email):
raise ValueError('Users must have email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return u... | -6,611,066,487,681,690,000 | Create and saves a new user | app/core/models.py | create_user | shadow-smoke/recipe-app-api | python | def create_user(self, email, password=None, **extra_fields):
if (not email):
raise ValueError('Users must have email address')
user = self.model(email=self.normalize_email(email), **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user |
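A minimal usage sketch for the custom manager above, assuming it is wired to a standard Django custom user model via `AUTH_USER_MODEL`; the credentials are placeholders.

```python
from django.contrib.auth import get_user_model

# Assumes AUTH_USER_MODEL points at the model whose manager is shown above.
user = get_user_model().objects.create_user(
    email='test@example.com', password='secret123')
print(user.email)  # normalized email; the password is stored hashed
```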
def transaction_exists(self, pkglist):
'\n checks the package list to see if any packages are\n involved in an incomplete transaction\n '
conflicts = []
if (not transaction_helpers):
return conflicts
pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist)
unfinished_tra... | 3,814,851,130,299,122,000 | checks the package list to see if any packages are
involved in an incomplete transaction | venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py | transaction_exists | aburan28/ansible-devops-pipeline | python | def transaction_exists(self, pkglist):
'\n checks the package list to see if any packages are\n involved in an incomplete transaction\n '
conflicts = []
if (not transaction_helpers):
return conflicts
pkglist_nvreas = (splitFilename(pkg) for pkg in pkglist)
unfinished_tra... |
def local_envra(self, path):
'return envra of a local rpm passed in'
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
fd = os.open(path, os.O_RDONLY)
try:
header = ts.hdrFromFdno(fd)
except rpm.error as e:
return None
finally:
os.close(fd)
return ... | -6,192,923,276,369,877,000 | return envra of a local rpm passed in | venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py | local_envra | aburan28/ansible-devops-pipeline | python | def local_envra(self, path):
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
fd = os.open(path, os.O_RDONLY)
try:
header = ts.hdrFromFdno(fd)
except rpm.error as e:
return None
finally:
os.close(fd)
return ('%s:%s-%s-%s.%s' % ((header[rpm.RPMTAG... |
def run(self):
'\n actually execute the module code backend\n '
error_msgs = []
if (not HAS_RPM_PYTHON):
error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
if (not HAS_YUM_PYTHON):
... | 6,903,917,648,374,279,000 | actually execute the module code backend | venv/lib/python2.7/site-packages/ansible/modules/packaging/os/yum.py | run | aburan28/ansible-devops-pipeline | python | def run(self):
'\n \n '
error_msgs = []
if (not HAS_RPM_PYTHON):
error_msgs.append('The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.')
if (not HAS_YUM_PYTHON):
error_msgs.append('The Python 2 yum... |
def graph_degree(A):
'\n Returns the degree for the nodes (rows) of a symmetric \n graph in sparse CSR or CSC format, or a qobj.\n \n Parameters\n ----------\n A : qobj, csr_matrix, csc_matrix\n Input quantum object or csr_matrix.\n \n Returns\n -------\n degree : array\n ... | 8,779,110,006,112,680,000 | Returns the degree for the nodes (rows) of a symmetric
graph in sparse CSR or CSC format, or a qobj.
Parameters
----------
A : qobj, csr_matrix, csc_matrix
Input quantum object or csr_matrix.
Returns
-------
degree : array
Array of integers giving the degree for each node (row). | qutip/graph.py | graph_degree | trxw/qutip | python | def graph_degree(A):
'\n Returns the degree for the nodes (rows) of a symmetric \n graph in sparse CSR or CSC format, or a qobj.\n \n Parameters\n ----------\n A : qobj, csr_matrix, csc_matrix\n Input quantum object or csr_matrix.\n \n Returns\n -------\n degree : array\n ... |
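For a symmetric CSR adjacency matrix, the degree of each node is the number of stored entries in its row, which is what the routine above returns. A toy illustration with SciPy (a generic sketch, not the qutip implementation):

```python
import numpy as np
from scipy.sparse import csr_matrix

A = csr_matrix(np.array([[0, 1, 1],
                         [1, 0, 0],
                         [1, 0, 0]]))
degree = np.diff(A.indptr)  # nonzeros per row == node degree (no self-loops)
print(degree)               # -> [2 1 1]
```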
def breadth_first_search(A, start):
'\n Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting\n from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs.\n \n This function requires a matrix with symmetric structure.\n Use A+trans(A) if original matrix is not symme... | -5,681,492,159,195,273,000 | Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting
from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs.
This function requires a matrix with symmetric structure.
Use A+trans(A) if original matrix is not symmetric or not sure.
Parameters
----------
A : qobj, csr_matrix
I... | qutip/graph.py | breadth_first_search | trxw/qutip | python | def breadth_first_search(A, start):
'\n Breadth-First-Search (BFS) of a graph in CSR or CSC matrix format starting\n from a given node (row). Takes Qobjs and CSR or CSC matrices as inputs.\n \n This function requires a matrix with symmetric structure.\n Use A+trans(A) if original matrix is not symme... |
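A generic breadth-first search over an adjacency list, illustrating the visit order and levels the routine above computes for the sparse-matrix case (a textbook sketch, not the qutip code):

```python
from collections import deque

def bfs(adj, start):
    order, level = [], {start: 0}
    queue = deque([start])
    while queue:
        node = queue.popleft()
        order.append(node)
        for nbr in adj[node]:
            if nbr not in level:          # not yet discovered
                level[nbr] = level[node] + 1
                queue.append(nbr)
    return order, level

print(bfs({0: [1, 2], 1: [0], 2: [0, 3], 3: [2]}, start=0))
# -> ([0, 1, 2, 3], {0: 0, 1: 1, 2: 1, 3: 2})
```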
def symrcm(A, sym=False):
'\n Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj\n in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric,\n this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default).\n \n It is assumed by def... | -2,374,158,014,856,256,000 | Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj
in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric,
this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default).
It is assumed by default (*sym=False*) that the input matrix is not symmetric... | qutip/graph.py | symrcm | trxw/qutip | python | def symrcm(A, sym=False):
'\n Returns the permutation array that orders a sparse CSR or CSC matrix or Qobj\n in Reverse-Cuthill McKee ordering. Since the input matrix must be symmetric,\n this routine works on the matrix A+Trans(A) if the sym flag is set to False (Default).\n \n It is assumed by def... |
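SciPy ships an equivalent Reverse Cuthill-McKee ordering; a quick demonstration is below. Passing `symmetric_mode=False` makes SciPy work on A + A.T, matching the `sym=False` behaviour described above.

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import reverse_cuthill_mckee

A = csr_matrix(np.array([[1, 0, 0, 1],
                         [0, 1, 1, 0],
                         [0, 1, 1, 0],
                         [1, 0, 0, 1]]))
perm = reverse_cuthill_mckee(A, symmetric_mode=True)  # A is already symmetric
print(perm)  # node ordering that reduces the bandwidth of A[perm][:, perm]
```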
def bfs_matching(A):
'\n Returns an array of row permutations that removes nonzero elements\n from the diagonal of a nonsingular square CSC sparse matrix. Such\n a permutation is always possible provided that the matrix is \n nonsingular.\n \n This function looks at the structure of the matrix on... | 3,940,556,777,090,186,000 | Returns an array of row permutations that removes nonzero elements
from the diagonal of a nonsingular square CSC sparse matrix. Such
a permutation is always possible provided that the matrix is
nonsingular.
This function looks at the structure of the matrix only.
Parameters
----------
A : csc_matrix
Input matri... | qutip/graph.py | bfs_matching | trxw/qutip | python | def bfs_matching(A):
'\n Returns an array of row permutations that removes nonzero elements\n from the diagonal of a nonsingular square CSC sparse matrix. Such\n a permutation is always possible provided that the matrix is \n nonsingular.\n \n This function looks at the structure of the matrix on... |
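Modern SciPy exposes the same structural (maximum bipartite) matching; a hedged demonstration of obtaining a row permutation with a zero-free diagonal, using CSR input as SciPy's routine expects:

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching

A = csr_matrix(np.array([[0, 1, 0],
                         [1, 0, 0],
                         [0, 0, 1]]))
perm = maximum_bipartite_matching(A, perm_type='row')
print(perm)                          # -> [1 0 2]
print(A[perm].toarray().diagonal())  # nonzero diagonal after permuting rows
```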
def weighted_bfs_matching(A):
'\n Returns an array of row permutations that attempts to maximize\n the product of the ABS values of the diagonal elements in \n a nonsingular square CSC sparse matrix. Such a permutation is \n always possible provided that the matrix is nonsingular.\n \n This functi... | -5,521,932,354,056,884,000 | Returns an array of row permutations that attempts to maximize
the product of the ABS values of the diagonal elements in
a nonsingular square CSC sparse matrix. Such a permutation is
always possible provided that the matrix is nonsingular.
This function looks at both the structure and ABS values of the
underlying m... | qutip/graph.py | weighted_bfs_matching | trxw/qutip | python | def weighted_bfs_matching(A):
'\n Returns an array of row permutations that attempts to maximize\n the product of the ABS values of the diagonal elements in \n a nonsingular square CSC sparse matrix. Such a permutation is \n always possible provided that the matrix is nonsingular.\n \n This functi... |
def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
'A better wrapper over request for deferred signing'
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
... | -5,809,463,524,355,869,000 | A better wrapper over request for deferred signing | python/ccxt/base/exchange.py | fetch2 | newdime/ccxt | python | def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
if self.enableRateLimit:
self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return self.fetch(request['url'], request['metho... |
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
'Exchange.request is the entry point for all generated methods'
return self.fetch2(path, api, method, params, headers, body) | 6,673,804,092,993,897,000 | Exchange.request is the entry point for all generated methods | python/ccxt/base/exchange.py | request | newdime/ccxt | python | def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
return self.fetch2(path, api, method, params, headers, body) |
def find_broadly_matched_key(self, broad, string):
'A helper method for matching error strings exactly vs broadly'
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if (string.find(key) >= 0):
return key
return None | 1,118,882,194,763,658,900 | A helper method for matching error strings exactly vs broadly | python/ccxt/base/exchange.py | find_broadly_matched_key | newdime/ccxt | python | def find_broadly_matched_key(self, broad, string):
keys = list(broad.keys())
for i in range(0, len(keys)):
key = keys[i]
if (string.find(key) >= 0):
return key
return None |
python/ccxt/base/exchange.py | fetch | newdime/ccxt | python | body_hash: -7195045384639707000

def fetch(self, url, method='GET', headers=None, body=None):
    '''Perform an HTTP request and return decoded JSON data'''
    request_headers = self.prepare_request_headers(headers)
    url = self.proxy + url
    if self.verbose:
        print('\nRequest:', method, url, request_headers, body)
    self.logger.debug('%s %s, Request: %s %s', method, url, request_headers, bod...
python/ccxt/base/exchange.py | safe_either | newdime/ccxt | python | body_hash: -2371737021285098500

@staticmethod
def safe_either(method, dictionary, key1, key2, default_value=None):
    '''A helper-wrapper for the safe_value_2() family.'''
    value = method(dictionary, key1)
    return value if value is not None else method(dictionary, key2, default_value)
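A standalone sketch of how safe_either composes with a safe_* accessor; safe_string below is a simplified stand-in for ccxt's helper, not its actual implementation.

def safe_string(dictionary, key, default_value=None):
    value = dictionary.get(key)
    return str(value) if value is not None else default_value

def safe_either(method, dictionary, key1, key2, default_value=None):
    value = method(dictionary, key1)
    return value if value is not None else method(dictionary, key2, default_value)

ticker = {'last_price': 42.5}  # some venues send lastPrice, others last_price
print(safe_either(safe_string, ticker, 'lastPrice', 'last_price'))  # '42.5'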
python/ccxt/base/exchange.py | truncate | newdime/ccxt | python | body_hash: 5881430384757220000

@staticmethod
def truncate(num, precision=0):
    '''Deprecated, use decimal_to_precision instead'''
    if precision > 0:
        decimal_precision = math.pow(10, precision)
        return math.trunc(num * decimal_precision) / decimal_precision
    return int(Exchange.truncate_to_string(num, precision))
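A worked example of the deprecated truncation: digits past `precision` are dropped, never rounded. The zero-precision branch is simplified to int() here instead of delegating to truncate_to_string.

import math

def truncate(num, precision=0):
    if precision > 0:
        decimal_precision = math.pow(10, precision)
        return math.trunc(num * decimal_precision) / decimal_precision
    return int(num)  # simplification of the truncate_to_string fallback

print(truncate(1.2399, 2))  # 1.23 -- truncated, not rounded to 1.24
print(truncate(-7.89, 1))   # -7.8 -- math.trunc moves toward zero

Because binary floats cannot represent most decimal fractions exactly, the multiply-then-trunc step can land on the wrong digit in edge cases, which is one reason the docstring points callers at decimal_to_precision.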
python/ccxt/base/exchange.py | truncate_to_string | newdime/ccxt | python | body_hash: -3156627279850857000

@staticmethod
def truncate_to_string(num, precision=0):
    '''Deprecated, todo: remove references from subclasses'''
    if precision > 0:
        parts = ('{0:.%df}' % precision).format(Decimal(num)).split('.')
        decimal_digits = parts[1][:precision].rstrip('0')
        decimal_digits = decimal_digits if len(decimal_digits) else '0'
        return (parts[0] + '....
python/ccxt/base/exchange.py | check_address | newdime/ccxt | python | body_hash: -2909175738945414700

def check_address(self, address):
    '''Checks that an address is not the same character repeated or an empty sequence'''
    if address is None:
        self.raise_error(InvalidAddress, details='address is None')
    if all(letter == address[0] for letter in address) or len(address) < self.minFundingAddressLength or ' ' in address:
        self.raise_error(InvalidAddress, details=(((('ad...
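The same checks as a self-contained predicate; the minimum-length constant is an assumption standing in for self.minFundingAddressLength.

MIN_FUNDING_ADDRESS_LENGTH = 10  # stand-in for self.minFundingAddressLength

def looks_like_valid_address(address):
    if address is None:
        return False
    # Reject a single repeated character (this also catches the empty string,
    # since all() over an empty generator is True), too-short strings,
    # and embedded whitespace.
    return not (all(letter == address[0] for letter in address)
                or len(address) < MIN_FUNDING_ADDRESS_LENGTH
                or ' ' in address)

print(looks_like_valid_address('aaaaaaaaaaaa'))                        # False
print(looks_like_valid_address('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa'))  # True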
python/ccxt/base/exchange.py | inner | newdime/ccxt | python | body_hash: 3173901515913682400

@functools.wraps(entry)
def inner(_self, params=None):
    '''
    Inner is called when a generated method (publicGetX) is called.
    _self is a reference to self created by function.__get__(exchange, type(exchange))
    https://en.wikipedia.org/wiki/Closure_(computer_programming) equivalent to functools.partial
    '''
    ...
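This closure is the machinery behind ccxt's generated endpoint methods. A hedged sketch of the idea (the method name and the entry body are invented for illustration): attach a plain function to the class, and attribute lookup binds it to each instance via function.__get__, exactly as the docstring notes.

import functools

class Exchange:
    pass

def define_api_method(cls, name, path):
    def entry(self, params=None):
        # A real generated entry would delegate to self.request(path, ...).
        return ('GET', path, params or {})

    @functools.wraps(entry)
    def inner(_self, params=None):
        # _self arrives via function.__get__ when the attribute is looked up
        # on an instance; the closure over `entry` does the actual work.
        return entry(_self, params)

    setattr(cls, name, inner)

define_api_method(Exchange, 'publicGetTicker', 'ticker')
print(Exchange().publicGetTicker({'symbol': 'BTC/USD'}))  # ('GET', 'ticker', {...})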
rasa/data.py | get_core_directory | Amirali-Shirkh/rasa-for-botfront | python | body_hash: -2413637754033914000

def get_core_directory(paths: Optional[Union[Text, List[Text]]]) -> Text:
    '''Recursively collects all Core training files from a list of paths.

    Args:
        paths: List of paths to training files or folders containing them.

    Returns:
        Path to temporary directory containing all found Core training files.
    '''
    ...
rasa/data.py | get_nlu_directory | Amirali-Shirkh/rasa-for-botfront | python | body_hash: 871682756566041500

def get_nlu_directory(paths: Optional[Union[Text, List[Text]]]) -> Text:
    '''Recursively collects all NLU training files from a list of paths.

    Args:
        paths: List of paths to training files or folders containing them.

    Returns:
        Path to temporary directory containing all found NLU training files.
    '''
    ...
rasa/data.py | get_core_nlu_directories | Amirali-Shirkh/rasa-for-botfront | python | body_hash: 4776967156037344000

def get_core_nlu_directories(paths: Optional[Union[Text, List[Text]]]) -> Tuple[Text, Text]:
    '''Recursively collects all training files from a list of paths.

    Args:
        paths: List of paths to training files or folders containing them.

    Returns:
        Path to directory containing the Core files and path to directory
        containing the NLU training files.
    '''
    ...
rasa/data.py | get_core_nlu_files | Amirali-Shirkh/rasa-for-botfront | python | body_hash: 3364048093809867300

def get_core_nlu_files(paths: Optional[Union[Text, List[Text]]]) -> Tuple[List[Text], List[Text]]:
    '''Recursively collects all training files from a list of paths.

    Args:
        paths: List of paths to training files or folders containing them.

    Returns:
        Tuple of paths to story and NLU files.
    '''
    ...
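The four rasa collectors above share one shape: walk every given path, gather candidate files, then route them by type. The rows do not show the underlying _find helpers, so the walk below is an assumption about the mechanics rather than rasa's actual implementation.

import os
from typing import List, Optional, Text, Union

def collect_training_files(paths: Optional[Union[Text, List[Text]]]) -> List[Text]:
    # Accept a single path or a list of paths, then recurse into directories.
    if paths is None:
        return []
    if isinstance(paths, str):
        paths = [paths]
    found: List[Text] = []
    for path in paths:
        if os.path.isfile(path):
            found.append(path)
        elif os.path.isdir(path):
            for root, _, files in os.walk(path):
                found.extend(os.path.join(root, name) for name in files)
    return sorted(found)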
rasa/data.py | is_nlu_file | Amirali-Shirkh/rasa-for-botfront | python | body_hash: -8459099074937874000

def is_nlu_file(file_path: Text) -> bool:
    '''Checks if a file is a Rasa compatible NLU file.

    Args:
        file_path: Path of the file which should be checked.

    Returns:
        `True` if it's an NLU file, otherwise `False`.
    '''
    return loading.guess_format(file_path) != loading.UNK
rasa/data.py | is_story_file | Amirali-Shirkh/rasa-for-botfront | python | body_hash: -1701745109258489300

def is_story_file(file_path: Text) -> bool:
    '''Checks if a file is a Rasa story file.

    Args:
        file_path: Path of the file which should be checked.

    Returns:
        `True` if it's a story file, otherwise `False`.
    '''
    if not file_path.endswith('.md'):
        return False
    try:
        ...
rasa/data.py | is_domain_file | Amirali-Shirkh/rasa-for-botfront | python | body_hash: -5027564738567654000

def is_domain_file(file_path: Text) -> bool:
    '''Checks whether the given file path is a Rasa domain file.

    Args:
        file_path: Path of the file which should be checked.

    Returns:
        `True` if it's a domain file, otherwise `False`.
    '''
    file_name = os.path.basename(file_path)
    return (...
rasa/data.py | is_config_file | Amirali-Shirkh/rasa-for-botfront | python | body_hash: -499820486625838900

def is_config_file(file_path: Text) -> bool:
    '''Checks whether the given file path is a Rasa config file.

    Args:
        file_path: Path of the file which should be checked.

    Returns:
        `True` if it's a Rasa config file, otherwise `False`.
    '''
    file_name = os.path.basename(fil...
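The four predicates above dispatch on content (guess_format), extension plus parseability, and filename, respectively. An illustrative filename-based check in the same style; the accepted names are assumptions, not rasa's actual constants.

import os
from typing import Text

ALLOWED_CONFIG_NAMES = ('config.yml', 'config.yaml')  # assumed filename list

def is_config_file(file_path: Text) -> bool:
    return os.path.basename(file_path) in ALLOWED_CONFIG_NAMES

print(is_config_file('project/config.yml'))  # True
print(is_config_file('project/domain.yml'))  # False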
app/Http/Controllers/Dashboard/Wan_edge_Health.py | login | victornguyen98/luanvan2020 | python | body_hash: 8070638483843328000

def login(self, vmanage_ip, username, password):
    '''Log in to vManage'''
    base_url_str = 'https://%s:8443/' % vmanage_ip
    login_action = 'j_security_check'
    login_data = {'j_username': username, 'j_password': password}
    login_url = base_url_str + login_action
    url = base_url_str + login_url  # note: base_url_str ends up in the URL twice
    sess = requests.sessio...
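As flagged in the snippet, base_url_str ends up in the final URL twice. The sketch below performs the same j_security_check form login while joining the URL once; the HTML-in-body failure check is the common vManage convention, an assumption rather than something taken from this file.

import requests

def vmanage_login(vmanage_ip, username, password):
    base_url = 'https://%s:8443/' % vmanage_ip
    sess = requests.session()
    login_data = {'j_username': username, 'j_password': password}
    response = sess.post(base_url + 'j_security_check', data=login_data, verify=False)
    # A successful j_security_check returns an empty body; an HTML login
    # page in the response means the credentials were rejected.
    if '<html>' in response.text.lower():
        raise RuntimeError('vManage login failed')
    return sess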
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py | list | AikoBB/azure-sdk-for-python | python | body_hash: -4370259222565901300

@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable['_models.OperationListResult']:
    '''Lists all of the available Microsoft.Resources REST API operations.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator-like instance of either OperationListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resou...
    '''
    ...
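A hedged usage sketch for this async pager; the client and credential wiring follows azure-mgmt-resource aio conventions, and the subscription ID is a placeholder.

import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.resource.resources.v2019_08_01.aio import ResourceManagementClient

async def list_operations():
    async with DefaultAzureCredential() as credential:
        async with ResourceManagementClient(credential, '<subscription-id>') as client:
            # The pager fetches further pages lazily as iteration proceeds.
            async for operation in client.operations.list():
                print(operation.name)

asyncio.run(list_operations())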
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py | begin_delete_at_scope | AikoBB/azure-sdk-for-python | python | body_hash: 4929680912637127000

@distributed_trace_async
async def begin_delete_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> AsyncLROPoller[None]:
    '''Deletes a deployment from the deployment history.

    A template deployment that is currently running cannot be deleted. Deleting a template
    deployment removes the associated deployment operations. This is an asynchronous operation that
    returns a status of 202 until the template deployment is successfully deleted. The Loc...
    '''
    ...
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py | check_existence_at_scope | AikoBB/azure-sdk-for-python | python | body_hash: -7118669282943952000

@distributed_trace_async
async def check_existence_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> bool:
    '''Checks whether the deployment exists.

    :param scope: The scope of a deployment.
    :type scope: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: bool, or the result of cls(response)
    :rtype: b...
    '''
    ...
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py | begin_create_or_update_at_scope | AikoBB/azure-sdk-for-python | python | body_hash: 3362381667887774700

@distributed_trace_async
async def begin_create_or_update_at_scope(self, scope: str, deployment_name: str, parameters: '_models.Deployment', **kwargs: Any) -> AsyncLROPoller['_models.DeploymentExtended']:
    '''Deploys resources at a given scope.

    You can provide the template and parameters directly in the request or link to JSON files.

    :param scope: The scope of a deployment.
    :type scope: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param parameters: Additional parameters supplied to t...
    '''
    ...
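A hedged sketch of driving this long-running operation end to end; the model classes follow the v2019_08_01 package named in the path, while the scope, name, and template values are placeholders.

from azure.mgmt.resource.resources.v2019_08_01.models import (
    Deployment, DeploymentMode, DeploymentProperties)

async def deploy_at_scope(client, scope, name, template):
    # Wrap the template in the Deployment/DeploymentProperties envelope.
    parameters = Deployment(
        properties=DeploymentProperties(mode=DeploymentMode.incremental,
                                        template=template))
    poller = await client.deployments.begin_create_or_update_at_scope(scope, name, parameters)
    return await poller.result()  # DeploymentExtended once provisioning completes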
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py | get_at_scope | AikoBB/azure-sdk-for-python | python | body_hash: -2818057004809382000

@distributed_trace_async
async def get_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> '_models.DeploymentExtended':
    '''Gets a deployment.

    :param scope: The scope of a deployment.
    :type scope: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DeploymentExtended, or the result of cls(response)
    :rtype: ~azure...
    '''
    ...
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py | cancel_at_scope | AikoBB/azure-sdk-for-python | python | body_hash: 5202299697922900000

@distributed_trace_async
async def cancel_at_scope(self, scope: str, deployment_name: str, **kwargs: Any) -> None:
    '''Cancels a currently running template deployment.

    You can cancel a deployment only if the provisioningState is Accepted or Running. After the
    deployment is canceled, the provisioningState is set to Canceled. Canceling a template
    deployment stops the currently running template deployment and leaves the resources partial...
    '''
    ...
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py | validate_at_scope | AikoBB/azure-sdk-for-python | python | body_hash: -5833968429805891000

@distributed_trace_async
async def validate_at_scope(self, scope: str, deployment_name: str, parameters: '_models.Deployment', **kwargs: Any) -> '_models.DeploymentValidateResult':
    '''Validates whether the specified template is syntactically correct and will be accepted by Azure
    Resource Manager.

    :param scope: The scope of a deployment.
    :type scope: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param parameters: Parameters to validate.
    :type parameters: ~azure...
    '''
    ...