Dataset schema (column name, dtype, and observed value range or number of classes; nullable columns are marked):

| Column | Type | Range / classes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 245 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 245 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 245 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 996k |
| avg_line_length | float64 | 1.33 to 58.2k |
| max_line_length | int64 | 2 to 323k |
| alphanum_fraction | float64 | 0 to 0.97 |
| content_no_comment | string | length 0 to 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |

Sample rows follow. For each row, the metadata fields are listed first, then the content field, the per-file statistics, the content_no_comment field, and the two boolean flags.
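To make the schema concrete, here is a minimal sketch of loading and filtering rows of this shape with the Hugging Face `datasets` library. The dataset's actual name and location are not stated above, so the `data_files` path is a placeholder, and the nullable columns are defaulted before use.

```python
# Minimal sketch; "data/python-files.jsonl" is a placeholder path, since the
# dataset's actual name/location is not stated in the schema above.
from datasets import load_dataset

ds = load_dataset("json", data_files="data/python-files.jsonl", split="train")

row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])

# *_count and the event datetime columns are nullable, so default them explicitly.
stars = row["max_stars_count"] or 0

# Example filter: keep reasonably code-like files with short lines.
small = ds.filter(lambda r: r["alphanum_fraction"] > 0.5 and r["max_line_length"] < 200)
print(len(small))
```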
hexsha: 790b8439df274a71f1f6f1d643e546fbfd995174 | size: 1,011 | ext: py | lang: Python
max_stars: path=axelrod/strategies/appeaser.py, repo=danilobellini/Axelrod, head=2c9212553e06095c24adcb82a5979279cbdf45fb, licenses=["MIT"], count=null, events=null
max_issues: path=axelrod/strategies/appeaser.py, repo=danilobellini/Axelrod, head=2c9212553e06095c24adcb82a5979279cbdf45fb, licenses=["MIT"], count=1, events=2019-01-22T09:59:52.000Z to 2019-01-22T09:59:52.000Z
max_forks: path=axelrod/strategies/appeaser.py, repo=danilobellini/Axelrod, head=2c9212553e06095c24adcb82a5979279cbdf45fb, licenses=["MIT"], count=null, events=null
content:
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class Appeaser(Player):
"""A player who tries to guess what the opponent wants.
Switch the classifier every time the opponent plays D.
Start with C, switch between C and D when opponent plays D.
Names:
- Appeaser: Original Name by Jochen Müller
"""
name = "Appeaser"
classifier = {
"memory_depth": float("inf"), # Depends on internal memory.
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
if not len(opponent.history):
return C
else:
if opponent.history[-1] == D:
if self.history[-1] == C:
return D
else:
return C
return self.history[-1]
avg_line_length: 25.923077 | max_line_length: 68 | alphanum_fraction: 0.575668
content_no_comment:
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class Appeaser(Player):
name = "Appeaser"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
if not len(opponent.history):
return C
else:
if opponent.history[-1] == D:
if self.history[-1] == C:
return D
else:
return C
return self.history[-1]
is_comment_constant_removed: true | is_sharp_comment_removed: true
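The Appeaser sample above opens with C and then flips between C and D whenever the opponent defects. As a hedged illustration (not part of the dataset row), the strategy can be exercised against a constant defector with the axelrod library, assuming it is installed:

```python
# Sketch assuming the axelrod package is available; Appeaser here is the
# library's own class, equivalent to the sampled file above.
import axelrod as axl

match = axl.Match((axl.Appeaser(), axl.Defector()), turns=6)
print(match.play())
# Expected pattern: Appeaser opens with C, then alternates C/D because the
# Defector plays D every turn.
```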
hexsha: 790b8540dbd4966bf80310f8f5822def5402e059 | size: 2,216 | ext: py | lang: Python
max_stars: path=history/archiver_vendor.py, repo=evandroforks/CrowdAnki, head=00a8ca39650f1eee2258d0d087f89600338238c1, licenses=["MIT"], count=null, events=null
max_issues: path=history/archiver_vendor.py, repo=evandroforks/CrowdAnki, head=00a8ca39650f1eee2258d0d087f89600338238c1, licenses=["MIT"], count=null, events=null
max_forks: path=history/archiver_vendor.py, repo=evandroforks/CrowdAnki, head=00a8ca39650f1eee2258d0d087f89600338238c1, licenses=["MIT"], count=null, events=null
content:
from dataclasses import field, dataclass
from pathlib import Path
from typing import Any
from .anki_deck_archiver import AnkiDeckArchiver
from .archiver import AllDeckArchiver
from .dulwich_repo import DulwichAnkiRepo
from ..anki.adapters.deck_manager import AnkiStaticDeckManager, DeckManager
from ..anki.ui.utils import progress_indicator
from ..config.config_settings import ConfigSettings
from ..export.anki_exporter import AnkiJsonExporter
from ..utils.notifier import Notifier, AnkiTooltipNotifier
@dataclass
class ArchiverVendor:
window: Any
config: ConfigSettings
notifier: Notifier = field(default_factory=AnkiTooltipNotifier)
@property
def deck_manager(self) -> DeckManager:
return AnkiStaticDeckManager(self.window.col.decks)
def all_deck_archiver(self):
return AllDeckArchiver(
self.deck_manager,
lambda deck: AnkiDeckArchiver(deck,
self.config.full_snapshot_path,
AnkiJsonExporter(self.window.col, self.config),
DulwichAnkiRepo))
def snapshot_path(self):
return Path(self.config.snapshot_path)
def do_manual_snapshot(self):
self.do_snapshot('CrowdAnki: Manual snapshot')
def snapshot_on_sync(self):
if self.config.automated_snapshot:
self.do_snapshot('CrowdAnki: Snapshot on sync')
def do_snapshot(self, reason):
with progress_indicator(self.window, 'Taking CrowdAnki snapshot of all decks'):
import datetime
print(f"{datetime.datetime.now()} Starting snapshot for {self.config.full_snapshot_path}...")
self.all_deck_archiver().archive(overrides=self.overrides(),
reason=reason)
print(f"{datetime.datetime.now()} Finished snapshot for {self.config.full_snapshot_path}...")
self.notifier.info("Snapshot successful",
f"The CrowdAnki snapshot to {str(self.config.full_snapshot_path)} successfully completed")
def overrides(self):
return self.deck_manager.for_names(self.config.snapshot_root_decks)
avg_line_length: 40.290909 | max_line_length: 121 | alphanum_fraction: 0.678249
content_no_comment:
from dataclasses import field, dataclass
from pathlib import Path
from typing import Any
from .anki_deck_archiver import AnkiDeckArchiver
from .archiver import AllDeckArchiver
from .dulwich_repo import DulwichAnkiRepo
from ..anki.adapters.deck_manager import AnkiStaticDeckManager, DeckManager
from ..anki.ui.utils import progress_indicator
from ..config.config_settings import ConfigSettings
from ..export.anki_exporter import AnkiJsonExporter
from ..utils.notifier import Notifier, AnkiTooltipNotifier
@dataclass
class ArchiverVendor:
window: Any
config: ConfigSettings
notifier: Notifier = field(default_factory=AnkiTooltipNotifier)
@property
def deck_manager(self) -> DeckManager:
return AnkiStaticDeckManager(self.window.col.decks)
def all_deck_archiver(self):
return AllDeckArchiver(
self.deck_manager,
lambda deck: AnkiDeckArchiver(deck,
self.config.full_snapshot_path,
AnkiJsonExporter(self.window.col, self.config),
DulwichAnkiRepo))
def snapshot_path(self):
return Path(self.config.snapshot_path)
def do_manual_snapshot(self):
self.do_snapshot('CrowdAnki: Manual snapshot')
def snapshot_on_sync(self):
if self.config.automated_snapshot:
self.do_snapshot('CrowdAnki: Snapshot on sync')
def do_snapshot(self, reason):
with progress_indicator(self.window, 'Taking CrowdAnki snapshot of all decks'):
import datetime
print(f"{datetime.datetime.now()} Starting snapshot for {self.config.full_snapshot_path}...")
self.all_deck_archiver().archive(overrides=self.overrides(),
reason=reason)
print(f"{datetime.datetime.now()} Finished snapshot for {self.config.full_snapshot_path}...")
self.notifier.info("Snapshot successful",
f"The CrowdAnki snapshot to {str(self.config.full_snapshot_path)} successfully completed")
def overrides(self):
return self.deck_manager.for_names(self.config.snapshot_root_decks)
is_comment_constant_removed: true | is_sharp_comment_removed: true
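ArchiverVendor in the sample above wires together the deck manager, the JSON exporter, and a dulwich-backed git repository, and exposes both manual and on-sync snapshots. A hypothetical invocation from inside an Anki add-on might look like the following; `mw` is Anki's main-window object from `aqt`, and the ConfigSettings construction is assumed rather than taken from the row.

```python
# Hypothetical sketch, not from the dataset row: driving the vendor from an
# Anki add-on. The ConfigSettings construction is assumed.
from aqt import mw  # Anki's main window object

config = ConfigSettings()          # assumed: loaded/initialised elsewhere in CrowdAnki
vendor = ArchiverVendor(window=mw, config=config)

vendor.do_manual_snapshot()        # snapshot immediately
vendor.snapshot_on_sync()          # snapshot only if config.automated_snapshot is set
```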
hexsha: 790b85911df7f26d94219aca324a2a9839d796a7 | size: 262 | ext: py | lang: Python
max_stars: path=pydsge/examples/dfi_funcs.py, repo=florabudianto/pydsge, head=51ea4c206e481866f92398cb573852e48fea7335, licenses=["MIT"], count=2, events=2022-02-15T10:39:24.000Z to 2022-02-15T10:40:26.000Z
max_issues: path=pydsge/examples/dfi_funcs.py, repo=florabudianto/pydsge, head=51ea4c206e481866f92398cb573852e48fea7335, licenses=["MIT"], count=4, events=2021-12-31T16:27:48.000Z to 2022-01-27T17:16:19.000Z
max_forks: path=pydsge/examples/dfi_funcs.py, repo=pcschreiber1/pydsge_OSE_Project_Fork, head=4222dbe187e47958d2f5b732615c9ba97547f67a, licenses=["MIT"], count=1, events=2022-02-15T10:40:32.000Z to 2022-02-15T10:40:32.000Z
content:
#!/bin/python
# -*- coding: utf-8 -*-
# import numpy as np
# define additional functions used in the *.yaml.
# Of course, as this is a trivial function you could have defined it in the *.yaml directly
def calc_nu(nub):
nu = nub / (1 - nub)
return nu
avg_line_length: 20.153846 | max_line_length: 91 | alphanum_fraction: 0.656489
content_no_comment:
def calc_nu(nub):
nu = nub / (1 - nub)
return nu
is_comment_constant_removed: true | is_sharp_comment_removed: true
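The sampled helper maps a share-like parameter nub onto nu = nub / (1 - nub), so for example nub = 0.5 gives nu = 1.0 and nub = 0.75 gives nu = 3.0. A quick self-contained check of that arithmetic:

```python
# Quick check of the calc_nu sample above (pure arithmetic, no pydsge needed).
def calc_nu(nub):
    nu = nub / (1 - nub)
    return nu

assert calc_nu(0.5) == 1.0
assert calc_nu(0.75) == 3.0
print(calc_nu(0.2))   # 0.25
```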
hexsha: 790b85930358d669705526bb084f3cd1a21e0745 | size: 5,375 | ext: py | lang: Python
max_stars: path=procuratorate/dataocean_judger.py, repo=diudiu/featurefactory, head=ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e, licenses=["MIT"], count=null, events=null
max_issues: path=procuratorate/dataocean_judger.py, repo=diudiu/featurefactory, head=ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e, licenses=["MIT"], count=null, events=null
max_forks: path=procuratorate/dataocean_judger.py, repo=diudiu/featurefactory, head=ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e, licenses=["MIT"], count=null, events=null
content:
# -*- coding:utf-8 -*-
"""
Copyright (c) 2013-2016 SYPH, All Rights Reserved.
-----------------------------------------------------------
Author: S.JunPeng
Date: 2016/12/22
Change Activity:
"""
import logging
import json
from vendor.utils.encrypt import Cryption
from apps.common.models import ClientOverview
from apps.remote.models import FeatureFieldRel
from apps.etl.context import ApplyContext
from vendor.errors.api_errors import *
logger = logging.getLogger('apps.featureapi')
class Judger(object):
"""
1.authentication (_check_identity)
2.data decryption (_decrypt)
3.check availability of arguments (_args_useful_check)
4.throw the Exceptions
5.finally check all works
"""
def __init__(self, client_code, data):
self.client_code = client_code
self.client_id = ''
self.client_secret = ''
self.des_key = ''
self.origin_data = data
self.cryption = Cryption()
self.apply_id = ''
self.target_features = []
self.arguments = {}
self.ret_msg = []
def _check_sum(self):
if self.client_id and self.client_secret and self.des_key and self.target_features and self.arguments \
and (len(self.target_features) == len(self.ret_msg)):
return True
else:
return False
def _check_identity(self):
client_package = ClientOverview.objects.filter(client_code=self.client_code)
if not client_package:
logger.error('Response from the function of `judge._check_identity`, error_msg=%s, rel_err_msg=%s'
% (UserIdentityError.message, 'No data in ClientOverview'), exc_info=True)
raise UserIdentityError # E02
client_package = client_package[0]
self.client_id = client_package.client_id
self.client_secret = client_package.client_secret
self.des_key = client_package.des_key
def encrypt(self, data):
json_data = json.dumps(data)
des_data = Cryption.aes_base64_encrypt(json_data, self.des_key)
return des_data
def _decrypt(self):
try:
json_data = Cryption.aes_base64_decrypt(self.origin_data, self.des_key)
message = json.loads(json_data)
except Exception as e:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (EncryptError.message, e.message), exc_info=True)
raise EncryptError # E03
self.apply_id = message.get('apply_id', None)
if not self.apply_id:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetApplyIdError.message, "Missing apply_id in the post_data"), exc_info=True)
raise GetApplyIdError # E04
self.target_features = message.get('res_keys', None)
if not self.target_features:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetResKeysError.message, "Missing res_keys in the post_data"), exc_info=True)
raise GetResKeysError # E05
apply_base = ApplyContext(self.apply_id)
self.arguments = apply_base.load()
if not self.arguments:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetArgumentsError.message, "Missing arguments in the post_data"), exc_info=True)
raise GetArgumentsError # E06
def _args_useful_check(self):
"""
need sql which mapping the target features and arguments
:return:
"""
arg_msg_list = FeatureFieldRel.objects.filter(
feature_name__in=self.target_features,
is_delete=False,
)
for arg_msg in arg_msg_list:
if arg_msg.raw_field_name in self.arguments.keys():
if self.ret_msg and (arg_msg.feature_name == (self.ret_msg[-1])['target_field_name']):
sub_msg = self.ret_msg[-1]
if arg_msg.feature_name == sub_msg['target_field_name']:
sub_msg['arguments'].update({
arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name],
})
self.ret_msg[-1] = sub_msg
else:
temp_msg = {
'data_identity': arg_msg.data_identity,
'target_field_name': arg_msg.feature_name,
'arguments': {
arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name],
}
}
self.ret_msg.append(temp_msg)
else:
logger.error('Response from the function of `judge._args_useful_check`, error_msg=%s, rel_err_msg=%s'
% (ArgumentsAvailableError.message, "Arguments are not enough to get all res_keys"),
exc_info=True)
raise ArgumentsAvailableError # E07
def work_stream(self):
self._check_identity()
self._decrypt()
self._args_useful_check()
return self._check_sum()
avg_line_length: 39.814815 | max_line_length: 117 | alphanum_fraction: 0.596093
content_no_comment:
import logging
import json
from vendor.utils.encrypt import Cryption
from apps.common.models import ClientOverview
from apps.remote.models import FeatureFieldRel
from apps.etl.context import ApplyContext
from vendor.errors.api_errors import *
logger = logging.getLogger('apps.featureapi')
class Judger(object):
def __init__(self, client_code, data):
self.client_code = client_code
self.client_id = ''
self.client_secret = ''
self.des_key = ''
self.origin_data = data
self.cryption = Cryption()
self.apply_id = ''
self.target_features = []
self.arguments = {}
self.ret_msg = []
def _check_sum(self):
if self.client_id and self.client_secret and self.des_key and self.target_features and self.arguments \
and (len(self.target_features) == len(self.ret_msg)):
return True
else:
return False
def _check_identity(self):
client_package = ClientOverview.objects.filter(client_code=self.client_code)
if not client_package:
logger.error('Response from the function of `judge._check_identity`, error_msg=%s, rel_err_msg=%s'
% (UserIdentityError.message, 'No data in ClientOverview'), exc_info=True)
raise UserIdentityError
client_package = client_package[0]
self.client_id = client_package.client_id
self.client_secret = client_package.client_secret
self.des_key = client_package.des_key
def encrypt(self, data):
json_data = json.dumps(data)
des_data = Cryption.aes_base64_encrypt(json_data, self.des_key)
return des_data
def _decrypt(self):
try:
json_data = Cryption.aes_base64_decrypt(self.origin_data, self.des_key)
message = json.loads(json_data)
except Exception as e:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (EncryptError.message, e.message), exc_info=True)
raise EncryptError
self.apply_id = message.get('apply_id', None)
if not self.apply_id:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetApplyIdError.message, "Missing apply_id in the post_data"), exc_info=True)
raise GetApplyIdError
self.target_features = message.get('res_keys', None)
if not self.target_features:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetResKeysError.message, "Missing res_keys in the post_data"), exc_info=True)
raise GetResKeysError
apply_base = ApplyContext(self.apply_id)
self.arguments = apply_base.load()
if not self.arguments:
logger.error('Response from the function of `judge._decrypt`, error_msg=%s, rel_err_msg=%s'
% (GetArgumentsError.message, "Missing arguments in the post_data"), exc_info=True)
raise GetArgumentsError
def _args_useful_check(self):
arg_msg_list = FeatureFieldRel.objects.filter(
feature_name__in=self.target_features,
is_delete=False,
)
for arg_msg in arg_msg_list:
if arg_msg.raw_field_name in self.arguments.keys():
if self.ret_msg and (arg_msg.feature_name == (self.ret_msg[-1])['target_field_name']):
sub_msg = self.ret_msg[-1]
if arg_msg.feature_name == sub_msg['target_field_name']:
sub_msg['arguments'].update({
arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name],
})
self.ret_msg[-1] = sub_msg
else:
temp_msg = {
'data_identity': arg_msg.data_identity,
'target_field_name': arg_msg.feature_name,
'arguments': {
arg_msg.raw_field_name: self.arguments[arg_msg.raw_field_name],
}
}
self.ret_msg.append(temp_msg)
else:
logger.error('Response from the function of `judge._args_useful_check`, error_msg=%s, rel_err_msg=%s'
% (ArgumentsAvailableError.message, "Arguments are not enough to get all res_keys"),
exc_info=True)
raise ArgumentsAvailableError
def work_stream(self):
self._check_identity()
self._decrypt()
self._args_useful_check()
return self._check_sum()
is_comment_constant_removed: true | is_sharp_comment_removed: true
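The Judger docstring lays out a fixed pipeline: identity check, payload decryption, argument availability check, and a final consistency check, all driven by work_stream(). A hypothetical caller (for instance a view in the same project) would use it roughly as below; client_code, the encrypted payload, and the helper functions are placeholders, not names from the row.

```python
# Hypothetical sketch, not from the dataset row: driving the Judger pipeline.
# client_code and encrypted_payload are placeholders for the real request data.
judger = Judger(client_code="demo-client", data=encrypted_payload)

try:
    ok = judger.work_stream()       # runs _check_identity, _decrypt, _args_useful_check
except Exception as exc:            # UserIdentityError, EncryptError, ... from api_errors
    handle_api_error(exc)           # assumed error handler
else:
    if ok:
        dispatch(judger.ret_msg)    # assumed: pass per-feature argument messages downstream
```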
hexsha: 790b8927e288ae6d7dde5a943d6d44cbaa5191de | size: 17,915 | ext: py | lang: Python
max_stars: path=src/models/block_mixture_gp_softmax.py, repo=jp2011/spatial-poisson-mixtures, head=9e535a636e710a9fa146cbbd4613ece70ec90791, licenses=["MIT"], count=3, events=2020-06-18T10:57:47.000Z to 2022-03-07T12:13:04.000Z
max_issues: path=src/models/block_mixture_gp_softmax.py, repo=jp2011/spatial-poisson-mixtures, head=9e535a636e710a9fa146cbbd4613ece70ec90791, licenses=["MIT"], count=null, events=null
max_forks: path=src/models/block_mixture_gp_softmax.py, repo=jp2011/spatial-poisson-mixtures, head=9e535a636e710a9fa146cbbd4613ece70ec90791, licenses=["MIT"], count=null, events=null
content:
import logging
import os
import pickle
import sys
from pathlib import Path
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import zsampler
from dotenv import load_dotenv, find_dotenv
from scipy.special import logsumexp, softmax
from src.inference.context_geo import GridContextGeo, gp_inflate_duplicate, gp_deflate_sum
from src.inference.hmc import HMCSampler
from src.inference.priors import BetaPriorWithIntercept, GaussianPrior, GPNonGridPriorSqExpFixed
from src.experiment.visualize import plot_traceplots
class BlockMixtureGpSoftmaxAllocation:
def __init__(self, *, uid=None,
grid_context=None,
K=1,
block_type="msoa",
hmc_all_iterations=100_000,
hmc_burn_in=25_000,
hmc_calibration=50_000,
hmc_info_interval=20_000,
hmc_thinning=5,
verbose=False,
lengthscale=1):
self.uid = uid
self.context = grid_context
self.K = K
self.NN = self.context.mask.shape[0]
self.hmc_thinning = hmc_thinning
self.hmc_info_interval = hmc_info_interval
self.N = grid_context.counts.shape[0]
self.J = self.context.J
# do a random assignment to mixtures
initial_Z = np.zeros((self.N, self.K), dtype=int)
initial_Z[np.arange(self.N), np.random.choice(self.K, self.N)] = 1
self.Z_samples = []
# Create an (N x 1) vector which gives the corresponding block for each cell.
if block_type == "lad":
block_assignment = np.asarray(grid_context.lads)
elif block_type == "msoa":
block_assignment = np.asarray(grid_context.msoas)
elif block_type == "ward":
block_assignment = np.asarray(grid_context.wards)
else:
block_assignment = np.repeat(1, self.N) # a single block
# read in block centroid coordinates
block_centroid_file_path = Path(os.getcwd()) / "data" / "processed" / f"{block_type}-centroids-map.csv"
block_centroids = pd.read_csv(block_centroid_file_path)
self.coord_x = block_centroids["x"].values
self.coord_x = self.coord_x - np.min(self.coord_x)
self.coord_y = block_centroids["y"].values
self.coord_y = self.coord_y - np.min(self.coord_y)
self.block_labels = block_centroids.iloc[:, 1].values
# Create the cell <-> block mapping (mind the ordering of the blocks)
unique_block_labels = np.unique(self.block_labels)
self.block_assignment_numeric = np.zeros(block_assignment.shape[0], dtype=np.int)
for idx_cell, block_label in enumerate(block_assignment):
self.block_assignment_numeric[idx_cell] = np.where(unique_block_labels == block_label)[0]
self.block_assignment = block_assignment
B = np.max(self.block_assignment_numeric) + 1
self.B = B
self.lengthscale = lengthscale
# Priors
self.beta_prior = BetaPriorWithIntercept(a=1, b=0.01)
self.f_prior = GPNonGridPriorSqExpFixed(coord_x=self.coord_x, coord_y=self.coord_y,
variance=100, lengthscale=self.lengthscale)
self.log_theta_prior = GaussianPrior(mean=np.asarray([0]), variance=np.asarray([1e2]))
init_beta_estimand = np.random.normal(0, 1, self.context.J * self.K)
init_beta_mass_matrix = 1e3 * np.ones(self.context.J * self.K)
self.beta_sampler = HMCSampler(func_lpdf=self.beta_loglik,
func_nabla_lpdf=self.nabla_beta_loglik,
func_plot=self.plot_beta if verbose else None,
init_estimand=init_beta_estimand,
init_M_diag=init_beta_mass_matrix,
init_L=20,
init_epsilon=5.0e-2,
n_burnin=hmc_burn_in,
n_calib=hmc_calibration,
S=hmc_all_iterations,
n_info_interval=hmc_info_interval,
thinning=hmc_thinning,
unique_estimation_id=uid,
adaptive=True)
init_f_estimand = np.random.normal(0, 1, B * self.K)
init_f_mass_matrix = 1e4 * np.ones(B * self.K)
self.f_sampler = HMCSampler(func_lpdf=self.f_loglik,
func_nabla_lpdf=self.nabla_f_loglik,
func_plot=self.plot_f if verbose else None,
init_estimand=init_f_estimand,
init_M_diag=init_f_mass_matrix,
init_L=100,
init_epsilon=5.0e-2,
n_burnin=hmc_burn_in,
n_calib=hmc_calibration,
S=hmc_all_iterations,
n_info_interval=hmc_info_interval,
thinning=hmc_thinning,
unique_estimation_id=uid,
adaptive=False)
self.current_beta = self.beta_sampler.estimand
self.current_f = self.f_sampler.estimand
self.current_Z = initial_Z
self.logger = logging.getLogger(__name__)
def beta_loglik(self, beta_estimand):
beta_matrix = np.reshape(beta_estimand, (self.J, self.K), order='F') # build a J x K matrix
Z = self.current_Z
counts = self.context.counts
covariates = self.context.covariates
fixed_effects = np.sum(np.multiply(Z, np.dot(covariates, beta_matrix)), axis=1)
poisson_part = np.sum(np.multiply(counts, fixed_effects) - np.exp(fixed_effects))
beta_part = self.beta_prior.log_pdf(beta_estimand, self.J)
output = poisson_part + beta_part
return output
def nabla_beta_loglik(self, beta_estimand):
beta_matrix = np.reshape(beta_estimand, (self.J, self.K), order='F') # build a J x K matrix
counts = self.context.counts
covariates = self.context.covariates
Z = self.current_Z
fixed_effects = np.sum(np.multiply(Z, np.dot(covariates, beta_matrix)), axis=1)
nabla_beta_matrix = np.zeros(beta_matrix.shape)
nabla_beta_matrix += np.dot(covariates.T, Z * counts[:, np.newaxis])
temp = np.exp(fixed_effects)
nabla_beta_matrix += (- np.dot(covariates.T, Z * temp[:, np.newaxis]))
nabla_beta = nabla_beta_matrix.flatten('F')
nabla_beta += self.beta_prior.nabla_beta_log_pdf(beta_estimand, self.J)
output = nabla_beta
return output
def plot_beta(self, beta_samples):
beta_samples_array = np.asarray(beta_samples)
for k in range(self.K):
beta_k_samples = beta_samples_array[:, (k * self.J):((k + 1) * self.J)]
plot_traceplots(beta_k_samples, self.context.covariates_names)
plt.show()
def sample_Z(self):
beta_matrix = np.reshape(self.current_beta, (self.J, self.K), order='F') # build a J x K matrix
f_matrix = np.reshape(self.current_f, (self.B, self.K), order='F')
Z = self.current_Z
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
counts = self.context.counts
covariates = self.context.covariates
fixed_effects_all = np.dot(covariates, beta_matrix)
counts_matrix = np.repeat(counts.reshape((-1, 1)), self.K, axis=1)
poi_lik = counts_matrix * fixed_effects_all - np.exp(fixed_effects_all)
gp_log_softmax = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]
prob = softmax(poi_lik + gp_log_softmax, axis=1)
new_Z = zsampler.sample_bulk_categorical(Z.astype(np.int64), prob.astype(np.float64))
return new_Z
def f_loglik(self, F_estimand):
f_matrix = np.reshape(F_estimand, (self.B, self.K), order='F')
Z = self.current_Z
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
output = 0
temp = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]
output += np.sum(np.multiply(Z, temp))
for k in range(self.K):
# GP contribution
output += self.f_prior.get_logpdf(f=f_matrix[:, k])
return output
def nabla_f_loglik(self, F_estimand):
f_matrix = np.reshape(F_estimand, (self.B, self.K), order='F')
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
Z = self.current_Z
f_gradient = np.zeros(f_matrix.shape)
# nabla f poisson mixture
temp_matrix = 1 - np.exp(f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis])
inflated_output_matrix = np.multiply(Z, temp_matrix)
f_gradient += gp_deflate_sum(inflated_output_matrix, self.block_assignment_numeric, self.N, self.B, self.K)
for k in range(self.K):
f_gradient[:, k] += self.f_prior.get_nabla_f(f=f_matrix[:, k])
return f_gradient.flatten(order='F')
def plot_f(self, F_samples):
f_array = np.asarray(F_samples).reshape((-1, self.B, self.K), order='F')
S = f_array.shape[0]
# discard irrelevant samples
self.Z_samples = self.Z_samples[(-S):]
Z_samples_array = np.asarray(self.Z_samples)
mixture_allocation = np.zeros((S, self.N, self.K))
mixture_allocation[np.repeat(range(S), self.N), np.tile(range(self.N), S), Z_samples_array.flatten(order='C')] = 1
average_alloc = np.mean(mixture_allocation, axis=0)
for k in range(self.K):
plt.figure()
self.context.plot_realisations(average_alloc[:, k], 111)
plt.show()
# plot a random traceplot
idx1 = np.random.choice(self.B)
plot_traceplots(f_array[:, idx1, :], [f"IDX: {idx1}: K={k}" for k in range(self.K)])
plt.show()
latent_weight_samples = softmax(np.mean(f_array, axis=0), axis=1)
latent_weight_samples_full = gp_inflate_duplicate(latent_weight_samples,
self.block_assignment_numeric,
self.N, self.K)
plt.figure()
for k in range(self.K):
self.context.plot_realisations(latent_weight_samples_full[:, k], 111)
plt.show()
def load_samples_snapshot(self, iteration_no):
beta_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"beta-samples--{self.uid}--{iteration_no}.npy"
F_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"F-samples--{self.uid}--{iteration_no}.npy"
Z_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"Z-samples--{self.uid}--{iteration_no}.npy"
beta_samples = np.load(beta_filepath)
F_samples = np.load(F_filepath)
Z_samples = np.load(Z_filepath)
return beta_samples, Z_samples, F_samples
def __save_output(self, iteration):
folder_name = Path(os.getcwd()) / "models" / "snapshots"
if not os.path.exists(folder_name):
os.makedirs(folder_name)
F_full_path = folder_name / f"F-samples--{self.uid}--{iteration}"
F_samples_array = np.asarray(self.f_sampler.samples)
if F_samples_array.shape[0] > 0:
np.save(F_full_path, F_samples_array[::self.hmc_thinning, :])
beta_full_path = folder_name / f"beta-samples--{self.uid}--{iteration}"
beta_array = np.asarray(self.beta_sampler.samples)
if beta_array.shape[0] > 0:
np.save(beta_full_path, beta_array[::self.hmc_thinning, :])
Z_full_path = folder_name / f"Z-samples--{self.uid}--{iteration}"
Z_array = np.asarray(self.Z_samples)
if Z_array.shape[0] > 0:
np.save(Z_full_path, Z_array[::self.hmc_thinning, :])
def run_sampling(self, number_of_iterations):
iteration = 0
while iteration < number_of_iterations:
##########################################################################################
# BOOKKEEPING
##########################################################################################
# The HMC samplers are independently adaptive and therefore will discard samples during the adaptive phase.
num_current_samples = min(len(self.beta_sampler.samples),
len(self.f_sampler.samples))
self.beta_sampler.samples = self.beta_sampler.samples[(-num_current_samples):]
self.f_sampler.samples = self.f_sampler.samples[(-num_current_samples):]
self.Z_samples = self.Z_samples[(-num_current_samples):]
if (iteration + 1) % self.hmc_info_interval == 0:
self.__save_output(iteration)
##########################################################################################
# SAMPLE BETA
##########################################################################################
self.beta_sampler.sample_one()
self.current_beta = self.beta_sampler.estimand
##########################################################################################
# SAMPLE Z
##########################################################################################
new_Z = self.sample_Z()
self.Z_samples.append(np.where(new_Z > 0)[1])
self.current_Z = new_Z
##########################################################################################
# SAMPLE F
##########################################################################################
self.f_sampler.sample_one()
self.current_f = self.f_sampler.estimand
iteration += 1
self.logger.info("Sampling completed - saving model.")
self.__save_output(iteration)
@click.command()
@click.option('--year', '-y', type=str, default='12013-122015')
@click.option('--type', '-t', default='burglary')
@click.option('--resolution', '-r', type=int, default=400)
@click.option('--model_name', '-m', type=str, default='burglary_raw_4')
@click.option('--interpolation', '-i', type=str, default='weighted')
@click.option('--num_mixtures', '-K', type=int, default=3)
@click.option('--uid', type=str, default=None)
@click.option('--verbose', is_flag=True)
@click.option('--block_type', type=str, default="lad")
@click.option('--collection_unit', type=str, default="lsoa")
@click.option('--lengthscale', type=float, default=1500.0)
def main(year, type, resolution, model_name, interpolation, num_mixtures, uid, verbose,
block_type, collection_unit, lengthscale):
if uid is None:
uid = f"blockmixgp--{block_type}--{type}--{model_name}--{interpolation}--{num_mixtures}--{resolution}-{year}"
log_fmt = '[%(levelname)s] [%(asctime)s] [%(name)s] %(message)s'
datefmt = '%H:%M:%S'
if verbose:
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=log_fmt)
else:
logging.basicConfig(filename=Path('models') / f"log-{uid}.log",
filemode='a',
format=log_fmt,
datefmt=datefmt,
level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.info("Building the context.")
grid_context = GridContextGeo(interpolation=interpolation,
year=year,
resolution=resolution,
crime_type=type,
model_name=model_name,
cov_collection_unit=collection_unit,
covariates_type='raw')
logger.info("Writing sampling context into a file.")
context_filename = Path(os.getcwd()) / "models" / f"context--{uid}.pickle"
with open(context_filename, 'wb') as context_file:
context_info = {
'context': grid_context,
'K': num_mixtures
}
pickle.dump(context_info, context_file)
logger.info("Initialising the model with estimand and mass matrix diagonal")
hmc_all_iterations = 250_000
hmc_info_interval = 50_000
hmc_thinning = 10
hmc_burn_in = 90_000
hmc_calibration = 150_000
model = BlockMixtureGpSoftmaxAllocation(uid=uid,
grid_context=grid_context,
K=num_mixtures,
hmc_info_interval=hmc_info_interval,
hmc_all_iterations=hmc_all_iterations,
hmc_thinning=hmc_thinning,
hmc_burn_in=hmc_burn_in,
hmc_calibration=hmc_calibration,
block_type=block_type,
verbose=verbose,
lengthscale=lengthscale)
model.run_sampling(number_of_iterations=hmc_all_iterations)
logger.info("Procedure finished.")
if __name__ == "__main__":
load_dotenv(find_dotenv())
main()
avg_line_length: 43.801956 | max_line_length: 122 | alphanum_fraction: 0.561708
content_no_comment:
import logging
import os
import pickle
import sys
from pathlib import Path
import click
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import zsampler
from dotenv import load_dotenv, find_dotenv
from scipy.special import logsumexp, softmax
from src.inference.context_geo import GridContextGeo, gp_inflate_duplicate, gp_deflate_sum
from src.inference.hmc import HMCSampler
from src.inference.priors import BetaPriorWithIntercept, GaussianPrior, GPNonGridPriorSqExpFixed
from src.experiment.visualize import plot_traceplots
class BlockMixtureGpSoftmaxAllocation:
def __init__(self, *, uid=None,
grid_context=None,
K=1,
block_type="msoa",
hmc_all_iterations=100_000,
hmc_burn_in=25_000,
hmc_calibration=50_000,
hmc_info_interval=20_000,
hmc_thinning=5,
verbose=False,
lengthscale=1):
self.uid = uid
self.context = grid_context
self.K = K
self.NN = self.context.mask.shape[0]
self.hmc_thinning = hmc_thinning
self.hmc_info_interval = hmc_info_interval
self.N = grid_context.counts.shape[0]
self.J = self.context.J
initial_Z = np.zeros((self.N, self.K), dtype=int)
initial_Z[np.arange(self.N), np.random.choice(self.K, self.N)] = 1
self.Z_samples = []
if block_type == "lad":
block_assignment = np.asarray(grid_context.lads)
elif block_type == "msoa":
block_assignment = np.asarray(grid_context.msoas)
elif block_type == "ward":
block_assignment = np.asarray(grid_context.wards)
else:
block_assignment = np.repeat(1, self.N)
block_centroid_file_path = Path(os.getcwd()) / "data" / "processed" / f"{block_type}-centroids-map.csv"
block_centroids = pd.read_csv(block_centroid_file_path)
self.coord_x = block_centroids["x"].values
self.coord_x = self.coord_x - np.min(self.coord_x)
self.coord_y = block_centroids["y"].values
self.coord_y = self.coord_y - np.min(self.coord_y)
self.block_labels = block_centroids.iloc[:, 1].values
unique_block_labels = np.unique(self.block_labels)
self.block_assignment_numeric = np.zeros(block_assignment.shape[0], dtype=np.int)
for idx_cell, block_label in enumerate(block_assignment):
self.block_assignment_numeric[idx_cell] = np.where(unique_block_labels == block_label)[0]
self.block_assignment = block_assignment
B = np.max(self.block_assignment_numeric) + 1
self.B = B
self.lengthscale = lengthscale
self.beta_prior = BetaPriorWithIntercept(a=1, b=0.01)
self.f_prior = GPNonGridPriorSqExpFixed(coord_x=self.coord_x, coord_y=self.coord_y,
variance=100, lengthscale=self.lengthscale)
self.log_theta_prior = GaussianPrior(mean=np.asarray([0]), variance=np.asarray([1e2]))
init_beta_estimand = np.random.normal(0, 1, self.context.J * self.K)
init_beta_mass_matrix = 1e3 * np.ones(self.context.J * self.K)
self.beta_sampler = HMCSampler(func_lpdf=self.beta_loglik,
func_nabla_lpdf=self.nabla_beta_loglik,
func_plot=self.plot_beta if verbose else None,
init_estimand=init_beta_estimand,
init_M_diag=init_beta_mass_matrix,
init_L=20,
init_epsilon=5.0e-2,
n_burnin=hmc_burn_in,
n_calib=hmc_calibration,
S=hmc_all_iterations,
n_info_interval=hmc_info_interval,
thinning=hmc_thinning,
unique_estimation_id=uid,
adaptive=True)
init_f_estimand = np.random.normal(0, 1, B * self.K)
init_f_mass_matrix = 1e4 * np.ones(B * self.K)
self.f_sampler = HMCSampler(func_lpdf=self.f_loglik,
func_nabla_lpdf=self.nabla_f_loglik,
func_plot=self.plot_f if verbose else None,
init_estimand=init_f_estimand,
init_M_diag=init_f_mass_matrix,
init_L=100,
init_epsilon=5.0e-2,
n_burnin=hmc_burn_in,
n_calib=hmc_calibration,
S=hmc_all_iterations,
n_info_interval=hmc_info_interval,
thinning=hmc_thinning,
unique_estimation_id=uid,
adaptive=False)
self.current_beta = self.beta_sampler.estimand
self.current_f = self.f_sampler.estimand
self.current_Z = initial_Z
self.logger = logging.getLogger(__name__)
def beta_loglik(self, beta_estimand):
beta_matrix = np.reshape(beta_estimand, (self.J, self.K), order='F')
Z = self.current_Z
counts = self.context.counts
covariates = self.context.covariates
fixed_effects = np.sum(np.multiply(Z, np.dot(covariates, beta_matrix)), axis=1)
poisson_part = np.sum(np.multiply(counts, fixed_effects) - np.exp(fixed_effects))
beta_part = self.beta_prior.log_pdf(beta_estimand, self.J)
output = poisson_part + beta_part
return output
def nabla_beta_loglik(self, beta_estimand):
beta_matrix = np.reshape(beta_estimand, (self.J, self.K), order='F')
counts = self.context.counts
covariates = self.context.covariates
Z = self.current_Z
fixed_effects = np.sum(np.multiply(Z, np.dot(covariates, beta_matrix)), axis=1)
nabla_beta_matrix = np.zeros(beta_matrix.shape)
nabla_beta_matrix += np.dot(covariates.T, Z * counts[:, np.newaxis])
temp = np.exp(fixed_effects)
nabla_beta_matrix += (- np.dot(covariates.T, Z * temp[:, np.newaxis]))
nabla_beta = nabla_beta_matrix.flatten('F')
nabla_beta += self.beta_prior.nabla_beta_log_pdf(beta_estimand, self.J)
output = nabla_beta
return output
def plot_beta(self, beta_samples):
beta_samples_array = np.asarray(beta_samples)
for k in range(self.K):
beta_k_samples = beta_samples_array[:, (k * self.J):((k + 1) * self.J)]
plot_traceplots(beta_k_samples, self.context.covariates_names)
plt.show()
def sample_Z(self):
beta_matrix = np.reshape(self.current_beta, (self.J, self.K), order='F')
f_matrix = np.reshape(self.current_f, (self.B, self.K), order='F')
Z = self.current_Z
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
counts = self.context.counts
covariates = self.context.covariates
fixed_effects_all = np.dot(covariates, beta_matrix)
counts_matrix = np.repeat(counts.reshape((-1, 1)), self.K, axis=1)
poi_lik = counts_matrix * fixed_effects_all - np.exp(fixed_effects_all)
gp_log_softmax = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]
prob = softmax(poi_lik + gp_log_softmax, axis=1)
new_Z = zsampler.sample_bulk_categorical(Z.astype(np.int64), prob.astype(np.float64))
return new_Z
def f_loglik(self, F_estimand):
f_matrix = np.reshape(F_estimand, (self.B, self.K), order='F')
Z = self.current_Z
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
output = 0
temp = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]
output += np.sum(np.multiply(Z, temp))
for k in range(self.K):
output += self.f_prior.get_logpdf(f=f_matrix[:, k])
return output
def nabla_f_loglik(self, F_estimand):
f_matrix = np.reshape(F_estimand, (self.B, self.K), order='F')
f_full_matrix = gp_inflate_duplicate(f_matrix,
self.block_assignment_numeric,
self.N, self.K)
Z = self.current_Z
f_gradient = np.zeros(f_matrix.shape)
temp_matrix = 1 - np.exp(f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis])
inflated_output_matrix = np.multiply(Z, temp_matrix)
f_gradient += gp_deflate_sum(inflated_output_matrix, self.block_assignment_numeric, self.N, self.B, self.K)
for k in range(self.K):
f_gradient[:, k] += self.f_prior.get_nabla_f(f=f_matrix[:, k])
return f_gradient.flatten(order='F')
def plot_f(self, F_samples):
f_array = np.asarray(F_samples).reshape((-1, self.B, self.K), order='F')
S = f_array.shape[0]
self.Z_samples = self.Z_samples[(-S):]
Z_samples_array = np.asarray(self.Z_samples)
mixture_allocation = np.zeros((S, self.N, self.K))
mixture_allocation[np.repeat(range(S), self.N), np.tile(range(self.N), S), Z_samples_array.flatten(order='C')] = 1
average_alloc = np.mean(mixture_allocation, axis=0)
for k in range(self.K):
plt.figure()
self.context.plot_realisations(average_alloc[:, k], 111)
plt.show()
idx1 = np.random.choice(self.B)
plot_traceplots(f_array[:, idx1, :], [f"IDX: {idx1}: K={k}" for k in range(self.K)])
plt.show()
latent_weight_samples = softmax(np.mean(f_array, axis=0), axis=1)
latent_weight_samples_full = gp_inflate_duplicate(latent_weight_samples,
self.block_assignment_numeric,
self.N, self.K)
plt.figure()
for k in range(self.K):
self.context.plot_realisations(latent_weight_samples_full[:, k], 111)
plt.show()
def load_samples_snapshot(self, iteration_no):
beta_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"beta-samples--{self.uid}--{iteration_no}.npy"
F_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"F-samples--{self.uid}--{iteration_no}.npy"
Z_filepath = Path(os.getcwd()) / "models" / "snapshots" / f"Z-samples--{self.uid}--{iteration_no}.npy"
beta_samples = np.load(beta_filepath)
F_samples = np.load(F_filepath)
Z_samples = np.load(Z_filepath)
return beta_samples, Z_samples, F_samples
def __save_output(self, iteration):
folder_name = Path(os.getcwd()) / "models" / "snapshots"
if not os.path.exists(folder_name):
os.makedirs(folder_name)
F_full_path = folder_name / f"F-samples--{self.uid}--{iteration}"
F_samples_array = np.asarray(self.f_sampler.samples)
if F_samples_array.shape[0] > 0:
np.save(F_full_path, F_samples_array[::self.hmc_thinning, :])
beta_full_path = folder_name / f"beta-samples--{self.uid}--{iteration}"
beta_array = np.asarray(self.beta_sampler.samples)
if beta_array.shape[0] > 0:
np.save(beta_full_path, beta_array[::self.hmc_thinning, :])
Z_full_path = folder_name / f"Z-samples--{self.uid}--{iteration}"
Z_array = np.asarray(self.Z_samples)
if Z_array.shape[0] > 0:
np.save(Z_full_path, Z_array[::self.hmc_thinning, :])
def run_sampling(self, number_of_iterations):
iteration = 0
while iteration < number_of_iterations:
is_comment_constant_removed: true | is_sharp_comment_removed: true
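In sample_Z above, each cell's mixture probabilities combine the Poisson log-likelihood under each component with a GP-driven log-softmax prior over components, and the sum is renormalised with softmax. A small self-contained numpy/scipy sketch of just that combination step, using made-up toy numbers rather than values from the model:

```python
# Toy illustration of the allocation step in sample_Z (made-up numbers).
import numpy as np
from scipy.special import logsumexp, softmax

counts = np.array([3.0, 0.0])                      # N = 2 cells
fixed_effects_all = np.array([[1.0, -0.5],         # N x K log-rates per component
                              [0.2,  0.1]])
f_full_matrix = np.array([[0.3, -0.3],             # N x K GP values per component
                          [0.0,  0.0]])

# Poisson log-likelihood term per cell and component (up to a constant).
poi_lik = counts[:, np.newaxis] * fixed_effects_all - np.exp(fixed_effects_all)

# GP prior over components, expressed as a log-softmax across K.
gp_log_softmax = f_full_matrix - logsumexp(f_full_matrix, axis=1)[:, np.newaxis]

# Per-cell allocation probabilities used to draw the new Z.
prob = softmax(poi_lik + gp_log_softmax, axis=1)
print(prob)        # each row sums to 1
```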
hexsha: 790b89703b3e2dfa5ed76b30a81ee851ae89b036 | size: 48,850 | ext: py | lang: Python
max_stars: path=supar/parsers/dep.py, repo=LiBinNLP/HOSDP, head=f0806d1c27c9d5233002836e1825a1567891d928, licenses=["MIT"], count=4, events=2022-01-28T18:32:54.000Z to 2022-02-07T08:31:35.000Z
max_issues: path=supar/parsers/dep.py, repo=LiBinNLP/HOSDP, head=f0806d1c27c9d5233002836e1825a1567891d928, licenses=["MIT"], count=null, events=null
max_forks: path=supar/parsers/dep.py, repo=LiBinNLP/HOSDP, head=f0806d1c27c9d5233002836e1825a1567891d928, licenses=["MIT"], count=null, events=null
content:
# -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
from supar.models import (BiaffineDependencyModel, CRF2oDependencyModel,
CRFDependencyModel, VIDependencyModel)
from supar.parsers.parser import Parser
from supar.utils import Config, Dataset, Embedding
from supar.utils.common import BOS, PAD, UNK
from supar.utils.field import ChartField, Field, RawField, SubwordField
from supar.utils.fn import ispunct
from supar.utils.logging import get_logger, progress_bar
from supar.utils.metric import AttachmentMetric
from supar.utils.transform import CoNLL
logger = get_logger(__name__)
class BiaffineDependencyParser(Parser):
r"""
The implementation of Biaffine Dependency Parser :cite:`dozat-etal-2017-biaffine`.
"""
NAME = 'biaffine-dependency'
MODEL = BiaffineDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.TAG = self.transform.CPOS
self.ARC, self.REL = self.transform.HEAD, self.transform.DEPREL
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000,
punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
tree=True, proj=False, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'biaffine-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('biaffine-dep-en')
>>> parser = Parser.load('./ptb.biaffine.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_rel = self.model(words, feats)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.softmax(-1).unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
@classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
r"""
Build a brand-new Parser, including initialization of all data fields and model parameters.
Args:
path (str):
The path of the model to be saved.
min_freq (str):
The minimum frequency needed to include a token in the vocabulary.
Required if taking words as encoder input.
Default: 2.
fix_len (int):
The max length of all subword pieces. The excess part of each piece will be truncated.
Required if using CharLSTM/BERT.
Default: 20.
kwargs (dict):
A dict holding the unconsumed arguments.
"""
args = Config(**locals())
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
os.makedirs(os.path.dirname(path) or './', exist_ok=True)
if os.path.exists(path) and not args.build:
parser = cls.load(**args)
parser.model = cls.MODEL(**parser.args)
parser.model.load_pretrained(parser.WORD.embed).to(args.device)
return parser
logger.info("Building the fields")
TAG, CHAR, ELMO, BERT = None, None, None, None
if args.encoder != 'lstm':
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
WORD = SubwordField('words',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
WORD.vocab = t.get_vocab()
else:
WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True)
if 'tag' in args.feat:
TAG = Field('tags', bos=BOS)
if 'char' in args.feat:
CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len)
if 'elmo' in args.feat:
from allennlp.modules.elmo import batch_to_ids
ELMO = RawField('elmo')
ELMO.compose = lambda x: batch_to_ids(x).to(WORD.device)
if 'bert' in args.feat:
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
BERT = SubwordField('bert',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
BERT.vocab = t.get_vocab()
TEXT = RawField('texts')
ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs)
REL = Field('rels', bos=BOS)
transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=ARC, DEPREL=REL)
train = Dataset(transform, args.train)
if args.encoder == 'lstm':
WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))
if TAG is not None:
TAG.build(train)
if CHAR is not None:
CHAR.build(train)
REL.build(train)
args.update({
'n_words': len(WORD.vocab) if args.encoder != 'lstm' else WORD.vocab.n_init,
'n_rels': len(REL.vocab),
'n_tags': len(TAG.vocab) if TAG is not None else None,
'n_chars': len(CHAR.vocab) if CHAR is not None else None,
'char_pad_index': CHAR.pad_index if CHAR is not None else None,
'bert_pad_index': BERT.pad_index if BERT is not None else None,
'pad_index': WORD.pad_index,
'unk_index': WORD.unk_index,
'bos_index': WORD.bos_index
})
logger.info(f"{transform}")
logger.info("Building the model")
model = cls.MODEL(**args).load_pretrained(WORD.embed if hasattr(WORD, 'embed') else None).to(args.device)
logger.info(f"{model}\n")
return cls(args, model, transform)
class CRFDependencyParser(BiaffineDependencyParser):
r"""
The implementation of first-order CRF Dependency Parser :cite:`zhang-etal-2020-efficient`.
"""
NAME = 'crf-dependency'
MODEL = CRFDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
mbr=True, tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'crf-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('crf-dep-en')
>>> parser = Parser.load('./ptb.crf.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.mbr, self.args.partial)
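            # scale the loss so gradients accumulate to an average over update_steps mini-batches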
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.mbr, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_rel = self.model(words, feats)
if self.args.mbr:
s_arc = self.model.crf(s_arc, mask, mbr=True)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
arc_probs = s_arc if self.args.mbr else s_arc.softmax(-1)
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
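# A minimal usage sketch for the CRF parser, kept as comments so the module
# stays importable (assumes the pretrained 'crf-dep-en' model from the ``load``
# docstring is downloadable; purely illustrative):
#
#     >>> from supar import Parser
#     >>> parser = Parser.load('crf-dep-en')
#     >>> dataset = parser.predict([['I', 'saw', 'Sarah', 'with', 'a', 'telescope', '.']],
#     ...                          prob=True, verbose=False)
#     >>> print(dataset.sentences[0])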
class CRF2oDependencyParser(BiaffineDependencyParser):
r"""
The implementation of second-order CRF Dependency Parser :cite:`zhang-etal-2020-efficient`.
"""
NAME = 'crf2o-dependency'
MODEL = CRF2oDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
            tree (bool):
                If ``True``, ensures the output trees are well-formed. Default: ``False``.
            proj (bool):
                If ``True``, ensures the output trees are projective. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
            data (str):
                The data for evaluation; both a list of instances and a filename are allowed.
            buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
            tree (bool):
                If ``True``, ensures the output trees are well-formed. Default: ``True``.
            proj (bool):
                If ``True``, ensures the output trees are projective. Default: ``True``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
mbr=True, tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
                The data for prediction; both a list of instances and a filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
            buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
            tree (bool):
                If ``True``, ensures the output trees are well-formed. Default: ``True``.
            proj (bool):
                If ``True``, ensures the output trees are projective. Default: ``True``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'crf2o-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('crf2o-dep-en')
>>> parser = Parser.load('./ptb.crf2o.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, sibs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.args.mbr, self.args.partial)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, sibs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.args.mbr, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask, self.args.tree, self.args.mbr, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_sib, s_rel = self.model(words, feats)
if self.args.mbr:
s_arc = self.model.crf((s_arc, s_sib), mask, mbr=True)
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask, self.args.tree, self.args.mbr, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
arc_probs = s_arc if self.args.mbr else s_arc.softmax(-1)
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
@classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
r"""
Build a brand-new Parser, including initialization of all data fields and model parameters.
Args:
path (str):
The path of the model to be saved.
            min_freq (int):
                The minimum frequency needed to include a token in the vocabulary. Default: 2.
fix_len (int):
The max length of all subword pieces. The excess part of each piece will be truncated.
Required if using CharLSTM/BERT.
Default: 20.
kwargs (dict):
A dict holding the unconsumed arguments.
"""
args = Config(**locals())
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
os.makedirs(os.path.dirname(path) or './', exist_ok=True)
if os.path.exists(path) and not args.build:
parser = cls.load(**args)
parser.model = cls.MODEL(**parser.args)
parser.model.load_pretrained(parser.WORD.embed).to(args.device)
return parser
logger.info("Building the fields")
TAG, CHAR, ELMO, BERT = None, None, None, None
if args.encoder != 'lstm':
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
WORD = SubwordField('words',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
WORD.vocab = t.get_vocab()
else:
WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True)
if 'tag' in args.feat:
TAG = Field('tags', bos=BOS)
if 'char' in args.feat:
CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len)
if 'elmo' in args.feat:
from allennlp.modules.elmo import batch_to_ids
ELMO = RawField('elmo')
ELMO.compose = lambda x: batch_to_ids(x).to(WORD.device)
if 'bert' in args.feat:
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
BERT = SubwordField('bert',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
BERT.vocab = t.get_vocab()
TEXT = RawField('texts')
ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs)
SIB = ChartField('sibs', bos=BOS, use_vocab=False, fn=CoNLL.get_sibs)
REL = Field('rels', bos=BOS)
transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=(ARC, SIB), DEPREL=REL)
train = Dataset(transform, args.train)
if args.encoder == 'lstm':
WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))
if TAG is not None:
TAG.build(train)
if CHAR is not None:
CHAR.build(train)
REL.build(train)
args.update({
'n_words': len(WORD.vocab) if args.encoder != 'lstm' else WORD.vocab.n_init,
'n_rels': len(REL.vocab),
'n_tags': len(TAG.vocab) if TAG is not None else None,
'n_chars': len(CHAR.vocab) if CHAR is not None else None,
'char_pad_index': CHAR.pad_index if CHAR is not None else None,
'bert_pad_index': BERT.pad_index if BERT is not None else None,
'pad_index': WORD.pad_index,
'unk_index': WORD.unk_index,
'bos_index': WORD.bos_index
})
logger.info(f"{transform}")
logger.info("Building the model")
model = cls.MODEL(**args).load_pretrained(WORD.embed if hasattr(WORD, 'embed') else None).to(args.device)
logger.info(f"{model}\n")
return cls(args, model, transform)
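# A hedged sketch of building and training a second-order parser from scratch
# with ``build``; every file path and hyper-parameter below is hypothetical:
#
#     >>> parser = CRF2oDependencyParser.build('exp/ptb.crf2o/model',
#     ...                                      train='data/ptb/train.conllx',
#     ...                                      encoder='lstm', feat=['char'])
#     >>> parser.train(train='data/ptb/train.conllx', dev='data/ptb/dev.conllx',
#     ...              test='data/ptb/test.conllx', buckets=32, batch_size=5000)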
class VIDependencyParser(BiaffineDependencyParser):
r"""
The implementation of Dependency Parser using Variational Inference (:cite:`wang-tu-2020-second`).
"""
NAME = 'vi-dependency'
MODEL = VIDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
            tree (bool):
                If ``True``, ensures the output trees are well-formed. Default: ``False``.
            proj (bool):
                If ``True``, ensures the output trees are projective. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
            data (str):
                The data for evaluation; both a list of instances and a filename are allowed.
            buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
            tree (bool):
                If ``True``, ensures the output trees are well-formed. Default: ``True``.
            proj (bool):
                If ``True``, ensures the output trees are projective. Default: ``True``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
                The data for prediction; both a list of instances and a filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
            buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
            tree (bool):
                If ``True``, ensures the output trees are well-formed. Default: ``True``.
            proj (bool):
                If ``True``, ensures the output trees are projective. Default: ``True``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'vi-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('vi-dep-en')
>>> parser = Parser.load('./ptb.vi.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_sib, s_rel = self.model(words, feats)
s_arc = self.model.inference((s_arc, s_sib), mask)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
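# A minimal evaluation sketch for the variational-inference parser (the model
# name comes from the ``load`` docstring; the data path is hypothetical):
#
#     >>> parser = VIDependencyParser.load('vi-dep-en')
#     >>> loss, metric = parser.evaluate('data/ptb/test.conllx', verbose=False)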
790b89af260321ccc15fa02ebd7012c038573d0b | 1,456 | py | Python | pyvisdk/do/virtual_machine_runtime_info.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | ["MIT"]
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VirtualMachineRuntimeInfo(vim, *args, **kwargs):
'''The RuntimeInfo data object type provides information about the execution state
and history of a virtual machine.'''
obj = vim.client.factory.create('{urn:vim25}VirtualMachineRuntimeInfo')
# do some validation checking...
    if (len(args) + len(kwargs)) < 7:
        raise IndexError('Expected at least 7 arguments, got: %d' % (len(args) + len(kwargs)))
required = [ 'connectionState', 'consolidationNeeded', 'faultToleranceState',
'numMksConnections', 'powerState', 'recordReplayState', 'toolsInstallerMounted' ]
optional = [ 'bootTime', 'cleanPowerOff', 'dasVmProtection', 'device', 'host',
'maxCpuUsage', 'maxMemoryUsage', 'memoryOverhead', 'minRequiredEVCModeKey',
'needSecondaryReason', 'question', 'suspendInterval', 'suspendTime',
'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
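# A hedged usage sketch (the `vim` service instance and every property value
# below are hypothetical; the seven required properties are positional and
# must come first):
#
#     >>> info = VirtualMachineRuntimeInfo(vim, 'connected', False, 'notConfigured',
#     ...                                  0, 'poweredOn', 'inactive', False)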
790b89f0bbbe8f19e33245d054a82e4e97f904d5 | 7,302 | py | Python | src/otoole/preprocess/narrow_to_datafile.py | chrwm/otoole | f527eb1fdf75cc6872457a6e5145f678f5d34693 | ["MIT"]
import logging
import sys
from abc import abstractmethod
from typing import TextIO
import pandas as pd
from datapackage import Package
from pandas_datapackage_reader import read_datapackage
from sqlalchemy import create_engine
from otoole import read_packaged_file
logger = logging.getLogger(__name__)
class DataPackageTo(object):
"""Convert a data package to another format
Arguments
---------
datapackage: str
The path to the databackage
datafilepath: str
The path to the destination file or folder
sql: bool, default=False
Flag to set whether the source datapackage is in sqlite format
"""
def __init__(self, datapackage: str, datafilepath: str, sql: bool = False):
self.datapackage = datapackage
self.datafilepath = datafilepath
self.sql = sql
self.package = self._get_package()
self.default_values = self._get_default_values()
self.config = read_packaged_file("config.yaml", "otoole.preprocess")
def _get_package(self):
if self.sql:
engine = create_engine("sqlite:///{}".format(self.datapackage))
package = Package(storage="sql", engine=engine)
else:
            package = read_datapackage(self.datapackage)  # type: datapackage.Package
return package
def _get_default_values(self):
default_resource = (
self.package.pop("default_values").set_index("name").to_dict()
)
return default_resource["default_value"]
def convert(self):
"""Perform the conversion from datapackage to destination format
"""
handle = self._header()
logger.debug(self.default_values)
for name, df in self.package.items():
logger.debug(name)
if df.empty:
columns = [x["name"] for x in df._metadata["schema"]["fields"]]
df = pd.DataFrame(columns=columns)
df = df.reset_index()
if "index" in df.columns:
df = df.drop(columns="index")
logger.debug("Number of columns: %s, %s", len(df.columns), df.columns)
if len(df.columns) > 1:
default_value = self.default_values[name]
self._write_parameter(df, name, handle, default=default_value)
else:
self._write_set(df, name, handle)
self._footer(handle)
handle.close()
@abstractmethod
def _header(self) -> TextIO:
raise NotImplementedError()
@abstractmethod
def _write_parameter(
self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float
) -> pd.DataFrame:
"""Write parameter data"""
raise NotImplementedError()
@abstractmethod
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO) -> pd.DataFrame:
"""Write set data"""
raise NotImplementedError()
@abstractmethod
def _footer(self, handle: TextIO):
raise NotImplementedError()
class DataPackageToCsv(DataPackageTo):
def _header(self):
filepath = open(self.datafilepath, "w")
msg = "# Model file written by *otoole*\n"
filepath.write(msg)
return filepath
def _form_parameter(self, df: pd.DataFrame, default: float):
df = df[df.VALUE != default]
return df
def _write_parameter(
self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float
):
"""Write parameter data to a csv file, omitting data which matches the default value
Arguments
---------
        df : pandas.DataFrame
        parameter_name : str
        handle: TextIO
        default : float
"""
df = self._form_parameter(df, default)
handle.write("param default {} : {} :=\n".format(default, parameter_name))
df.to_csv(path_or_buf=handle, sep=" ", header=False, index=False)
handle.write(";\n")
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO):
"""
Arguments
---------
df : pandas.DataFrame
set_name : str
handle: TextIO
"""
handle.write("set {} :=\n".format(set_name))
df.to_csv(path_or_buf=handle, sep=" ", header=False, index=False)
handle.write(";\n")
def _footer(self, handle: TextIO):
handle.write("end;\n")
handle.close()
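# A hedged sketch of the GNU MathProg-style fragment that ``DataPackageToCsv``
# emits for one parameter (the parameter name and data rows are illustrative):
#
#     param default 0 : AccumulatedAnnualDemand :=
#     SIMPLICITY ETH 2015 42.0
#     ;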
class DataPackageToExcel(DataPackageTo):
def _header(self):
return pd.ExcelWriter(self.datafilepath, mode="w")
def _form_parameter(
self, df: pd.DataFrame, parameter_name: str, default: float
) -> pd.DataFrame:
"""Converts data into wide format
Arguments
---------
df: pd.DataFrame
parameter_name: str
default: float
Returns
-------
pandas.DataFrame
"""
if not df.empty:
names = df.columns.to_list()
if len(names) > 2:
logger.debug(
"More than 2 columns for {}: {}".format(parameter_name, names)
)
rows = names[0:-2]
columns = names[-2]
values = names[-1]
logger.debug("Rows: {}; columns: {}; values: {}", rows, columns, values)
logger.debug("dtypes: {}".format(df.dtypes))
pivot = pd.pivot_table(
df, index=rows, columns=columns, values=values, fill_value=default
)
elif len(names) == 2:
logger.debug("Two columns for {}: {}".format(parameter_name, names))
values = names[-1]
                rows = names[0:-1]  # keep the single index column; names[0:-2] would be empty here
                logger.debug("Rows: %s; values: %s", rows, values)
pivot = pd.pivot_table(
df, index=rows, values=values, fill_value=default
)
else:
logger.debug("One column for {}: {}".format(parameter_name, names))
pivot = df.copy()
pivot = pivot.reset_index(drop=True)
else:
logger.debug("Dataframe {} is empty".format(parameter_name))
pivot = df.copy()
return pivot
def _write_parameter(
self,
df: pd.DataFrame,
parameter_name: str,
handle: pd.ExcelWriter,
default: float,
):
df = self._form_parameter(df, parameter_name, default)
df.to_excel(handle, sheet_name=parameter_name, merge_cells=False)
def _write_set(self, df: pd.DataFrame, set_name, handle: pd.ExcelWriter):
df.to_excel(handle, sheet_name=set_name, merge_cells=False, index=False)
    def _footer(self, handle: pd.ExcelWriter):
handle.close()
def convert_datapackage_to_datafile(path_to_datapackage, path_to_datafile):
dp = DataPackageToCsv(path_to_datapackage, path_to_datafile)
dp.convert()
def convert_datapackage_to_excel(path_to_datapackage, path_to_excel):
dp = DataPackageToExcel(path_to_datapackage, path_to_excel)
dp.convert()
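# A minimal usage sketch for the two convenience converters (both file paths
# are hypothetical):
#
#     >>> convert_datapackage_to_datafile('model/datapackage.json', 'model.txt')
#     >>> convert_datapackage_to_excel('model/datapackage.json', 'model.xlsx')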
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
path_to_datapackage = sys.argv[1]
path_to_datafile = sys.argv[2]
    convert_datapackage_to_datafile(path_to_datapackage, path_to_datafile)
790b8a83c0629355a24ed8c345356bc43a56f144 | 55,886 | py | Python | src/prefect/client/client.py | zmac12/prefect | 7fe55a83f275a01d95268ff9e4bd5f5b349728e1 | ["Apache-2.0"]
import datetime
import json
import os
import re
import time
import uuid
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union
from urllib.parse import urljoin
# if simplejson is installed, `requests` defaults to using it instead of json
# this allows the client to gracefully handle either json or simplejson
try:
from simplejson.errors import JSONDecodeError
except ImportError:
from json.decoder import JSONDecodeError
import pendulum
import toml
from slugify import slugify
import prefect
from prefect.utilities.exceptions import (
AuthorizationError,
ClientError,
VersionLockError,
)
from prefect.utilities.graphql import (
EnumValue,
GraphQLResult,
compress,
parse_graphql,
with_args,
)
from prefect.utilities.logging import create_diagnostic_logger
if TYPE_CHECKING:
from prefect.core import Flow
import requests
JSONLike = Union[bool, dict, list, str, int, float, None]
# type definitions for GraphQL results
TaskRunInfoResult = NamedTuple(
"TaskRunInfoResult",
[
("id", str),
("task_id", str),
("task_slug", str),
("version", int),
("state", "prefect.engine.state.State"),
],
)
FlowRunInfoResult = NamedTuple(
"FlowRunInfoResult",
[
("id", str),
("name", str),
("flow_id", str),
("parameters", Dict[str, Any]),
("context", Dict[str, Any]),
("version", int),
("scheduled_start_time", datetime.datetime),
("state", "prefect.engine.state.State"),
("task_runs", List[TaskRunInfoResult]),
],
)
class Client:
"""
Client for communication with Prefect Cloud
    If the arguments aren't specified, client initialization first checks the Prefect
    configuration; if the server is not set there, it falls back to the current context.
    The token will only be present in the current context.
Args:
- api_server (str, optional): the URL to send all GraphQL requests
to; if not provided, will be pulled from `cloud.graphql` config var
- api_token (str, optional): a Prefect Cloud API token, taken from
`config.cloud.auth_token` if not provided. If this token is USER-scoped, it may
be used to log in to any tenant that the user is a member of. In that case,
ephemeral JWTs will be loaded as necessary. Otherwise, the API token itself
will be used as authorization.
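    Example:
        A minimal construction sketch; both argument values here are
        hypothetical and would normally come from the Prefect config:
        ```python
        client = Client(
            api_server="https://api.prefect.io",  # hypothetical endpoint
            api_token="MY-API-TOKEN",  # hypothetical token
        )
        ```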
"""
def __init__(self, api_server: str = None, api_token: str = None):
self._access_token = None
self._refresh_token = None
self._access_token_expires_at = pendulum.now()
self._active_tenant_id = None
self._attached_headers = {} # type: Dict[str, str]
self.logger = create_diagnostic_logger("Diagnostics")
# store api server
self.api_server = api_server or prefect.context.config.cloud.get("graphql")
# store api token
self._api_token = api_token or prefect.context.config.cloud.get(
"auth_token", None
)
if prefect.config.backend == "cloud":
if not self._api_token:
# if no api token was passed, attempt to load state from local storage
settings = self._load_local_settings()
self._api_token = settings.get("api_token")
if self._api_token:
self._active_tenant_id = settings.get("active_tenant_id")
if self._active_tenant_id:
try:
self.login_to_tenant(tenant_id=self._active_tenant_id)
except AuthorizationError:
# if an authorization error is raised, then the token is invalid and should
# be cleared
self.logout_from_tenant()
else:
            # TODO: Separate out this functionality and clean up initial tenant access handling
if not self._active_tenant_id:
tenant_info = self.graphql({"query": {"tenant": {"id"}}})
if tenant_info.data.tenant:
self._active_tenant_id = tenant_info.data.tenant[0].id
def create_tenant(self, name: str, slug: str = None) -> str:
"""
Creates a new tenant.
Note this route only works when run against Prefect Server.
Args:
- name (str): the name of the tenant to create
- slug (str, optional): the slug of the tenant to create; defaults to name
Returns:
- str: the ID of the newly created tenant, or the ID of the currently active tenant
Raises:
- ValueError: if run against Prefect Cloud
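        Example:
            A sketch for a Prefect Server backend; the name and slug are
            hypothetical:
            ```python
            client = Client()
            tenant_id = client.create_tenant(name="My Team", slug="my-team")
            ```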
"""
if prefect.config.backend != "server":
msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/"
raise ValueError(msg)
if slug is None:
slug = slugify(name)
tenant_info = self.graphql(
{
"mutation($input: create_tenant_input!)": {
"create_tenant(input: $input)": {"id"}
}
},
variables=dict(input=dict(name=name, slug=slug)),
)
return tenant_info.data.create_tenant.id
# -------------------------------------------------------------------------
# Utilities
def get(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> dict:
"""
Convenience function for calling the Prefect API with token auth and GET request
Args:
- path (str): the path of the API url. For example, to GET
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the GET request to;
defaults to `self.api_server`
- headers (dict, optional): Headers to pass with the request
- params (dict): GET parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Dictionary representation of the request made
"""
response = self._request(
method="GET",
path=path,
params=params,
server=server,
headers=headers,
token=token,
retry_on_api_error=retry_on_api_error,
)
if response.text:
return response.json()
else:
return {}
def post(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> dict:
"""
Convenience function for calling the Prefect API with token auth and POST request
Args:
- path (str): the path of the API url. For example, to POST
http://prefect-server/v1/auth/login, path would be 'auth/login'.
- server (str, optional): the server to send the POST request to;
defaults to `self.api_server`
- headers(dict): headers to pass with the request
- params (dict): POST parameters
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Dictionary representation of the request made
"""
response = self._request(
method="POST",
path=path,
params=params,
server=server,
headers=headers,
token=token,
retry_on_api_error=retry_on_api_error,
)
if response.text:
return response.json()
else:
return {}
def graphql(
self,
query: Any,
raise_on_error: bool = True,
headers: Dict[str, str] = None,
variables: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> GraphQLResult:
"""
Convenience function for running queries against the Prefect GraphQL API
Args:
- query (Any): A representation of a graphql query to be executed. It will be
parsed by prefect.utilities.graphql.parse_graphql().
- raise_on_error (bool): if True, a `ClientError` will be raised if the GraphQL
returns any `errors`.
- headers (dict): any additional headers that should be passed as part of the
request
- variables (dict): Variables to be filled into a query with the key being
equivalent to the variables that are accepted by the query
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- dict: Data returned from the GraphQL query
Raises:
- ClientError if there are errors raised by the GraphQL mutation
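        Example:
            A minimal sketch; the query uses the dict form accepted by
            `parse_graphql`, and the selected fields are illustrative:
            ```python
            client = Client()
            result = client.graphql({"query": {"flow": {"id", "name"}}})
            flows = result.data.flow  # GraphQLResult supports dot access
            ```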
"""
result = self.post(
path="",
server=self.api_server,
headers=headers,
params=dict(query=parse_graphql(query), variables=json.dumps(variables)),
token=token,
retry_on_api_error=retry_on_api_error,
)
if raise_on_error and "errors" in result:
if "UNAUTHENTICATED" in str(result["errors"]):
raise AuthorizationError(result["errors"])
elif "Malformed Authorization header" in str(result["errors"]):
raise AuthorizationError(result["errors"])
elif (
result["errors"][0].get("extensions", {}).get("code")
== "VERSION_LOCKING_ERROR"
):
raise VersionLockError(result["errors"])
raise ClientError(result["errors"])
else:
return GraphQLResult(result) # type: ignore
def _send_request(
self,
session: "requests.Session",
method: str,
url: str,
params: Dict[str, JSONLike] = None,
headers: dict = None,
) -> "requests.models.Response":
if prefect.context.config.cloud.get("diagnostics") is True:
self.logger.debug(f"Preparing request to {url}")
clean_headers = {
head: re.sub("Bearer .*", "Bearer XXXX", val)
for head, val in headers.items() # type: ignore
}
self.logger.debug(f"Headers: {clean_headers}")
self.logger.debug(f"Request: {params}")
start_time = time.time()
if method == "GET":
response = session.get(url, headers=headers, params=params, timeout=30)
elif method == "POST":
response = session.post(url, headers=headers, json=params, timeout=30)
elif method == "DELETE":
response = session.delete(url, headers=headers, timeout=30)
else:
raise ValueError("Invalid method: {}".format(method))
if prefect.context.config.cloud.get("diagnostics") is True:
end_time = time.time()
self.logger.debug(f"Response: {response.json()}")
self.logger.debug(
f"Request duration: {round(end_time - start_time, 4)} seconds"
)
# Check if request returned a successful status
response.raise_for_status()
return response
def _request(
self,
method: str,
path: str,
params: Dict[str, JSONLike] = None,
server: str = None,
headers: dict = None,
token: str = None,
retry_on_api_error: bool = True,
) -> "requests.models.Response":
"""
Runs any specified request (GET, POST, DELETE) against the server
Args:
- method (str): The type of request to be made (GET, POST, DELETE)
- path (str): Path of the API URL
- params (dict, optional): Parameters used for the request
- server (str, optional): The server to make requests against, base API
server is used if not specified
- headers (dict, optional): Headers to pass with the request
- token (str): an auth token. If not supplied, the `client.access_token` is used.
- retry_on_api_error (bool): whether the operation should be retried if the API returns
an API_ERROR code
Returns:
- requests.models.Response: The response returned from the request
Raises:
- ClientError: if the client token is not in the context (due to not being logged in)
- ValueError: if a method is specified outside of the accepted GET, POST, DELETE
- requests.HTTPError: if a status code is returned that is not `200` or `401`
"""
if server is None:
server = self.api_server
assert isinstance(server, str) # mypy assert
if token is None:
token = self.get_auth_token()
# 'import requests' is expensive time-wise, we should do this just-in-time to keep
# the 'import prefect' time low
import requests
url = urljoin(server, path.lstrip("/")).rstrip("/")
params = params or {}
headers = headers or {}
if token:
headers["Authorization"] = "Bearer {}".format(token)
headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__)
if self._attached_headers:
headers.update(self._attached_headers)
session = requests.Session()
retry_total = 6 if prefect.config.backend == "cloud" else 1
retries = requests.packages.urllib3.util.retry.Retry(
total=retry_total,
backoff_factor=1,
status_forcelist=[500, 502, 503, 504],
method_whitelist=["DELETE", "GET", "POST"],
)
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries))
response = self._send_request(
session=session, method=method, url=url, params=params, headers=headers
)
# parse the response
try:
json_resp = response.json()
except JSONDecodeError as exc:
if prefect.config.backend == "cloud" and "Authorization" not in headers:
raise ClientError(
"Malformed response received from Cloud - please ensure that you "
"have an API token properly configured."
) from exc
else:
raise ClientError("Malformed response received from API.") from exc
# check if there was an API_ERROR code in the response
if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error:
success, retry_count = False, 0
# retry up to six times
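            # with exponential backoff (0.25s, 0.5s, 1s, 2s, 4s, 8s after successive failures)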
while success is False and retry_count < 6:
response = self._send_request(
session=session,
method=method,
url=url,
params=params,
headers=headers,
)
if "API_ERROR" in str(response.json().get("errors")):
retry_count += 1
time.sleep(0.25 * (2 ** (retry_count - 1)))
else:
success = True
return response
def attach_headers(self, headers: dict) -> None:
"""
Set headers to be attached to this Client
Args:
- headers (dict): A dictionary of headers to attach to this client. These headers
get added on to the existing dictionary of headers.
"""
self._attached_headers.update(headers)
# -------------------------------------------------------------------------
# Auth
# -------------------------------------------------------------------------
@property
def _local_settings_path(self) -> Path:
"""
        Returns the path to the local settings file corresponding to the current API server
"""
path = "{home}/client/{server}".format(
home=prefect.context.config.home_dir,
server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"),
)
return Path(os.path.expanduser(path)) / "settings.toml"
def _save_local_settings(self, settings: dict) -> None:
"""
Writes settings to local storage
"""
self._local_settings_path.parent.mkdir(exist_ok=True, parents=True)
with self._local_settings_path.open("w+") as f:
toml.dump(settings, f)
def _load_local_settings(self) -> dict:
"""
Loads settings from local storage
"""
if self._local_settings_path.exists():
with self._local_settings_path.open("r") as f:
return toml.load(f) # type: ignore
return {}
def save_api_token(self) -> None:
"""
Saves the API token in local storage.
"""
settings = self._load_local_settings()
settings["api_token"] = self._api_token
self._save_local_settings(settings)
def get_auth_token(self) -> str:
"""
Returns an auth token:
- if no explicit access token is stored, returns the api token
- if there is an access token:
- if there's a refresh token and the access token expires in the next 30 seconds,
then we refresh the access token and store the result
- return the access token
Returns:
- str: the access token
"""
if not self._access_token:
return self._api_token
expiration = self._access_token_expires_at or pendulum.now()
if self._refresh_token and pendulum.now().add(seconds=30) > expiration:
self._refresh_access_token()
return self._access_token
def get_available_tenants(self) -> List[Dict]:
"""
Returns a list of available tenants.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- List[Dict]: a list of dictionaries containing the id, slug, and name of
available tenants
"""
result = self.graphql(
{"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}},
# use the API token to see all available tenants
token=self._api_token,
) # type: ignore
return result.data.tenant # type: ignore
def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool:
"""
Log in to a specific tenant
NOTE: this should only be called by users who have provided a USER-scoped API token.
Args:
- tenant_slug (str): the tenant's slug
- tenant_id (str): the tenant's id
Returns:
- bool: True if the login was successful
Raises:
- ValueError: if at least one of `tenant_slug` or `tenant_id` isn't provided
- ValueError: if the `tenant_id` is not a valid UUID
- ValueError: if no matching tenants are found
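        Example:
            A sketch assuming a USER-scoped API token is already configured;
            the slug is hypothetical:
            ```python
            client = Client()
            client.login_to_tenant(tenant_slug="my-team")
            ```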
"""
if tenant_slug is None and tenant_id is None:
raise ValueError(
"At least one of `tenant_slug` or `tenant_id` must be provided."
)
elif tenant_id:
try:
uuid.UUID(tenant_id)
except ValueError as exc:
raise ValueError("The `tenant_id` must be a valid UUID.") from exc
tenant = self.graphql(
{
"query($slug: String, $id: uuid)": {
"tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"}
}
},
variables=dict(slug=tenant_slug, id=tenant_id),
# use the API token to query the tenant
token=self._api_token,
) # type: ignore
if not tenant.data.tenant: # type: ignore
raise ValueError("No matching tenants found.")
tenant_id = tenant.data.tenant[0].id # type: ignore
if prefect.config.backend == "cloud":
payload = self.graphql(
{
"mutation($input: switch_tenant_input!)": {
"switch_tenant(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(tenant_id=tenant_id)),
# Use the API token to switch tenants
token=self._api_token,
) # type: ignore
self._access_token = payload.data.switch_tenant.access_token # type: ignore
self._access_token_expires_at = pendulum.parse( # type: ignore
payload.data.switch_tenant.expires_at # type: ignore
) # type: ignore
self._refresh_token = payload.data.switch_tenant.refresh_token # type: ignore
self._active_tenant_id = tenant_id
# save the tenant setting
settings = self._load_local_settings()
settings["active_tenant_id"] = self._active_tenant_id
self._save_local_settings(settings)
return True
def logout_from_tenant(self) -> None:
self._access_token = None
self._refresh_token = None
self._active_tenant_id = None
# remove the tenant setting
settings = self._load_local_settings()
settings["active_tenant_id"] = None
self._save_local_settings(settings)
def _refresh_access_token(self) -> bool:
"""
Refresh the client's JWT access token.
NOTE: this should only be called by users who have provided a USER-scoped API token.
Returns:
- bool: True if the refresh succeeds
"""
payload = self.graphql(
{
"mutation($input: refresh_token_input!)": {
"refresh_token(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(access_token=self._access_token)),
# pass the refresh token as the auth header
token=self._refresh_token,
) # type: ignore
self._access_token = payload.data.refresh_token.access_token # type: ignore
self._access_token_expires_at = pendulum.parse( # type: ignore
payload.data.refresh_token.expires_at # type: ignore
) # type: ignore
self._refresh_token = payload.data.refresh_token.refresh_token # type: ignore
return True
# -------------------------------------------------------------------------
# Actions
# -------------------------------------------------------------------------
def register(
self,
flow: "Flow",
project_name: str = None,
build: bool = True,
set_schedule_active: bool = True,
version_group_id: str = None,
compressed: bool = True,
no_url: bool = False,
) -> str:
"""
Push a new flow to Prefect Cloud
Args:
- flow (Flow): a flow to register
- project_name (str, optional): the project that should contain this flow.
- build (bool, optional): if `True`, the flow's environment is built
prior to serialization; defaults to `True`
- set_schedule_active (bool, optional): if `False`, will set the schedule to
inactive in the database to prevent auto-scheduling runs (if the Flow has a
schedule). Defaults to `True`. This can be changed later.
- version_group_id (str, optional): the UUID version group ID to use for versioning
this Flow in Cloud; if not provided, the version group ID associated with this
Flow's project and name will be used.
            - compressed (bool, optional): if `True`, the serialized flow will be
                compressed; defaults to `True`
- no_url (bool, optional): if `True`, the stdout from this function will not
contain the URL link to the newly-registered flow in the Cloud UI
Returns:
- str: the ID of the newly-registered flow
Raises:
- ClientError: if the register failed
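        Example:
            A hedged sketch; the flow, its storage, and the project name are
            hypothetical, and the project must already exist:
            ```python
            from prefect import Flow
            from prefect.environments.storage import Local
            flow = Flow("example", storage=Local())
            flow_id = Client().register(flow, project_name="examples")
            ```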
"""
required_parameters = {p for p in flow.parameters() if p.required}
if flow.schedule is not None and required_parameters:
required_names = {p.name for p in required_parameters}
if not all(
[
required_names <= set(c.parameter_defaults.keys())
for c in flow.schedule.clocks
]
):
raise ClientError(
"Flows with required parameters can not be scheduled automatically."
)
if any(e.key for e in flow.edges) and flow.result is None:
warnings.warn(
"No result handler was specified on your Flow. Cloud features such as "
"input caching and resuming task runs from failure may not work properly.",
stacklevel=2,
)
if compressed:
create_mutation = {
"mutation($input: create_flow_from_compressed_string_input!)": {
"create_flow_from_compressed_string(input: $input)": {"id"}
}
}
else:
create_mutation = {
"mutation($input: create_flow_input!)": {
"create_flow(input: $input)": {"id"}
}
}
project = None
if project_name is None:
raise TypeError(
"'project_name' is a required field when registering a flow."
)
query_project = {
"query": {
with_args("project", {"where": {"name": {"_eq": project_name}}}): {
"id": True
}
}
}
project = self.graphql(query_project).data.project # type: ignore
if not project:
raise ValueError(
"Project {} not found. Run `prefect create project '{}'` to create it.".format(
project_name, project_name
)
)
serialized_flow = flow.serialize(build=build) # type: Any
# Set Docker storage image in environment metadata if provided
if isinstance(flow.storage, prefect.environments.storage.Docker):
flow.environment.metadata["image"] = flow.storage.name
serialized_flow = flow.serialize(build=False)
        # If no image was ever set, default the metadata to the all_extras image on the current version
if not flow.environment.metadata.get("image"):
version = prefect.__version__.split("+")[0]
flow.environment.metadata[
"image"
] = f"prefecthq/prefect:all_extras-{version}"
serialized_flow = flow.serialize(build=False)
# verify that the serialized flow can be deserialized
try:
prefect.serialization.flow.FlowSchema().load(serialized_flow)
except Exception as exc:
raise ValueError(
"Flow could not be deserialized successfully. Error was: {}".format(
repr(exc)
)
) from exc
if compressed:
serialized_flow = compress(serialized_flow)
res = self.graphql(
create_mutation,
variables=dict(
input=dict(
project_id=(project[0].id if project else None),
serialized_flow=serialized_flow,
set_schedule_active=set_schedule_active,
version_group_id=version_group_id,
)
),
retry_on_api_error=False,
) # type: Any
flow_id = (
res.data.create_flow_from_compressed_string.id
if compressed
else res.data.create_flow.id
)
if not no_url:
# Generate direct link to Cloud flow
flow_url = self.get_cloud_url("flow", flow_id)
prefix = "└── "
print("Flow URL: {}".format(flow_url))
# Extra information to improve visibility
msg = (
f" {prefix}ID: {flow_id}\n"
f" {prefix}Project: {project_name}\n"
f" {prefix}Labels: {list(flow.environment.labels)}"
)
print(msg)
return flow_id
def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str:
"""
Convenience method for creating Prefect Cloud URLs for a given subdirectory.
Args:
- subdirectory (str): the subdirectory to use (e.g., `"flow-run"`)
- id (str): the ID of the page
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the URL corresponding to the appropriate base URL, tenant slug, subdirectory
and ID
Example:
```python
from prefect import Client
client = Client()
client.get_cloud_url("flow-run", "424242-ca-94611-111-55")
# returns "https://cloud.prefect.io/my-tenant-slug/flow-run/424242-ca-94611-111-55"
```
"""
# Generate direct link to UI
if prefect.config.backend == "cloud":
tenant_slug = self.get_default_tenant_slug(as_user=as_user)
else:
tenant_slug = ""
base_url = (
re.sub("api-", "", prefect.config.cloud.api)
if re.search("api-", prefect.config.cloud.api)
else re.sub("api", "cloud", prefect.config.cloud.api)
)
full_url = prefect.config.cloud.api
if tenant_slug:
full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id])
elif prefect.config.backend == "server":
full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id])
return full_url
def get_default_tenant_slug(self, as_user: bool = True) -> str:
"""
Get the default tenant slug for the currently authenticated user
Args:
- as_user (bool, optional): whether this query is being made from a USER scoped token;
defaults to `True`. Only used internally for queries made from RUNNERs
Returns:
- str: the slug of the current default tenant for this user
"""
if as_user:
query = {
"query": {"user": {"default_membership": {"tenant": "slug"}}}
} # type: dict
else:
query = {"query": {"tenant": {"slug"}}}
res = self.graphql(query)
if as_user:
user = res.get("data").user[0]
slug = user.default_membership.tenant.slug
else:
slug = res.get("data").tenant[0].slug
return slug
def create_project(self, project_name: str, project_description: str = None) -> str:
"""
Create a new Project
Args:
- project_name (str): the project that should contain this flow
- project_description (str, optional): the project description
Returns:
- str: the ID of the newly-created project
Raises:
- ClientError: if the project creation failed
"""
project_mutation = {
"mutation($input: create_project_input!)": {
"create_project(input: $input)": {"id"}
}
}
res = self.graphql(
project_mutation,
variables=dict(
input=dict(
name=project_name,
description=project_description,
tenant_id=self._active_tenant_id,
)
),
) # type: Any
return res.data.create_project.id
def create_flow_run(
self,
flow_id: str = None,
context: dict = None,
parameters: dict = None,
scheduled_start_time: datetime.datetime = None,
idempotency_key: str = None,
run_name: str = None,
version_group_id: str = None,
) -> str:
"""
        Create a new flow run for the given flow id. If `scheduled_start_time` is not provided, the flow
run will be scheduled to start immediately. If both `flow_id` and `version_group_id`
are provided, only the `flow_id` will be used.
Args:
- flow_id (str, optional): the id of the Flow you wish to schedule
- context (dict, optional): the run context
- parameters (dict, optional): a dictionary of parameter values to pass to the flow run
- scheduled_start_time (datetime, optional): the time to schedule the execution
for; if not provided, defaults to now
- idempotency_key (str, optional): an idempotency key; if provided, this run will
be cached for 24 hours. Any subsequent attempts to create a run with the same
idempotency key will return the ID of the originally created run (no new run
will be created after the first). An error will be raised if parameters or
context are provided and don't match the original. Each subsequent request
                will reset the TTL to 24 hours.
- run_name (str, optional): The name assigned to this flow run
- version_group_id (str, optional): if provided, the unique unarchived flow within
this version group will be scheduled to run. This input can be used as a
stable API for running flows which are regularly updated.
Returns:
- str: the ID of the newly-created flow run
Raises:
- ClientError: if the GraphQL query is bad for any reason
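        Example:
            A sketch with a hypothetical flow ID and parameters:
            ```python
            run_id = Client().create_flow_run(
                flow_id="4242-4242-4242",
                parameters={"date": "2021-01-01"},
                run_name="ad-hoc run",
            )
            ```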
"""
create_mutation = {
"mutation($input: create_flow_run_input!)": {
"create_flow_run(input: $input)": {"id": True}
}
}
if not flow_id and not version_group_id:
raise ValueError("One of flow_id or version_group_id must be provided")
if flow_id:
inputs = dict(flow_id=flow_id)
else:
inputs = dict(version_group_id=version_group_id) # type: ignore
if parameters is not None:
inputs.update(parameters=parameters) # type: ignore
if context is not None:
inputs.update(context=context) # type: ignore
if idempotency_key is not None:
inputs.update(idempotency_key=idempotency_key) # type: ignore
if scheduled_start_time is not None:
inputs.update(
scheduled_start_time=scheduled_start_time.isoformat()
) # type: ignore
if run_name is not None:
inputs.update(flow_run_name=run_name) # type: ignore
res = self.graphql(create_mutation, variables=dict(input=inputs))
return res.data.create_flow_run.id # type: ignore
def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult:
"""
Retrieves version and current state information for the given flow run.
Args:
- flow_run_id (str): the id of the flow run to get information for
Returns:
- GraphQLResult: an object representing information about the flow run
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
"""
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"id": True,
"name": True,
"flow_id": True,
"parameters": True,
"context": True,
"version": True,
"scheduled_start_time": True,
"serialized_state": True,
# load all task runs except dynamic task runs
with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): {
"id": True,
"task": {"id": True, "slug": True},
"version": True,
"serialized_state": True,
},
}
}
}
result = self.graphql(query).data.flow_run_by_pk # type: ignore
if result is None:
raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id))
# convert scheduled_start_time from string to datetime
result.scheduled_start_time = pendulum.parse(result.scheduled_start_time)
# create "state" attribute from serialized_state
result.state = prefect.engine.state.State.deserialize(
result.pop("serialized_state")
)
# reformat task_runs
task_runs = []
for tr in result.task_runs:
tr.state = prefect.engine.state.State.deserialize(
tr.pop("serialized_state")
)
task_info = tr.pop("task")
tr.task_id = task_info["id"]
tr.task_slug = task_info["slug"]
task_runs.append(TaskRunInfoResult(**tr))
result.task_runs = task_runs
result.context = (
result.context.to_dict() if result.context is not None else None
)
result.parameters = (
result.parameters.to_dict() if result.parameters is not None else None
)
return FlowRunInfoResult(**result)
def update_flow_run_heartbeat(self, flow_run_id: str) -> None:
"""
Convenience method for heartbeating a flow run.
        Raises an error if the update fails, since the mutation is sent with `raise_on_error=True`.
Args:
- flow_run_id (str): the flow run ID to heartbeat
"""
mutation = {
"mutation": {
with_args(
"update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}}
): {"success"}
}
}
self.graphql(mutation, raise_on_error=True)
def update_task_run_heartbeat(self, task_run_id: str) -> None:
"""
Convenience method for heartbeating a task run.
        Raises an error if the update fails, since the mutation is sent with `raise_on_error=True`.
Args:
- task_run_id (str): the task run ID to heartbeat
"""
mutation = {
"mutation": {
with_args(
"update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}}
): {"success"}
}
}
self.graphql(mutation, raise_on_error=True)
def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State":
"""
Retrieves the current state for a flow run.
Args:
- flow_run_id (str): the id for this flow run
Returns:
- State: a Prefect State object
"""
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"serialized_state": True,
}
}
}
flow_run = self.graphql(query).data.flow_run_by_pk
return prefect.engine.state.State.deserialize(flow_run.serialized_state)
def set_flow_run_state(
self,
flow_run_id: str,
state: "prefect.engine.state.State",
version: int = None,
) -> "prefect.engine.state.State":
"""
Sets new state for a flow run in the database.
Args:
- flow_run_id (str): the id of the flow run to set state for
- state (State): the new state for this flow run
- version (int, optional): the current version of the flow run state. This is optional
but it can be supplied to enforce version-locking.
Returns:
- State: the state the current flow run should be considered in
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
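        Example:
            A sketch that marks a (hypothetical) flow run as Cancelled:
            ```python
            from prefect.engine.state import Cancelled
            client = Client()
            client.set_flow_run_state(
                flow_run_id="4242-4242-4242", state=Cancelled("Stopped by operator")
            )
            ```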
"""
mutation = {
"mutation($input: set_flow_run_states_input!)": {
"set_flow_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
flow_run_id=flow_run_id,
version=version,
)
]
)
),
) # type: Any
state_payload = result.data.set_flow_run_states.states[0]
if state_payload.status == "QUEUED":
# If appropriate, the state attribute of the Queued state can be
# set by the caller of this method
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
def get_latest_cached_states(
self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime
) -> List["prefect.engine.state.State"]:
"""
Pulls all Cached states for the given task that were created after the provided date.
Args:
- task_id (str): the task id for this task run
- cache_key (Optional[str]): the cache key for this Task's cache; if `None`, the
task id alone will be used
- created_after (datetime.datetime): the earliest date the state should have been
created at
Returns:
- List[State]: a list of Cached states created after the given date
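        Example:
            A sketch pulling Cached states from the last 24 hours for a
            hypothetical task id, across all cache keys:
            ```python
            import datetime
            cutoff = datetime.datetime.utcnow() - datetime.timedelta(days=1)
            states = Client().get_latest_cached_states(
                task_id="2424-2424", cache_key=None, created_after=cutoff
            )
            ```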
"""
args = {
"where": {
"state": {"_eq": "Cached"},
"state_timestamp": {"_gte": created_after.isoformat()},
},
"order_by": {"state_timestamp": EnumValue("desc")},
"limit": 100,
} # type: Dict[str, Any]
# if a cache key was provided, match it against all tasks
if cache_key is not None:
args["where"].update({"cache_key": {"_eq": cache_key}})
# otherwise match against only this task, across all cache keys
else:
args["where"].update({"task_id": {"_eq": task_id}})
query = {"query": {with_args("task_run", args): "serialized_state"}}
result = self.graphql(query) # type: Any
deserializer = prefect.engine.state.State.deserialize
valid_states = [
deserializer(res.serialized_state) for res in result.data.task_run
]
return valid_states
def get_task_run_info(
self, flow_run_id: str, task_id: str, map_index: Optional[int] = None
) -> TaskRunInfoResult:
"""
Retrieves version and current state information for the given task run.
Args:
- flow_run_id (str): the id of the flow run that this task run lives in
- task_id (str): the task id for this task run
- map_index (int, optional): the mapping index for this task run; if
`None`, it is assumed this task is _not_ mapped
Returns:
            - NamedTuple: a tuple containing `id, task_id, task_slug, version, state`
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
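        Example:
            A sketch with hypothetical IDs; omitting `map_index` targets the
            unmapped (parent) task run:
            ```python
            info = Client().get_task_run_info(
                flow_run_id="4242-4242-4242", task_id="2424-2424"
            )
            print(info.task_slug, info.state)
            ```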
"""
mutation = {
"mutation": {
with_args(
"get_or_create_task_run",
{
"input": {
"flow_run_id": flow_run_id,
"task_id": task_id,
"map_index": -1 if map_index is None else map_index,
}
},
): {
"id": True,
}
}
}
result = self.graphql(mutation) # type: Any
if result is None:
raise ClientError("Failed to create task run.")
task_run_id = result.data.get_or_create_task_run.id
query = {
"query": {
with_args("task_run_by_pk", {"id": task_run_id}): {
"version": True,
"serialized_state": True,
"task": {"slug": True},
}
}
}
task_run = self.graphql(query).data.task_run_by_pk # type: ignore
if task_run is None:
raise ClientError('Task run ID not found: "{}"'.format(task_run_id))
state = prefect.engine.state.State.deserialize(task_run.serialized_state)
return TaskRunInfoResult(
id=task_run_id,
task_id=task_id,
task_slug=task_run.task.slug,
version=task_run.version,
state=state,
)
def set_task_run_name(self, task_run_id: str, name: str) -> bool:
"""
Set the name of a task run
Args:
- task_run_id (str): the id of a task run
- name (str): a name for this task run
Returns:
- bool: whether or not the task run name was updated
"""
mutation = {
"mutation($input: set_task_run_name_input!)": {
"set_task_run_name(input: $input)": {
"success": True,
}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name))
)
return result.data.set_task_run_name.success
def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State":
"""
Retrieves the current state for a task run.
Args:
- task_run_id (str): the id for this task run
Returns:
- State: a Prefect State object
"""
query = {
"query": {
with_args("task_run_by_pk", {"id": task_run_id}): {
"serialized_state": True,
}
}
}
task_run = self.graphql(query).data.task_run_by_pk
return prefect.engine.state.State.deserialize(task_run.serialized_state)
def set_task_run_state(
self,
task_run_id: str,
state: "prefect.engine.state.State",
version: int = None,
cache_for: datetime.timedelta = None,
) -> "prefect.engine.state.State":
"""
Sets new state for a task run.
Args:
- task_run_id (str): the id of the task run to set state for
- state (State): the new state for this task run
- version (int, optional): the current version of the task run state. This is optional
but it can be supplied to enforce version-locking.
- cache_for (timedelta, optional): how long to store the result of this task for,
using the serializer set in config; if not provided, no caching occurs
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
Returns:
- State: the state the current task run should be considered in
"""
mutation = {
"mutation($input: set_task_run_states_input!)": {
"set_task_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
task_run_id=task_run_id,
version=version,
)
]
)
),
) # type: Any
state_payload = result.data.set_task_run_states.states[0]
if state_payload.status == "QUEUED":
# If appropriate, the state attribute of the Queued state can be
# set by the caller of this method
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
def set_secret(self, name: str, value: Any) -> None:
"""
Set a secret with the given name and value.
Args:
- name (str): the name of the secret; used for retrieving the secret
during task runs
- value (Any): the value of the secret
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the secret-setting was unsuccessful
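        Example:
            A sketch with a hypothetical secret name and value:
            ```python
            Client().set_secret(name="MY_API_KEY", value="not-a-real-key")
            ```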
"""
mutation = {
"mutation($input: set_secret_input!)": {
"set_secret(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(name=name, value=value))
) # type: Any
if not result.data.set_secret.success:
raise ValueError("Setting secret failed.")
def get_task_tag_limit(self, tag: str) -> Optional[int]:
"""
Retrieve the current task tag concurrency limit for a given tag.
Args:
            - tag (str): the tag to retrieve the limit for
        Returns:
            - Optional[int]: the current concurrency limit for the tag, or `None` if
                no limit is set
        Raises:
            - ClientError: if the GraphQL query fails
"""
query = {
"query": {
with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): {
"limit": True
}
}
}
result = self.graphql(query) # type: Any
if result.data.task_tag_limit:
return result.data.task_tag_limit[0].limit
else:
return None
def update_task_tag_limit(self, tag: str, limit: int) -> None:
"""
Update the task tag concurrency limit for a given tag; requires tenant admin permissions.
Args:
- tag (str): the tag to update
- limit (int): the concurrency limit to enforce on the tag; should be a value >= 0
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag limit-setting was unsuccessful, or if a bad limit was provided
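        Example:
            A sketch that caps a hypothetical tag at 10 concurrent task runs:
            ```python
            Client().update_task_tag_limit(tag="database", limit=10)
            ```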
"""
if limit < 0:
raise ValueError("Concurrency limits must be >= 0")
mutation = {
"mutation($input: update_task_tag_limit_input!)": {
"update_task_tag_limit(input: $input)": {"id"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(tag=tag, limit=limit))
) # type: Any
if not result.data.update_task_tag_limit.id:
raise ValueError("Updating the task tag concurrency limit failed.")
def delete_task_tag_limit(self, limit_id: str) -> None:
"""
Deletes a given task tag concurrency limit; requires tenant admin permissions.
Args:
            - limit_id (str): the ID of the task tag concurrency limit to delete
Raises:
- ClientError: if the GraphQL mutation is bad for any reason
- ValueError: if the tag deletion was unsuccessful, or if a bad tag ID was provided
"""
mutation = {
"mutation($input: delete_task_tag_limit_input!)": {
"delete_task_tag_limit(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(limit_id=limit_id))
) # type: Any
if not result.data.delete_task_tag_limit.success:
raise ValueError("Deleting the task tag concurrency limit failed.")
def write_run_logs(self, logs: List[Dict]) -> None:
"""
Uploads a collection of logs to Cloud.
Args:
- logs (List[Dict]): a list of log entries to add
Raises:
            - ValueError: if uploading the logs fails
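        Example:
            A sketch of a single log entry; the ID is hypothetical, and the
            field names mirror the input this client sends to the mutation:
            ```python
            Client().write_run_logs([{
                "flow_run_id": "4242-4242-4242",
                "name": "external-logger",
                "message": "hello from outside the run",
                "level": "INFO",
            }])
            ```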
"""
mutation = {
"mutation($input: write_run_logs_input!)": {
"write_run_logs(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(logs=logs))
) # type: Any
if not result.data.write_run_logs.success:
raise ValueError("Writing logs failed.")
def register_agent(
self,
agent_type: str,
name: str = None,
labels: List[str] = None,
agent_config_id: str = None,
) -> str:
"""
Register an agent with a backend API
Args:
- agent_type (str): The type of agent being registered
            - name (str, optional): The name of the agent being registered
- labels (List[str], optional): A list of any present labels on the agent
being registered
- agent_config_id (str, optional): The ID of an agent configuration to register with
Returns:
            - str: the agent ID
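        Example:
            A sketch registering a hypothetical local agent with one label
            (the agent type string is illustrative):
            ```python
            agent_id = Client().register_agent(
                agent_type="LocalAgent", name="my-agent", labels=["dev"]
            )
            ```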
"""
mutation = {
"mutation($input: register_agent_input!)": {
"register_agent(input: $input)": {"id"}
}
}
result = self.graphql(
mutation,
variables=dict(
input=dict(
type=agent_type,
name=name,
labels=labels or [],
tenant_id=self._active_tenant_id,
agent_config_id=agent_config_id,
)
),
)
if not result.data.register_agent.id:
raise ValueError("Error registering agent")
return result.data.register_agent.id
def get_agent_config(self, agent_config_id: str) -> dict:
"""
Get agent config settings
Args:
- agent_config_id (str): The ID of an agent configuration to retrieve
Returns:
- dict: the agent configuration's `settings`
"""
query = {
"query": {
with_args(
"agent_config", {"where": {"id": {"_eq": agent_config_id}}}
): {"settings": True}
}
}
result = self.graphql(query) # type: Any
return result.data.agent_config[0].settings
| 35.985834
| 101
| 0.55071
|
import datetime
import json
import os
import re
import time
import uuid
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional, Union
from urllib.parse import urljoin
try:
from simplejson.errors import JSONDecodeError
except ImportError:
from json.decoder import JSONDecodeError
import pendulum
import toml
from slugify import slugify
import prefect
from prefect.utilities.exceptions import (
AuthorizationError,
ClientError,
VersionLockError,
)
from prefect.utilities.graphql import (
EnumValue,
GraphQLResult,
compress,
parse_graphql,
with_args,
)
from prefect.utilities.logging import create_diagnostic_logger
if TYPE_CHECKING:
from prefect.core import Flow
import requests
JSONLike = Union[bool, dict, list, str, int, float, None]
TaskRunInfoResult = NamedTuple(
"TaskRunInfoResult",
[
("id", str),
("task_id", str),
("task_slug", str),
("version", int),
("state", "prefect.engine.state.State"),
],
)
FlowRunInfoResult = NamedTuple(
"FlowRunInfoResult",
[
("id", str),
("name", str),
("flow_id", str),
("parameters", Dict[str, Any]),
("context", Dict[str, Any]),
("version", int),
("scheduled_start_time", datetime.datetime),
("state", "prefect.engine.state.State"),
("task_runs", List[TaskRunInfoResult]),
],
)
class Client:
def __init__(self, api_server: str = None, api_token: str = None):
self._access_token = None
self._refresh_token = None
self._access_token_expires_at = pendulum.now()
self._active_tenant_id = None
self._attached_headers = {}
self.logger = create_diagnostic_logger("Diagnostics")
self.api_server = api_server or prefect.context.config.cloud.get("graphql")
self._api_token = api_token or prefect.context.config.cloud.get(
"auth_token", None
)
if prefect.config.backend == "cloud":
if not self._api_token:
settings = self._load_local_settings()
self._api_token = settings.get("api_token")
if self._api_token:
self._active_tenant_id = settings.get("active_tenant_id")
if self._active_tenant_id:
try:
self.login_to_tenant(tenant_id=self._active_tenant_id)
except AuthorizationError:
self.logout_from_tenant()
else:
if not self._active_tenant_id:
tenant_info = self.graphql({"query": {"tenant": {"id"}}})
if tenant_info.data.tenant:
self._active_tenant_id = tenant_info.data.tenant[0].id
def create_tenant(self, name: str, slug: str = None) -> str:
if prefect.config.backend != "server":
msg = "To create a tenant with Prefect Cloud, please signup at https://cloud.prefect.io/"
raise ValueError(msg)
if slug is None:
slug = slugify(name)
tenant_info = self.graphql(
{
"mutation($input: create_tenant_input!)": {
"create_tenant(input: $input)": {"id"}
}
},
variables=dict(input=dict(name=name, slug=slug)),
)
return tenant_info.data.create_tenant.id
def get(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> dict:
response = self._request(
method="GET",
path=path,
params=params,
server=server,
headers=headers,
token=token,
retry_on_api_error=retry_on_api_error,
)
if response.text:
return response.json()
else:
return {}
def post(
self,
path: str,
server: str = None,
headers: dict = None,
params: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> dict:
response = self._request(
method="POST",
path=path,
params=params,
server=server,
headers=headers,
token=token,
retry_on_api_error=retry_on_api_error,
)
if response.text:
return response.json()
else:
return {}
def graphql(
self,
query: Any,
raise_on_error: bool = True,
headers: Dict[str, str] = None,
variables: Dict[str, JSONLike] = None,
token: str = None,
retry_on_api_error: bool = True,
) -> GraphQLResult:
result = self.post(
path="",
server=self.api_server,
headers=headers,
params=dict(query=parse_graphql(query), variables=json.dumps(variables)),
token=token,
retry_on_api_error=retry_on_api_error,
)
if raise_on_error and "errors" in result:
if "UNAUTHENTICATED" in str(result["errors"]):
raise AuthorizationError(result["errors"])
elif "Malformed Authorization header" in str(result["errors"]):
raise AuthorizationError(result["errors"])
elif (
result["errors"][0].get("extensions", {}).get("code")
== "VERSION_LOCKING_ERROR"
):
raise VersionLockError(result["errors"])
raise ClientError(result["errors"])
else:
return GraphQLResult(result)
def _send_request(
self,
session: "requests.Session",
method: str,
url: str,
params: Dict[str, JSONLike] = None,
headers: dict = None,
) -> "requests.models.Response":
if prefect.context.config.cloud.get("diagnostics") is True:
self.logger.debug(f"Preparing request to {url}")
clean_headers = {
head: re.sub("Bearer .*", "Bearer XXXX", val)
for head, val in headers.items()
}
self.logger.debug(f"Headers: {clean_headers}")
self.logger.debug(f"Request: {params}")
start_time = time.time()
if method == "GET":
response = session.get(url, headers=headers, params=params, timeout=30)
elif method == "POST":
response = session.post(url, headers=headers, json=params, timeout=30)
elif method == "DELETE":
response = session.delete(url, headers=headers, timeout=30)
else:
raise ValueError("Invalid method: {}".format(method))
if prefect.context.config.cloud.get("diagnostics") is True:
end_time = time.time()
self.logger.debug(f"Response: {response.json()}")
self.logger.debug(
f"Request duration: {round(end_time - start_time, 4)} seconds"
)
response.raise_for_status()
return response
def _request(
self,
method: str,
path: str,
params: Dict[str, JSONLike] = None,
server: str = None,
headers: dict = None,
token: str = None,
retry_on_api_error: bool = True,
) -> "requests.models.Response":
if server is None:
server = self.api_server
assert isinstance(server, str)
if token is None:
token = self.get_auth_token()
import requests
url = urljoin(server, path.lstrip("/")).rstrip("/")
params = params or {}
headers = headers or {}
if token:
headers["Authorization"] = "Bearer {}".format(token)
headers["X-PREFECT-CORE-VERSION"] = str(prefect.__version__)
if self._attached_headers:
headers.update(self._attached_headers)
session = requests.Session()
retry_total = 6 if prefect.config.backend == "cloud" else 1
retries = requests.packages.urllib3.util.retry.Retry(
total=retry_total,
backoff_factor=1,
status_forcelist=[500, 502, 503, 504],
method_whitelist=["DELETE", "GET", "POST"],
)
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=retries))
response = self._send_request(
session=session, method=method, url=url, params=params, headers=headers
)
try:
json_resp = response.json()
except JSONDecodeError as exc:
if prefect.config.backend == "cloud" and "Authorization" not in headers:
raise ClientError(
"Malformed response received from Cloud - please ensure that you "
"have an API token properly configured."
) from exc
else:
raise ClientError("Malformed response received from API.") from exc
if "API_ERROR" in str(json_resp.get("errors")) and retry_on_api_error:
success, retry_count = False, 0
while success is False and retry_count < 6:
response = self._send_request(
session=session,
method=method,
url=url,
params=params,
headers=headers,
)
if "API_ERROR" in str(response.json().get("errors")):
retry_count += 1
time.sleep(0.25 * (2 ** (retry_count - 1)))
else:
success = True
return response
def attach_headers(self, headers: dict) -> None:
self._attached_headers.update(headers)
@property
def _local_settings_path(self) -> Path:
path = "{home}/client/{server}".format(
home=prefect.context.config.home_dir,
server=slugify(self.api_server, regex_pattern=r"[^-\.a-z0-9]+"),
)
return Path(os.path.expanduser(path)) / "settings.toml"
def _save_local_settings(self, settings: dict) -> None:
self._local_settings_path.parent.mkdir(exist_ok=True, parents=True)
with self._local_settings_path.open("w+") as f:
toml.dump(settings, f)
def _load_local_settings(self) -> dict:
if self._local_settings_path.exists():
with self._local_settings_path.open("r") as f:
return toml.load(f)
return {}
def save_api_token(self) -> None:
settings = self._load_local_settings()
settings["api_token"] = self._api_token
self._save_local_settings(settings)
def get_auth_token(self) -> str:
if not self._access_token:
return self._api_token
expiration = self._access_token_expires_at or pendulum.now()
if self._refresh_token and pendulum.now().add(seconds=30) > expiration:
self._refresh_access_token()
return self._access_token
def get_available_tenants(self) -> List[Dict]:
result = self.graphql(
{"query": {"tenant(order_by: {slug: asc})": {"id", "slug", "name"}}},
token=self._api_token,
)
return result.data.tenant
def login_to_tenant(self, tenant_slug: str = None, tenant_id: str = None) -> bool:
if tenant_slug is None and tenant_id is None:
raise ValueError(
"At least one of `tenant_slug` or `tenant_id` must be provided."
)
elif tenant_id:
try:
uuid.UUID(tenant_id)
except ValueError as exc:
raise ValueError("The `tenant_id` must be a valid UUID.") from exc
tenant = self.graphql(
{
"query($slug: String, $id: uuid)": {
"tenant(where: {slug: { _eq: $slug }, id: { _eq: $id } })": {"id"}
}
},
variables=dict(slug=tenant_slug, id=tenant_id),
token=self._api_token,
)
if not tenant.data.tenant:
raise ValueError("No matching tenants found.")
tenant_id = tenant.data.tenant[0].id
if prefect.config.backend == "cloud":
payload = self.graphql(
{
"mutation($input: switch_tenant_input!)": {
"switch_tenant(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(tenant_id=tenant_id)),
token=self._api_token,
)
self._access_token = payload.data.switch_tenant.access_token
self._access_token_expires_at = pendulum.parse(
payload.data.switch_tenant.expires_at
)
self._refresh_token = payload.data.switch_tenant.refresh_token
self._active_tenant_id = tenant_id
settings = self._load_local_settings()
settings["active_tenant_id"] = self._active_tenant_id
self._save_local_settings(settings)
return True
def logout_from_tenant(self) -> None:
self._access_token = None
self._refresh_token = None
self._active_tenant_id = None
settings = self._load_local_settings()
settings["active_tenant_id"] = None
self._save_local_settings(settings)
def _refresh_access_token(self) -> bool:
payload = self.graphql(
{
"mutation($input: refresh_token_input!)": {
"refresh_token(input: $input)": {
"access_token",
"expires_at",
"refresh_token",
}
}
},
variables=dict(input=dict(access_token=self._access_token)),
token=self._refresh_token,
)
self._access_token = payload.data.refresh_token.access_token
self._access_token_expires_at = pendulum.parse(
payload.data.refresh_token.expires_at
)
self._refresh_token = payload.data.refresh_token.refresh_token
return True
def register(
self,
flow: "Flow",
project_name: str = None,
build: bool = True,
set_schedule_active: bool = True,
version_group_id: str = None,
compressed: bool = True,
no_url: bool = False,
) -> str:
required_parameters = {p for p in flow.parameters() if p.required}
if flow.schedule is not None and required_parameters:
required_names = {p.name for p in required_parameters}
if not all(
[
required_names <= set(c.parameter_defaults.keys())
for c in flow.schedule.clocks
]
):
raise ClientError(
"Flows with required parameters can not be scheduled automatically."
)
if any(e.key for e in flow.edges) and flow.result is None:
warnings.warn(
"No result handler was specified on your Flow. Cloud features such as "
"input caching and resuming task runs from failure may not work properly.",
stacklevel=2,
)
if compressed:
create_mutation = {
"mutation($input: create_flow_from_compressed_string_input!)": {
"create_flow_from_compressed_string(input: $input)": {"id"}
}
}
else:
create_mutation = {
"mutation($input: create_flow_input!)": {
"create_flow(input: $input)": {"id"}
}
}
project = None
if project_name is None:
raise TypeError(
"'project_name' is a required field when registering a flow."
)
query_project = {
"query": {
with_args("project", {"where": {"name": {"_eq": project_name}}}): {
"id": True
}
}
}
project = self.graphql(query_project).data.project
if not project:
raise ValueError(
"Project {} not found. Run `prefect create project '{}'` to create it.".format(
project_name, project_name
)
)
serialized_flow = flow.serialize(build=build)
if isinstance(flow.storage, prefect.environments.storage.Docker):
flow.environment.metadata["image"] = flow.storage.name
serialized_flow = flow.serialize(build=False)
if not flow.environment.metadata.get("image"):
version = prefect.__version__.split("+")[0]
flow.environment.metadata[
"image"
] = f"prefecthq/prefect:all_extras-{version}"
serialized_flow = flow.serialize(build=False)
try:
prefect.serialization.flow.FlowSchema().load(serialized_flow)
except Exception as exc:
raise ValueError(
"Flow could not be deserialized successfully. Error was: {}".format(
repr(exc)
)
) from exc
if compressed:
serialized_flow = compress(serialized_flow)
res = self.graphql(
create_mutation,
variables=dict(
input=dict(
project_id=(project[0].id if project else None),
serialized_flow=serialized_flow,
set_schedule_active=set_schedule_active,
version_group_id=version_group_id,
)
),
retry_on_api_error=False,
)
flow_id = (
res.data.create_flow_from_compressed_string.id
if compressed
else res.data.create_flow.id
)
if not no_url:
flow_url = self.get_cloud_url("flow", flow_id)
prefix = "└── "
print("Flow URL: {}".format(flow_url))
msg = (
f" {prefix}ID: {flow_id}\n"
f" {prefix}Project: {project_name}\n"
f" {prefix}Labels: {list(flow.environment.labels)}"
)
print(msg)
return flow_id
def get_cloud_url(self, subdirectory: str, id: str, as_user: bool = True) -> str:
if prefect.config.backend == "cloud":
tenant_slug = self.get_default_tenant_slug(as_user=as_user)
else:
tenant_slug = ""
base_url = (
re.sub("api-", "", prefect.config.cloud.api)
if re.search("api-", prefect.config.cloud.api)
else re.sub("api", "cloud", prefect.config.cloud.api)
)
full_url = prefect.config.cloud.api
if tenant_slug:
full_url = "/".join([base_url.rstrip("/"), tenant_slug, subdirectory, id])
elif prefect.config.backend == "server":
full_url = "/".join([prefect.config.server.ui.endpoint, subdirectory, id])
return full_url
def get_default_tenant_slug(self, as_user: bool = True) -> str:
if as_user:
query = {
"query": {"user": {"default_membership": {"tenant": "slug"}}}
}
else:
query = {"query": {"tenant": {"slug"}}}
res = self.graphql(query)
if as_user:
user = res.get("data").user[0]
slug = user.default_membership.tenant.slug
else:
slug = res.get("data").tenant[0].slug
return slug
def create_project(self, project_name: str, project_description: str = None) -> str:
project_mutation = {
"mutation($input: create_project_input!)": {
"create_project(input: $input)": {"id"}
}
}
res = self.graphql(
project_mutation,
variables=dict(
input=dict(
name=project_name,
description=project_description,
tenant_id=self._active_tenant_id,
)
),
)
return res.data.create_project.id
def create_flow_run(
self,
flow_id: str = None,
context: dict = None,
parameters: dict = None,
scheduled_start_time: datetime.datetime = None,
idempotency_key: str = None,
run_name: str = None,
version_group_id: str = None,
) -> str:
create_mutation = {
"mutation($input: create_flow_run_input!)": {
"create_flow_run(input: $input)": {"id": True}
}
}
if not flow_id and not version_group_id:
raise ValueError("One of flow_id or version_group_id must be provided")
if flow_id:
inputs = dict(flow_id=flow_id)
else:
inputs = dict(version_group_id=version_group_id)
if parameters is not None:
inputs.update(parameters=parameters)
if context is not None:
inputs.update(context=context)
if idempotency_key is not None:
inputs.update(idempotency_key=idempotency_key)
if scheduled_start_time is not None:
inputs.update(
scheduled_start_time=scheduled_start_time.isoformat()
)
if run_name is not None:
inputs.update(flow_run_name=run_name)
res = self.graphql(create_mutation, variables=dict(input=inputs))
return res.data.create_flow_run.id
def get_flow_run_info(self, flow_run_id: str) -> FlowRunInfoResult:
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"id": True,
"name": True,
"flow_id": True,
"parameters": True,
"context": True,
"version": True,
"scheduled_start_time": True,
"serialized_state": True,
with_args("task_runs", {"where": {"map_index": {"_eq": -1}}}): {
"id": True,
"task": {"id": True, "slug": True},
"version": True,
"serialized_state": True,
},
}
}
}
result = self.graphql(query).data.flow_run_by_pk
if result is None:
raise ClientError('Flow run ID not found: "{}"'.format(flow_run_id))
result.scheduled_start_time = pendulum.parse(result.scheduled_start_time)
result.state = prefect.engine.state.State.deserialize(
result.pop("serialized_state")
)
task_runs = []
for tr in result.task_runs:
tr.state = prefect.engine.state.State.deserialize(
tr.pop("serialized_state")
)
task_info = tr.pop("task")
tr.task_id = task_info["id"]
tr.task_slug = task_info["slug"]
task_runs.append(TaskRunInfoResult(**tr))
result.task_runs = task_runs
result.context = (
result.context.to_dict() if result.context is not None else None
)
result.parameters = (
result.parameters.to_dict() if result.parameters is not None else None
)
return FlowRunInfoResult(**result)
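    # Heartbeat mutations mark a run as still alive so the backend can detect
    # runs whose processes have died.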
def update_flow_run_heartbeat(self, flow_run_id: str) -> None:
mutation = {
"mutation": {
with_args(
"update_flow_run_heartbeat", {"input": {"flow_run_id": flow_run_id}}
): {"success"}
}
}
self.graphql(mutation, raise_on_error=True)
def update_task_run_heartbeat(self, task_run_id: str) -> None:
mutation = {
"mutation": {
with_args(
"update_task_run_heartbeat", {"input": {"task_run_id": task_run_id}}
): {"success"}
}
}
self.graphql(mutation, raise_on_error=True)
def get_flow_run_state(self, flow_run_id: str) -> "prefect.engine.state.State":
query = {
"query": {
with_args("flow_run_by_pk", {"id": flow_run_id}): {
"serialized_state": True,
}
}
}
flow_run = self.graphql(query).data.flow_run_by_pk
return prefect.engine.state.State.deserialize(flow_run.serialized_state)
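    # Submit a new flow run state. If the backend responds with QUEUED, return
    # a Queued state whose start_time is pushed out by the configured queue
    # interval instead of the requested state.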
def set_flow_run_state(
self,
flow_run_id: str,
state: "prefect.engine.state.State",
version: int = None,
) -> "prefect.engine.state.State":
mutation = {
"mutation($input: set_flow_run_states_input!)": {
"set_flow_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
flow_run_id=flow_run_id,
version=version,
)
]
)
),
)
state_payload = result.data.set_flow_run_states.states[0]
if state_payload.status == "QUEUED":
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
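    # Return up to 100 Cached states created after `created_after`, matched by
    # cache_key when one is given, otherwise by task_id.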
def get_latest_cached_states(
self, task_id: str, cache_key: Optional[str], created_after: datetime.datetime
) -> List["prefect.engine.state.State"]:
args = {
"where": {
"state": {"_eq": "Cached"},
"state_timestamp": {"_gte": created_after.isoformat()},
},
"order_by": {"state_timestamp": EnumValue("desc")},
"limit": 100,
}
if cache_key is not None:
args["where"].update({"cache_key": {"_eq": cache_key}})
else:
args["where"].update({"task_id": {"_eq": task_id}})
query = {"query": {with_args("task_run", args): "serialized_state"}}
result = self.graphql(query)
deserializer = prefect.engine.state.State.deserialize
valid_states = [
deserializer(res.serialized_state) for res in result.data.task_run
]
return valid_states
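    # Get or create the task run for (flow_run_id, task_id, map_index), then
    # fetch its version, slug, and deserialized state.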
def get_task_run_info(
self, flow_run_id: str, task_id: str, map_index: Optional[int] = None
) -> TaskRunInfoResult:
mutation = {
"mutation": {
with_args(
"get_or_create_task_run",
{
"input": {
"flow_run_id": flow_run_id,
"task_id": task_id,
"map_index": -1 if map_index is None else map_index,
}
},
): {
"id": True,
}
}
}
result = self.graphql(mutation)
if result is None:
raise ClientError("Failed to create task run.")
task_run_id = result.data.get_or_create_task_run.id
query = {
"query": {
with_args("task_run_by_pk", {"id": task_run_id}): {
"version": True,
"serialized_state": True,
"task": {"slug": True},
}
}
}
task_run = self.graphql(query).data.task_run_by_pk
if task_run is None:
raise ClientError('Task run ID not found: "{}"'.format(task_run_id))
state = prefect.engine.state.State.deserialize(task_run.serialized_state)
return TaskRunInfoResult(
id=task_run_id,
task_id=task_id,
task_slug=task_run.task.slug,
version=task_run.version,
state=state,
)
def set_task_run_name(self, task_run_id: str, name: str) -> bool:
mutation = {
"mutation($input: set_task_run_name_input!)": {
"set_task_run_name(input: $input)": {
"success": True,
}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(task_run_id=task_run_id, name=name))
)
return result.data.set_task_run_name.success
def get_task_run_state(self, task_run_id: str) -> "prefect.engine.state.State":
query = {
"query": {
with_args("task_run_by_pk", {"id": task_run_id}): {
"serialized_state": True,
}
}
}
task_run = self.graphql(query).data.task_run_by_pk
return prefect.engine.state.State.deserialize(task_run.serialized_state)
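    # Same pattern as set_flow_run_state: submit the state and translate a
    # QUEUED response into a delayed Queued state.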
def set_task_run_state(
self,
task_run_id: str,
state: "prefect.engine.state.State",
version: int = None,
cache_for: datetime.timedelta = None,
) -> "prefect.engine.state.State":
mutation = {
"mutation($input: set_task_run_states_input!)": {
"set_task_run_states(input: $input)": {
"states": {"id", "status", "message"}
}
}
}
serialized_state = state.serialize()
result = self.graphql(
mutation,
variables=dict(
input=dict(
states=[
dict(
state=serialized_state,
task_run_id=task_run_id,
version=version,
)
]
)
),
)
state_payload = result.data.set_task_run_states.states[0]
if state_payload.status == "QUEUED":
return prefect.engine.state.Queued(
message=state_payload.get("message"),
start_time=pendulum.now("UTC").add(
seconds=prefect.context.config.cloud.queue_interval
),
)
return state
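    # Store a named secret value with the backend.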
def set_secret(self, name: str, value: Any) -> None:
mutation = {
"mutation($input: set_secret_input!)": {
"set_secret(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(name=name, value=value))
)
if not result.data.set_secret.success:
raise ValueError("Setting secret failed.")
def get_task_tag_limit(self, tag: str) -> Optional[int]:
query = {
"query": {
with_args("task_tag_limit", {"where": {"tag": {"_eq": tag}}}): {
"limit": True
}
}
}
result = self.graphql(query)
if result.data.task_tag_limit:
return result.data.task_tag_limit[0].limit
else:
return None
def update_task_tag_limit(self, tag: str, limit: int) -> None:
if limit < 0:
raise ValueError("Concurrency limits must be >= 0")
mutation = {
"mutation($input: update_task_tag_limit_input!)": {
"update_task_tag_limit(input: $input)": {"id"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(tag=tag, limit=limit))
)
if not result.data.update_task_tag_limit.id:
raise ValueError("Updating the task tag concurrency limit failed.")
def delete_task_tag_limit(self, limit_id: str) -> None:
mutation = {
"mutation($input: delete_task_tag_limit_input!)": {
"delete_task_tag_limit(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(limit_id=limit_id))
)
if not result.data.delete_task_tag_limit.success:
raise ValueError("Deleting the task tag concurrency limit failed.")
def write_run_logs(self, logs: List[Dict]) -> None:
mutation = {
"mutation($input: write_run_logs_input!)": {
"write_run_logs(input: $input)": {"success"}
}
}
result = self.graphql(
mutation, variables=dict(input=dict(logs=logs))
)
if not result.data.write_run_logs.success:
raise ValueError("Writing logs failed.")
def register_agent(
self,
agent_type: str,
name: str = None,
labels: List[str] = None,
agent_config_id: str = None,
) -> str:
mutation = {
"mutation($input: register_agent_input!)": {
"register_agent(input: $input)": {"id"}
}
}
result = self.graphql(
mutation,
variables=dict(
input=dict(
type=agent_type,
name=name,
labels=labels or [],
tenant_id=self._active_tenant_id,
agent_config_id=agent_config_id,
)
),
)
if not result.data.register_agent.id:
raise ValueError("Error registering agent")
return result.data.register_agent.id
def get_agent_config(self, agent_config_id: str) -> dict:
query = {
"query": {
with_args(
"agent_config", {"where": {"id": {"_eq": agent_config_id}}}
): {"settings": True}
}
}
result = self.graphql(query)
return result.data.agent_config[0].settings
| true
| true
|
790b8abf7f9381e65335a4c3075421b84e923688
| 2,238
|
py
|
Python
|
darknet_model_client.py
|
gouchicao/darknet-serving
|
0024570ca2c3ec12866e3523e18975dc7e3ab836
|
[
"MIT"
] | 8
|
2019-06-23T21:05:52.000Z
|
2020-10-31T02:41:27.000Z
|
darknet_model_client.py
|
gouchicao/darknet-serving
|
0024570ca2c3ec12866e3523e18975dc7e3ab836
|
[
"MIT"
] | null | null | null |
darknet_model_client.py
|
gouchicao/darknet-serving
|
0024570ca2c3ec12866e3523e18975dc7e3ab836
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import random
import logging
import argparse
import grpc
import object_detection_pb2
import object_detection_pb2_grpc
BLOCK_SIZE = 40000
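# Iterable that slices the image bytes into BLOCK_SIZE chunks so they can be
# streamed to the server as a sequence of UploadImageRequest messages.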
class ImageDataBlockRequestIterable(object):
def __init__(self, img_data):
self.data = img_data
self.pos = 0
def __iter__(self):
return self
def __next__(self):
data_block = self.data[self.pos:self.pos+BLOCK_SIZE]
if data_block:
request = object_detection_pb2.UploadImageRequest(
                data_block=data_block
)
self.pos += BLOCK_SIZE
return request
else:
raise StopIteration
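# Thin wrapper around the generated ObjectDetection stub on an insecure channel.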
class gRPCClient():
def __init__(self, server_address):
logging.basicConfig()
channel = grpc.insecure_channel(server_address)
self.stub = object_detection_pb2_grpc.ObjectDetectionStub(channel)
def detect(self, img_data):
if img_data:
data_block_iterable = ImageDataBlockRequestIterable(img_data)
try:
response = self.stub.detect(data_block_iterable)
return response
except grpc.RpcError as err:
print(err.details()) #pylint: disable=no-member
#print('{}, {}'.format(err.code().name, err.code().value())) #pylint: disable=no-member
else:
            print('image data is empty or None.')
def read_image(filename):
img_data = None
with open(filename, 'rb') as f:
img_data = f.read()
return img_data
# python darknet_model_client.py -a 127.0.0.1:7713 -f ../darknet/model-zoo/platen-switch/test/IMG_9256.JPG
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--server_address', type=str, help='server address 127.0.0.1:7713', default='[::]:7713')
parser.add_argument('-f', '--image_file', type=str, help='image file path.')
args = parser.parse_args()
if args.server_address and args.image_file:
img_data = read_image(args.image_file)
client = gRPCClient(args.server_address)
response = client.detect(img_data)
print(response)
else:
print("argument isn't none.")
| 27.62963
| 118
| 0.637623
|
from __future__ import print_function
import random
import logging
import argparse
import grpc
import object_detection_pb2
import object_detection_pb2_grpc
BLOCK_SIZE = 40000
class ImageDataBlockRequestIterable(object):
def __init__(self, img_data):
self.data = img_data
self.pos = 0
def __iter__(self):
return self
def __next__(self):
data_block = self.data[self.pos:self.pos+BLOCK_SIZE]
if data_block:
request = object_detection_pb2.UploadImageRequest(
                data_block=data_block
)
self.pos += BLOCK_SIZE
return request
else:
raise StopIteration
class gRPCClient():
def __init__(self, server_address):
logging.basicConfig()
channel = grpc.insecure_channel(server_address)
self.stub = object_detection_pb2_grpc.ObjectDetectionStub(channel)
def detect(self, img_data):
if img_data:
data_block_iterable = ImageDataBlockRequestIterable(img_data)
try:
response = self.stub.detect(data_block_iterable)
return response
except grpc.RpcError as err:
                print(err.details())
        else:
            print('image data is empty or None.')
def read_image(filename):
img_data = None
with open(filename, 'rb') as f:
img_data = f.read()
return img_data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--server_address', type=str, help='server address 127.0.0.1:7713', default='[::]:7713')
parser.add_argument('-f', '--image_file', type=str, help='image file path.')
args = parser.parse_args()
if args.server_address and args.image_file:
img_data = read_image(args.image_file)
client = gRPCClient(args.server_address)
response = client.detect(img_data)
print(response)
else:
print("argument isn't none.")
| true
| true
|
790b8acbfbb77b067f3c0226ae098a5107f695a3
| 732
|
py
|
Python
|
talosblockchain/global_tests/test_udprpc_server.py
|
chunchuan-wang/droplet-engine
|
5c2dbac90aa3bde837ed4989ecd78235e5d9ef8e
|
[
"Apache-2.0"
] | 10
|
2020-10-14T14:22:20.000Z
|
2022-03-16T11:33:14.000Z
|
talosblockchain/global_tests/test_udprpc_server.py
|
chunchuan-wang/droplet-engine
|
5c2dbac90aa3bde837ed4989ecd78235e5d9ef8e
|
[
"Apache-2.0"
] | null | null | null |
talosblockchain/global_tests/test_udprpc_server.py
|
chunchuan-wang/droplet-engine
|
5c2dbac90aa3bde837ed4989ecd78235e5d9ef8e
|
[
"Apache-2.0"
] | 4
|
2020-08-30T12:40:40.000Z
|
2021-08-03T15:27:12.000Z
|
#© 2017-2020, ETH Zurich, D-INFK, lubu@inf.ethz.ch
from rpcudp.protocol import RPCProtocol
from twisted.internet import reactor
from talosstorage.checks import QueryToken
from talosstorage.chunkdata import CloudChunk
class RPCServer(RPCProtocol):
# Any methods starting with "rpc_" are available to clients.
def rpc_sayhi(self, sender, chunk, token):
token = QueryToken.from_json(token)
# This could return a Deferred as well. sender is (ip,port)
chunk_orig = CloudChunk.decode(chunk)
return "Tag is %s you live at %s:%i and token is %s" % (chunk_orig.get_tag_hex(), sender[0], sender[1], token.owner)
# start a server on UDP port 1234
reactor.listenUDP(1234, RPCServer())
reactor.run()
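# A matching client would invoke the remote method by name over UDP. A minimal
# sketch following rpcudp's calling convention (addresses and payloads here are
# hypothetical, not part of this test):
#
#   client = RPCProtocol()
#   reactor.listenUDP(5678, client)
#   d = client.sayhi(('127.0.0.1', 1234), chunk_bytes, token_json)
#   d.addCallback(print)
#   reactor.run()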
| 36.6
| 125
| 0.726776
|
from rpcudp.protocol import RPCProtocol
from twisted.internet import reactor
from talosstorage.checks import QueryToken
from talosstorage.chunkdata import CloudChunk
class RPCServer(RPCProtocol):
def rpc_sayhi(self, sender, chunk, token):
token = QueryToken.from_json(token)
chunk_orig = CloudChunk.decode(chunk)
return "Tag is %s you live at %s:%i and token is %s" % (chunk_orig.get_tag_hex(), sender[0], sender[1], token.owner)
reactor.listenUDP(1234, RPCServer())
reactor.run()
| true
| true
|
790b8b3a9ab7ad264f6469a95ecd011e62f74329
| 11,096
|
py
|
Python
|
distributed/cli/dask_worker.py
|
deniederhut/distributed
|
b83edbef74a1718d62e51a9cee0379b7617048e1
|
[
"BSD-3-Clause"
] | 26
|
2015-09-09T11:35:47.000Z
|
2020-06-14T18:36:50.000Z
|
distributed/cli/dask_worker.py
|
deniederhut/distributed
|
b83edbef74a1718d62e51a9cee0379b7617048e1
|
[
"BSD-3-Clause"
] | 19
|
2015-10-07T19:25:55.000Z
|
2019-06-06T20:40:24.000Z
|
distributed/cli/dask_worker.py
|
deniederhut/distributed
|
b83edbef74a1718d62e51a9cee0379b7617048e1
|
[
"BSD-3-Clause"
] | 8
|
2015-10-22T21:23:09.000Z
|
2019-07-12T14:09:17.000Z
|
import atexit
import logging
import multiprocessing
import gc
import os
from sys import exit
import warnings
import click
import dask
from distributed import Nanny, Worker
from distributed.security import Security
from distributed.cli.utils import check_python_3, install_signal_handlers
from distributed.comm import get_address_host_port
from distributed.preloading import validate_preload_argv
from distributed.proctitle import (
enable_proctitle_on_children,
enable_proctitle_on_current,
)
from toolz import valmap
from tornado.ioloop import IOLoop, TimeoutError
from tornado import gen
logger = logging.getLogger("distributed.dask_worker")
pem_file_option_type = click.Path(exists=True, resolve_path=True)
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.argument("scheduler", type=str, required=False)
@click.option(
"--tls-ca-file",
type=pem_file_option_type,
default=None,
help="CA cert(s) file for TLS (in PEM format)",
)
@click.option(
"--tls-cert",
type=pem_file_option_type,
default=None,
help="certificate file for TLS (in PEM format)",
)
@click.option(
"--tls-key",
type=pem_file_option_type,
default=None,
help="private key file for TLS (in PEM format)",
)
@click.option(
"--worker-port",
type=int,
default=0,
help="Serving computation port, defaults to random",
)
@click.option(
"--nanny-port", type=int, default=0, help="Serving nanny port, defaults to random"
)
@click.option(
"--bokeh-port", type=int, default=None, help="Deprecated. See --dashboard-address"
)
@click.option(
"--dashboard-address",
type=str,
default=":0",
help="Address on which to listen for diagnostics dashboard",
)
@click.option(
"--dashboard/--no-dashboard",
"dashboard",
default=True,
required=False,
help="Launch the Dashboard [default: --dashboard]",
)
@click.option(
"--bokeh/--no-bokeh",
"bokeh",
default=None,
help="Deprecated. See --dashboard/--no-dashboard.",
required=False,
)
@click.option(
"--listen-address",
type=str,
default=None,
help="The address to which the worker binds. Example: tcp://0.0.0.0:9000",
)
@click.option(
"--contact-address",
type=str,
default=None,
help="The address the worker advertises to the scheduler for "
"communication with it and other workers. "
"Example: tcp://127.0.0.1:9000",
)
@click.option(
"--host",
type=str,
default=None,
help="Serving host. Should be an ip address that is"
" visible to the scheduler and other workers. "
"See --listen-address and --contact-address if you "
"need different listen and contact addresses. "
"See --interface.",
)
@click.option(
"--interface", type=str, default=None, help="Network interface like 'eth0' or 'ib0'"
)
@click.option(
"--protocol", type=str, default=None, help="Protocol like tcp, tls, or ucx"
)
@click.option("--nthreads", type=int, default=0, help="Number of threads per process.")
@click.option(
"--nprocs",
type=int,
default=1,
show_default=True,
help="Number of worker processes to launch.",
)
@click.option(
"--name",
type=str,
default=None,
help="A unique name for this worker like 'worker-1'. "
"If used with --nprocs then the process number "
"will be appended like name-0, name-1, name-2, ...",
)
@click.option(
"--memory-limit",
default="auto",
show_default=True,
help="Bytes of memory per process that the worker can use. "
"This can be an integer (bytes), "
"float (fraction of total system memory), "
"string (like 5GB or 5000M), "
"'auto', or zero for no memory management",
)
@click.option(
"--reconnect/--no-reconnect",
default=True,
help="Reconnect to scheduler if disconnected [default: --reconnect]",
)
@click.option(
"--nanny/--no-nanny",
default=True,
help="Start workers in nanny process for management [default: --nanny]",
)
@click.option("--pid-file", type=str, default="", help="File to write the process PID")
@click.option(
"--local-directory", default="", type=str, help="Directory to place worker files"
)
@click.option(
"--resources",
type=str,
default="",
help='Resources for task constraints like "GPU=2 MEM=10e9". '
"Resources are applied separately to each worker process "
"(only relevant when starting multiple worker processes with '--nprocs').",
)
@click.option(
"--scheduler-file",
type=str,
default="",
help="Filename to JSON encoded scheduler information. "
"Use with dask-scheduler --scheduler-file",
)
@click.option(
"--death-timeout",
type=str,
default=None,
help="Seconds to wait for a scheduler before closing",
)
@click.option(
"--dashboard-prefix", type=str, default="", help="Prefix for the dashboard"
)
@click.option(
"--lifetime",
type=str,
default="",
help="If provided, shut down the worker after this duration.",
)
@click.option(
"--lifetime-stagger",
type=str,
default="0 seconds",
show_default=True,
help="Random amount by which to stagger lifetime values",
)
@click.option(
"--lifetime-restart/--no-lifetime-restart",
"lifetime_restart",
default=False,
show_default=True,
required=False,
help="Whether or not to restart the worker after the lifetime lapses. "
"This assumes that you are using the --lifetime and --nanny keywords",
)
@click.option(
"--preload",
type=str,
multiple=True,
is_eager=True,
help="Module that should be loaded by each worker process "
'like "foo.bar" or "/path/to/foo.py"',
)
@click.argument(
"preload_argv", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv
)
@click.version_option()
def main(
scheduler,
host,
worker_port,
listen_address,
contact_address,
nanny_port,
nthreads,
nprocs,
nanny,
name,
pid_file,
resources,
dashboard,
bokeh,
bokeh_port,
scheduler_file,
dashboard_prefix,
tls_ca_file,
tls_cert,
tls_key,
dashboard_address,
**kwargs
):
g0, g1, g2 = gc.get_threshold() # https://github.com/dask/distributed/issues/1653
gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)
enable_proctitle_on_current()
enable_proctitle_on_children()
if bokeh_port is not None:
warnings.warn(
"The --bokeh-port flag has been renamed to --dashboard-address. "
"Consider adding ``--dashboard-address :%d`` " % bokeh_port
)
dashboard_address = bokeh_port
if bokeh is not None:
warnings.warn(
"The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
)
dashboard = bokeh
sec = Security(
**{
k: v
for k, v in [
("tls_ca_file", tls_ca_file),
("tls_worker_cert", tls_cert),
("tls_worker_key", tls_key),
]
if v is not None
}
)
if nprocs > 1 and worker_port != 0:
logger.error(
"Failed to launch worker. You cannot use the --port argument when nprocs > 1."
)
exit(1)
if nprocs > 1 and not nanny:
logger.error(
"Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1."
)
exit(1)
if contact_address and not listen_address:
logger.error(
"Failed to launch worker. "
"Must specify --listen-address when --contact-address is given"
)
exit(1)
if nprocs > 1 and listen_address:
logger.error(
"Failed to launch worker. "
"You cannot specify --listen-address when nprocs > 1."
)
exit(1)
if (worker_port or host) and listen_address:
logger.error(
"Failed to launch worker. "
"You cannot specify --listen-address when --worker-port or --host is given."
)
exit(1)
try:
if listen_address:
(host, worker_port) = get_address_host_port(listen_address, strict=True)
if contact_address:
# we only need this to verify it is getting parsed
(_, _) = get_address_host_port(contact_address, strict=True)
else:
# if contact address is not present we use the listen_address for contact
contact_address = listen_address
except ValueError as e:
logger.error("Failed to launch worker. " + str(e))
exit(1)
if nanny:
port = nanny_port
else:
port = worker_port
if not nthreads:
nthreads = multiprocessing.cpu_count() // nprocs
if pid_file:
with open(pid_file, "w") as f:
f.write(str(os.getpid()))
def del_pid_file():
if os.path.exists(pid_file):
os.remove(pid_file)
atexit.register(del_pid_file)
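    # Parse resource constraints such as "GPU=2 MEM=10e9" (comma- or
    # space-separated) into a {name: float} mapping.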
if resources:
resources = resources.replace(",", " ").split()
resources = dict(pair.split("=") for pair in resources)
resources = valmap(float, resources)
else:
resources = None
loop = IOLoop.current()
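    # With --nanny (the default) each worker is supervised by a Nanny process;
    # --no-nanny starts a bare Worker in this process instead.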
if nanny:
kwargs.update({"worker_port": worker_port, "listen_address": listen_address})
t = Nanny
else:
if nanny_port:
kwargs["service_ports"] = {"nanny": nanny_port}
t = Worker
if (
not scheduler
and not scheduler_file
and dask.config.get("scheduler-address", None) is None
):
raise ValueError(
"Need to provide scheduler address like\n"
"dask-worker SCHEDULER_ADDRESS:8786"
)
nannies = [
t(
scheduler,
scheduler_file=scheduler_file,
nthreads=nthreads,
loop=loop,
resources=resources,
security=sec,
contact_address=contact_address,
host=host,
port=port,
dashboard_address=dashboard_address if dashboard else None,
service_kwargs={"dashboard": {"prefix": dashboard_prefix}},
name=name if nprocs == 1 or not name else name + "-" + str(i),
**kwargs
)
for i in range(nprocs)
]
@gen.coroutine
def close_all():
# Unregister all workers from scheduler
if nanny:
yield [n.close(timeout=2) for n in nannies]
def on_signal(signum):
logger.info("Exiting on signal %d", signum)
close_all()
@gen.coroutine
def run():
yield nannies
yield [n.finished() for n in nannies]
install_signal_handlers(loop, cleanup=on_signal)
try:
loop.run_sync(run)
except TimeoutError:
# We already log the exception in nanny / worker. Don't do it again.
raise TimeoutError("Timed out starting worker.") from None
except KeyboardInterrupt:
pass
finally:
logger.info("End worker")
def go():
check_python_3()
main()
if __name__ == "__main__":
go()
| 26.997567
| 95
| 0.62518
|
import atexit
import logging
import multiprocessing
import gc
import os
from sys import exit
import warnings
import click
import dask
from distributed import Nanny, Worker
from distributed.security import Security
from distributed.cli.utils import check_python_3, install_signal_handlers
from distributed.comm import get_address_host_port
from distributed.preloading import validate_preload_argv
from distributed.proctitle import (
enable_proctitle_on_children,
enable_proctitle_on_current,
)
from toolz import valmap
from tornado.ioloop import IOLoop, TimeoutError
from tornado import gen
logger = logging.getLogger("distributed.dask_worker")
pem_file_option_type = click.Path(exists=True, resolve_path=True)
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.argument("scheduler", type=str, required=False)
@click.option(
"--tls-ca-file",
type=pem_file_option_type,
default=None,
help="CA cert(s) file for TLS (in PEM format)",
)
@click.option(
"--tls-cert",
type=pem_file_option_type,
default=None,
help="certificate file for TLS (in PEM format)",
)
@click.option(
"--tls-key",
type=pem_file_option_type,
default=None,
help="private key file for TLS (in PEM format)",
)
@click.option(
"--worker-port",
type=int,
default=0,
help="Serving computation port, defaults to random",
)
@click.option(
"--nanny-port", type=int, default=0, help="Serving nanny port, defaults to random"
)
@click.option(
"--bokeh-port", type=int, default=None, help="Deprecated. See --dashboard-address"
)
@click.option(
"--dashboard-address",
type=str,
default=":0",
help="Address on which to listen for diagnostics dashboard",
)
@click.option(
"--dashboard/--no-dashboard",
"dashboard",
default=True,
required=False,
help="Launch the Dashboard [default: --dashboard]",
)
@click.option(
"--bokeh/--no-bokeh",
"bokeh",
default=None,
help="Deprecated. See --dashboard/--no-dashboard.",
required=False,
)
@click.option(
"--listen-address",
type=str,
default=None,
help="The address to which the worker binds. Example: tcp://0.0.0.0:9000",
)
@click.option(
"--contact-address",
type=str,
default=None,
help="The address the worker advertises to the scheduler for "
"communication with it and other workers. "
"Example: tcp://127.0.0.1:9000",
)
@click.option(
"--host",
type=str,
default=None,
help="Serving host. Should be an ip address that is"
" visible to the scheduler and other workers. "
"See --listen-address and --contact-address if you "
"need different listen and contact addresses. "
"See --interface.",
)
@click.option(
"--interface", type=str, default=None, help="Network interface like 'eth0' or 'ib0'"
)
@click.option(
"--protocol", type=str, default=None, help="Protocol like tcp, tls, or ucx"
)
@click.option("--nthreads", type=int, default=0, help="Number of threads per process.")
@click.option(
"--nprocs",
type=int,
default=1,
show_default=True,
help="Number of worker processes to launch.",
)
@click.option(
"--name",
type=str,
default=None,
help="A unique name for this worker like 'worker-1'. "
"If used with --nprocs then the process number "
"will be appended like name-0, name-1, name-2, ...",
)
@click.option(
"--memory-limit",
default="auto",
show_default=True,
help="Bytes of memory per process that the worker can use. "
"This can be an integer (bytes), "
"float (fraction of total system memory), "
"string (like 5GB or 5000M), "
"'auto', or zero for no memory management",
)
@click.option(
"--reconnect/--no-reconnect",
default=True,
help="Reconnect to scheduler if disconnected [default: --reconnect]",
)
@click.option(
"--nanny/--no-nanny",
default=True,
help="Start workers in nanny process for management [default: --nanny]",
)
@click.option("--pid-file", type=str, default="", help="File to write the process PID")
@click.option(
"--local-directory", default="", type=str, help="Directory to place worker files"
)
@click.option(
"--resources",
type=str,
default="",
help='Resources for task constraints like "GPU=2 MEM=10e9". '
"Resources are applied separately to each worker process "
"(only relevant when starting multiple worker processes with '--nprocs').",
)
@click.option(
"--scheduler-file",
type=str,
default="",
help="Filename to JSON encoded scheduler information. "
"Use with dask-scheduler --scheduler-file",
)
@click.option(
"--death-timeout",
type=str,
default=None,
help="Seconds to wait for a scheduler before closing",
)
@click.option(
"--dashboard-prefix", type=str, default="", help="Prefix for the dashboard"
)
@click.option(
"--lifetime",
type=str,
default="",
help="If provided, shut down the worker after this duration.",
)
@click.option(
"--lifetime-stagger",
type=str,
default="0 seconds",
show_default=True,
help="Random amount by which to stagger lifetime values",
)
@click.option(
"--lifetime-restart/--no-lifetime-restart",
"lifetime_restart",
default=False,
show_default=True,
required=False,
help="Whether or not to restart the worker after the lifetime lapses. "
"This assumes that you are using the --lifetime and --nanny keywords",
)
@click.option(
"--preload",
type=str,
multiple=True,
is_eager=True,
help="Module that should be loaded by each worker process "
'like "foo.bar" or "/path/to/foo.py"',
)
@click.argument(
"preload_argv", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv
)
@click.version_option()
def main(
scheduler,
host,
worker_port,
listen_address,
contact_address,
nanny_port,
nthreads,
nprocs,
nanny,
name,
pid_file,
resources,
dashboard,
bokeh,
bokeh_port,
scheduler_file,
dashboard_prefix,
tls_ca_file,
tls_cert,
tls_key,
dashboard_address,
**kwargs
):
g0, g1, g2 = gc.get_threshold()
gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)
enable_proctitle_on_current()
enable_proctitle_on_children()
if bokeh_port is not None:
warnings.warn(
"The --bokeh-port flag has been renamed to --dashboard-address. "
"Consider adding ``--dashboard-address :%d`` " % bokeh_port
)
dashboard_address = bokeh_port
if bokeh is not None:
warnings.warn(
"The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
)
dashboard = bokeh
sec = Security(
**{
k: v
for k, v in [
("tls_ca_file", tls_ca_file),
("tls_worker_cert", tls_cert),
("tls_worker_key", tls_key),
]
if v is not None
}
)
if nprocs > 1 and worker_port != 0:
logger.error(
"Failed to launch worker. You cannot use the --port argument when nprocs > 1."
)
exit(1)
if nprocs > 1 and not nanny:
logger.error(
"Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1."
)
exit(1)
if contact_address and not listen_address:
logger.error(
"Failed to launch worker. "
"Must specify --listen-address when --contact-address is given"
)
exit(1)
if nprocs > 1 and listen_address:
logger.error(
"Failed to launch worker. "
"You cannot specify --listen-address when nprocs > 1."
)
exit(1)
if (worker_port or host) and listen_address:
logger.error(
"Failed to launch worker. "
"You cannot specify --listen-address when --worker-port or --host is given."
)
exit(1)
try:
if listen_address:
(host, worker_port) = get_address_host_port(listen_address, strict=True)
if contact_address:
(_, _) = get_address_host_port(contact_address, strict=True)
else:
contact_address = listen_address
except ValueError as e:
logger.error("Failed to launch worker. " + str(e))
exit(1)
if nanny:
port = nanny_port
else:
port = worker_port
if not nthreads:
nthreads = multiprocessing.cpu_count() // nprocs
if pid_file:
with open(pid_file, "w") as f:
f.write(str(os.getpid()))
def del_pid_file():
if os.path.exists(pid_file):
os.remove(pid_file)
atexit.register(del_pid_file)
if resources:
resources = resources.replace(",", " ").split()
resources = dict(pair.split("=") for pair in resources)
resources = valmap(float, resources)
else:
resources = None
loop = IOLoop.current()
if nanny:
kwargs.update({"worker_port": worker_port, "listen_address": listen_address})
t = Nanny
else:
if nanny_port:
kwargs["service_ports"] = {"nanny": nanny_port}
t = Worker
if (
not scheduler
and not scheduler_file
and dask.config.get("scheduler-address", None) is None
):
raise ValueError(
"Need to provide scheduler address like\n"
"dask-worker SCHEDULER_ADDRESS:8786"
)
nannies = [
t(
scheduler,
scheduler_file=scheduler_file,
nthreads=nthreads,
loop=loop,
resources=resources,
security=sec,
contact_address=contact_address,
host=host,
port=port,
dashboard_address=dashboard_address if dashboard else None,
service_kwargs={"dashboard": {"prefix": dashboard_prefix}},
name=name if nprocs == 1 or not name else name + "-" + str(i),
**kwargs
)
for i in range(nprocs)
]
@gen.coroutine
def close_all():
if nanny:
yield [n.close(timeout=2) for n in nannies]
def on_signal(signum):
logger.info("Exiting on signal %d", signum)
close_all()
@gen.coroutine
def run():
yield nannies
yield [n.finished() for n in nannies]
install_signal_handlers(loop, cleanup=on_signal)
try:
loop.run_sync(run)
except TimeoutError:
raise TimeoutError("Timed out starting worker.") from None
except KeyboardInterrupt:
pass
finally:
logger.info("End worker")
def go():
check_python_3()
main()
if __name__ == "__main__":
go()
| true
| true
|
790b8b7bc539f6f7b41ca4ac7e89a16d7889de2b
| 21,029
|
py
|
Python
|
tests/reflection.py
|
Abhishek5101/peewee
|
bc4ada143a1e5e8a92af22d40a02bcd41e14c1bf
|
[
"MIT"
] | 1
|
2019-03-09T05:08:56.000Z
|
2019-03-09T05:08:56.000Z
|
tests/reflection.py
|
Abhishek5101/peewee
|
bc4ada143a1e5e8a92af22d40a02bcd41e14c1bf
|
[
"MIT"
] | null | null | null |
tests/reflection.py
|
Abhishek5101/peewee
|
bc4ada143a1e5e8a92af22d40a02bcd41e14c1bf
|
[
"MIT"
] | null | null | null |
import datetime
import os
import re
from peewee import *
from playhouse.reflection import *
from .base import IS_SQLITE_OLD
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import requires_models
from .base import requires_sqlite
from .base import skip_if
from .base_models import Tweet
from .base_models import User
class ColTypes(TestModel):
f1 = BigIntegerField(index=True)
f2 = BlobField()
f3 = BooleanField()
f4 = CharField(max_length=50)
f5 = DateField()
f6 = DateTimeField()
f7 = DecimalField()
f8 = DoubleField()
f9 = FloatField()
f10 = IntegerField(unique=True)
f11 = AutoField()
f12 = TextField()
f13 = TimeField()
class Meta:
indexes = (
(('f10', 'f11'), True),
(('f11', 'f8', 'f13'), False),
)
class Nullable(TestModel):
nullable_cf = CharField(null=True)
nullable_if = IntegerField(null=True)
class RelModel(TestModel):
col_types = ForeignKeyField(ColTypes, backref='foo')
col_types_nullable = ForeignKeyField(ColTypes, null=True)
class FKPK(TestModel):
col_types = ForeignKeyField(ColTypes, primary_key=True)
class Underscores(TestModel):
_id = AutoField()
_name = CharField()
class Category(TestModel):
name = CharField(max_length=10)
parent = ForeignKeyField('self', null=True)
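# Nugget pairs an explicit FK column name ('category_id') with a plain
# 'category' field, exercising column-name collision handling in reflection.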
class Nugget(TestModel):
category_id = ForeignKeyField(Category, column_name='category_id')
category = CharField()
class BaseReflectionTestCase(ModelTestCase):
def setUp(self):
super(BaseReflectionTestCase, self).setUp()
self.introspector = Introspector.from_database(self.database)
class TestReflection(BaseReflectionTestCase):
requires = [ColTypes, Nullable, RelModel, FKPK, Underscores, Category,
Nugget]
def test_generate_models(self):
models = self.introspector.generate_models()
self.assertTrue(set((
'category',
'col_types',
'fkpk',
'nugget',
'nullable',
'rel_model',
'underscores')).issubset(set(models)))
def assertIsInstance(obj, klass):
self.assertTrue(isinstance(obj, klass))
category = models['category']
self.assertEqual(
sorted(category._meta.fields),
['id', 'name', 'parent'])
assertIsInstance(category.id, AutoField)
assertIsInstance(category.name, CharField)
assertIsInstance(category.parent, ForeignKeyField)
self.assertEqual(category.parent.rel_model, category)
fkpk = models['fkpk']
self.assertEqual(sorted(fkpk._meta.fields), ['col_types'])
assertIsInstance(fkpk.col_types, ForeignKeyField)
self.assertEqual(fkpk.col_types.rel_model, models['col_types'])
self.assertTrue(fkpk.col_types.primary_key)
relmodel = models['rel_model']
self.assertEqual(
sorted(relmodel._meta.fields),
['col_types', 'col_types_nullable', 'id'])
assertIsInstance(relmodel.col_types, ForeignKeyField)
assertIsInstance(relmodel.col_types_nullable, ForeignKeyField)
self.assertFalse(relmodel.col_types.null)
self.assertTrue(relmodel.col_types_nullable.null)
self.assertEqual(relmodel.col_types.rel_model,
models['col_types'])
self.assertEqual(relmodel.col_types_nullable.rel_model,
models['col_types'])
@requires_sqlite
def test_generate_models_indexes(self):
models = self.introspector.generate_models()
self.assertEqual(models['fkpk']._meta.indexes, [])
self.assertEqual(models['rel_model']._meta.indexes, [])
self.assertEqual(models['category']._meta.indexes, [])
col_types = models['col_types']
indexed = set(['f1'])
unique = set(['f10'])
for field in col_types._meta.sorted_fields:
self.assertEqual(field.index, field.name in indexed)
self.assertEqual(field.unique, field.name in unique)
indexes = col_types._meta.indexes
self.assertEqual(sorted(indexes), [
(['f10', 'f11'], True),
(['f11', 'f8', 'f13'], False),
])
def test_table_subset(self):
models = self.introspector.generate_models(table_names=[
'category',
'col_types',
'foobarbaz'])
self.assertEqual(sorted(models.keys()), ['category', 'col_types'])
@requires_sqlite
def test_sqlite_fk_re(self):
user_id_tests = [
'FOREIGN KEY("user_id") REFERENCES "users"("id")',
'FOREIGN KEY(user_id) REFERENCES users(id)',
'FOREIGN KEY ([user_id]) REFERENCES [users] ([id])',
'"user_id" NOT NULL REFERENCES "users" ("id")',
'user_id not null references users (id)',
]
fk_pk_tests = [
('"col_types_id" INTEGER NOT NULL PRIMARY KEY REFERENCES '
'"coltypes" ("f11")'),
'FOREIGN KEY ("col_types_id") REFERENCES "coltypes" ("f11")',
]
regex = SqliteMetadata.re_foreign_key
for test in user_id_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'user_id', 'users', 'id',
))
for test in fk_pk_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'col_types_id', 'coltypes', 'f11',
))
def test_make_column_name(self):
# Tests for is_foreign_key=False.
tests = (
('Column', 'column'),
('Foo_iD', 'foo_id'),
('foo_id', 'foo_id'),
('foo_id_id', 'foo_id_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_id'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name), expected)
# Tests for is_foreign_key=True.
tests = (
('Foo_iD', 'foo'),
('foo_id', 'foo'),
('foo_id_id', 'foo_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name, True), expected)
def test_make_model_name(self):
tests = (
('Table', 'Table'),
('table', 'Table'),
('table_baz', 'TableBaz'),
('foo__bar__baz2', 'FooBarBaz2'),
('foo12_3', 'Foo123'),
)
for table_name, expected in tests:
self.assertEqual(
self.introspector.make_model_name(table_name), expected)
def test_col_types(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', (BigIntegerField, IntegerField), False),
# There do not appear to be separate constants for the blob and
# text field types in MySQL's drivers. See GH#1034.
('f2', (BlobField, TextField), False),
('f3', (BooleanField, IntegerField), False),
('f4', CharField, False),
('f5', DateField, False),
('f6', DateTimeField, False),
('f7', DecimalField, False),
('f8', (DoubleField, FloatField), False),
('f9', FloatField, False),
('f10', IntegerField, False),
('f11', AutoField, False),
('f12', TextField, False),
('f13', TimeField, False))),
('rel_model', (
('col_types_id', ForeignKeyField, False),
('col_types_nullable_id', ForeignKeyField, True))),
('nugget', (
('category_id', ForeignKeyField, False),
('category', CharField, False))),
('nullable', (
('nullable_cf', CharField, True),
('nullable_if', IntegerField, True))),
('fkpk', (
('col_types_id', ForeignKeyField, False),)),
('underscores', (
('_id', AutoField, False),
('_name', CharField, False))),
('category', (
('name', CharField, False),
('parent_id', ForeignKeyField, True))),
)
for table_name, expected_columns in expected:
introspected_columns = columns[table_name]
for field_name, field_class, is_null in expected_columns:
if not isinstance(field_class, (list, tuple)):
field_class = (field_class,)
column = introspected_columns[field_name]
self.assertTrue(column.field_class in field_class,
"%s in %s" % (column.field_class, field_class))
self.assertEqual(column.nullable, is_null)
def test_foreign_keys(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
self.assertEqual(foreign_keys['col_types'], [])
rel_model = foreign_keys['rel_model']
self.assertEqual(len(rel_model), 2)
fkpk = foreign_keys['fkpk']
self.assertEqual(len(fkpk), 1)
fkpk_fk = fkpk[0]
self.assertEqual(fkpk_fk.table, 'fkpk')
self.assertEqual(fkpk_fk.column, 'col_types_id')
self.assertEqual(fkpk_fk.dest_table, 'col_types')
self.assertEqual(fkpk_fk.dest_column, 'f11')
category = foreign_keys['category']
self.assertEqual(len(category), 1)
category_fk = category[0]
self.assertEqual(category_fk.table, 'category')
self.assertEqual(category_fk.column, 'parent_id')
self.assertEqual(category_fk.dest_table, 'category')
self.assertEqual(category_fk.dest_column, 'id')
def test_table_names(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
names = (
('col_types', 'ColTypes'),
('nullable', 'Nullable'),
('rel_model', 'RelModel'),
('fkpk', 'Fkpk'))
for k, v in names:
self.assertEqual(model_names[k], v)
def test_column_meta(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
rel_model = columns['rel_model']
col_types_id = rel_model['col_types_id']
self.assertEqual(col_types_id.get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'field': "'f11'",
})
col_types_nullable_id = rel_model['col_types_nullable_id']
self.assertEqual(col_types_nullable_id.get_field_parameters(), {
'column_name': "'col_types_nullable_id'",
'null': True,
'backref': "'col_types_col_types_nullable_set'",
'model': 'ColTypes',
'field': "'f11'",
})
fkpk = columns['fkpk']
self.assertEqual(fkpk['col_types_id'].get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'primary_key': True,
'field': "'f11'"})
category = columns['category']
parent_id = category['parent_id']
self.assertEqual(parent_id.get_field_parameters(), {
'column_name': "'parent_id'",
'null': True,
'model': "'self'",
'field': "'id'",
})
nugget = columns['nugget']
category_fk = nugget['category_id']
self.assertEqual(category_fk.name, 'category_id')
self.assertEqual(category_fk.get_field_parameters(), {
'field': "'id'",
'model': 'Category',
'column_name': "'category_id'",
})
category = nugget['category']
self.assertEqual(category.name, 'category')
def test_get_field(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', ('f1 = BigIntegerField(index=True)',
'f1 = IntegerField(index=True)')),
('f2', ('f2 = BlobField()', 'f2 = TextField()')),
('f4', 'f4 = CharField()'),
('f5', 'f5 = DateField()'),
('f6', 'f6 = DateTimeField()'),
('f7', 'f7 = DecimalField()'),
('f10', 'f10 = IntegerField(unique=True)'),
('f11', 'f11 = AutoField()'),
('f12', ('f12 = TextField()', 'f12 = BlobField()')),
('f13', 'f13 = TimeField()'),
)),
('nullable', (
('nullable_cf', 'nullable_cf = '
'CharField(null=True)'),
('nullable_if', 'nullable_if = IntegerField(null=True)'),
)),
('fkpk', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes, "
'primary_key=True)'),
)),
('nugget', (
('category_id', 'category_id = ForeignKeyField('
"column_name='category_id', field='id', model=Category)"),
('category', 'category = CharField()'),
)),
('rel_model', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes)"),
('col_types_nullable_id', 'col_types_nullable = '
"ForeignKeyField(backref='col_types_col_types_nullable_set', "
"column_name='col_types_nullable_id', field='f11', "
'model=ColTypes, null=True)'),
)),
('underscores', (
('_id', '_id = AutoField()'),
('_name', '_name = CharField()'),
)),
('category', (
('name', 'name = CharField()'),
('parent_id', 'parent = ForeignKeyField('
"column_name='parent_id', field='id', model='self', "
'null=True)'),
)),
)
for table, field_data in expected:
for field_name, fields in field_data:
if not isinstance(fields, tuple):
fields = (fields,)
actual = columns[table][field_name].get_field()
self.assertTrue(actual in fields,
'%s not in %s' % (actual, fields))
class EventLog(TestModel):
data = CharField(constraints=[SQL('DEFAULT \'\'')])
timestamp = DateTimeField(constraints=[SQL('DEFAULT current_timestamp')])
flags = IntegerField(constraints=[SQL('DEFAULT 0')])
misc = TextField(constraints=[SQL('DEFAULT \'foo\'')])
class DefaultVals(TestModel):
key = CharField(constraints=[SQL('DEFAULT \'foo\'')])
value = IntegerField(constraints=[SQL('DEFAULT 0')])
class Meta:
primary_key = CompositeKey('key', 'value')
class TestReflectDefaultValues(BaseReflectionTestCase):
requires = [DefaultVals, EventLog]
@requires_sqlite
def test_default_values(self):
models = self.introspector.generate_models()
default_vals = models['default_vals']
create_table = (
'CREATE TABLE IF NOT EXISTS "default_vals" ('
'"key" VARCHAR(255) NOT NULL DEFAULT \'foo\', '
'"value" INTEGER NOT NULL DEFAULT 0, '
'PRIMARY KEY ("key", "value"))')
# Re-create table using the introspected schema.
self.assertSQL(default_vals._schema._create_table(), create_table, [])
default_vals.drop_table()
default_vals.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
default_vals = models['default_vals']
self.assertSQL(default_vals._schema._create_table(), create_table, [])
@requires_sqlite
def test_default_values_extended(self):
models = self.introspector.generate_models()
eventlog = models['event_log']
create_table = (
'CREATE TABLE IF NOT EXISTS "event_log" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"data" VARCHAR(255) NOT NULL DEFAULT \'\', '
'"timestamp" DATETIME NOT NULL DEFAULT current_timestamp, '
'"flags" INTEGER NOT NULL DEFAULT 0, '
'"misc" TEXT NOT NULL DEFAULT \'foo\')')
# Re-create table using the introspected schema.
self.assertSQL(eventlog._schema._create_table(), create_table, [])
eventlog.drop_table()
eventlog.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
eventlog = models['event_log']
self.assertSQL(eventlog._schema._create_table(), create_table, [])
class TestReflectionDependencies(BaseReflectionTestCase):
requires = [User, Tweet]
def test_generate_dependencies(self):
models = self.introspector.generate_models(table_names=['tweet'])
self.assertEqual(set(models), set(('users', 'tweet')))
IUser = models['users']
ITweet = models['tweet']
self.assertEqual(set(ITweet._meta.fields), set((
'id', 'user', 'content', 'timestamp')))
self.assertEqual(set(IUser._meta.fields), set(('id', 'username')))
self.assertTrue(ITweet.user.rel_model is IUser)
self.assertTrue(ITweet.user.rel_field is IUser.id)
def test_ignore_backrefs(self):
models = self.introspector.generate_models(table_names=['users'])
self.assertEqual(set(models), set(('users',)))
class Note(TestModel):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
status = IntegerField()
class TestReflectViews(BaseReflectionTestCase):
requires = [Note]
def setUp(self):
super(TestReflectViews, self).setUp()
self.database.execute_sql('CREATE VIEW notes_public AS '
'SELECT content, timestamp FROM note '
'WHERE status = 1 ORDER BY timestamp DESC')
def tearDown(self):
self.database.execute_sql('DROP VIEW notes_public')
super(TestReflectViews, self).tearDown()
def test_views_ignored_default(self):
models = self.introspector.generate_models()
self.assertFalse('notes_public' in models)
def test_introspect_view(self):
models = self.introspector.generate_models(include_views=True)
self.assertTrue('notes_public' in models)
NotesPublic = models['notes_public']
self.assertEqual(sorted(NotesPublic._meta.fields),
['content', 'timestamp'])
self.assertTrue(isinstance(NotesPublic.content, TextField))
self.assertTrue(isinstance(NotesPublic.timestamp, DateTimeField))
@skip_if(IS_SQLITE_OLD)
def test_introspect_view_integration(self):
for i, (ct, st) in enumerate([('n1', 1), ('n2', 2), ('n3', 1)]):
Note.create(content=ct, status=st,
timestamp=datetime.datetime(2018, 1, 1 + i))
NP = self.introspector.generate_models(
table_names=['notes_public'], include_views=True)['notes_public']
self.assertEqual([(np.content, np.timestamp) for np in NP.select()], [
('n3', datetime.datetime(2018, 1, 3)),
('n1', datetime.datetime(2018, 1, 1))])
class Event(TestModel):
key = TextField()
timestamp = DateTimeField(index=True)
metadata = TextField(default='')
class TestInteractiveHelpers(ModelTestCase):
requires = [Category, Event]
def test_generate_models(self):
M = generate_models(self.database)
self.assertTrue('category' in M)
self.assertTrue('event' in M)
def assertFields(m, expected):
actual = [(f.name, f.field_type) for f in m._meta.sorted_fields]
self.assertEqual(actual, expected)
assertFields(M['category'], [('id', 'AUTO'), ('name', 'VARCHAR'),
('parent', 'INT')])
assertFields(M['event'], [
('id', 'AUTO'),
('key', 'TEXT'),
('timestamp', 'DATETIME'),
('metadata', 'TEXT')])
| 35.283557
| 79
| 0.567169
|
import datetime
import os
import re
from peewee import *
from playhouse.reflection import *
from .base import IS_SQLITE_OLD
from .base import ModelTestCase
from .base import TestModel
from .base import db
from .base import requires_models
from .base import requires_sqlite
from .base import skip_if
from .base_models import Tweet
from .base_models import User
class ColTypes(TestModel):
f1 = BigIntegerField(index=True)
f2 = BlobField()
f3 = BooleanField()
f4 = CharField(max_length=50)
f5 = DateField()
f6 = DateTimeField()
f7 = DecimalField()
f8 = DoubleField()
f9 = FloatField()
f10 = IntegerField(unique=True)
f11 = AutoField()
f12 = TextField()
f13 = TimeField()
class Meta:
indexes = (
(('f10', 'f11'), True),
(('f11', 'f8', 'f13'), False),
)
class Nullable(TestModel):
nullable_cf = CharField(null=True)
nullable_if = IntegerField(null=True)
class RelModel(TestModel):
col_types = ForeignKeyField(ColTypes, backref='foo')
col_types_nullable = ForeignKeyField(ColTypes, null=True)
class FKPK(TestModel):
col_types = ForeignKeyField(ColTypes, primary_key=True)
class Underscores(TestModel):
_id = AutoField()
_name = CharField()
class Category(TestModel):
name = CharField(max_length=10)
parent = ForeignKeyField('self', null=True)
class Nugget(TestModel):
category_id = ForeignKeyField(Category, column_name='category_id')
category = CharField()
class BaseReflectionTestCase(ModelTestCase):
def setUp(self):
super(BaseReflectionTestCase, self).setUp()
self.introspector = Introspector.from_database(self.database)
class TestReflection(BaseReflectionTestCase):
requires = [ColTypes, Nullable, RelModel, FKPK, Underscores, Category,
Nugget]
def test_generate_models(self):
models = self.introspector.generate_models()
self.assertTrue(set((
'category',
'col_types',
'fkpk',
'nugget',
'nullable',
'rel_model',
'underscores')).issubset(set(models)))
def assertIsInstance(obj, klass):
self.assertTrue(isinstance(obj, klass))
category = models['category']
self.assertEqual(
sorted(category._meta.fields),
['id', 'name', 'parent'])
assertIsInstance(category.id, AutoField)
assertIsInstance(category.name, CharField)
assertIsInstance(category.parent, ForeignKeyField)
self.assertEqual(category.parent.rel_model, category)
fkpk = models['fkpk']
self.assertEqual(sorted(fkpk._meta.fields), ['col_types'])
assertIsInstance(fkpk.col_types, ForeignKeyField)
self.assertEqual(fkpk.col_types.rel_model, models['col_types'])
self.assertTrue(fkpk.col_types.primary_key)
relmodel = models['rel_model']
self.assertEqual(
sorted(relmodel._meta.fields),
['col_types', 'col_types_nullable', 'id'])
assertIsInstance(relmodel.col_types, ForeignKeyField)
assertIsInstance(relmodel.col_types_nullable, ForeignKeyField)
self.assertFalse(relmodel.col_types.null)
self.assertTrue(relmodel.col_types_nullable.null)
self.assertEqual(relmodel.col_types.rel_model,
models['col_types'])
self.assertEqual(relmodel.col_types_nullable.rel_model,
models['col_types'])
@requires_sqlite
def test_generate_models_indexes(self):
models = self.introspector.generate_models()
self.assertEqual(models['fkpk']._meta.indexes, [])
self.assertEqual(models['rel_model']._meta.indexes, [])
self.assertEqual(models['category']._meta.indexes, [])
col_types = models['col_types']
indexed = set(['f1'])
unique = set(['f10'])
for field in col_types._meta.sorted_fields:
self.assertEqual(field.index, field.name in indexed)
self.assertEqual(field.unique, field.name in unique)
indexes = col_types._meta.indexes
self.assertEqual(sorted(indexes), [
(['f10', 'f11'], True),
(['f11', 'f8', 'f13'], False),
])
def test_table_subset(self):
models = self.introspector.generate_models(table_names=[
'category',
'col_types',
'foobarbaz'])
self.assertEqual(sorted(models.keys()), ['category', 'col_types'])
@requires_sqlite
def test_sqlite_fk_re(self):
user_id_tests = [
'FOREIGN KEY("user_id") REFERENCES "users"("id")',
'FOREIGN KEY(user_id) REFERENCES users(id)',
'FOREIGN KEY ([user_id]) REFERENCES [users] ([id])',
'"user_id" NOT NULL REFERENCES "users" ("id")',
'user_id not null references users (id)',
]
fk_pk_tests = [
('"col_types_id" INTEGER NOT NULL PRIMARY KEY REFERENCES '
'"coltypes" ("f11")'),
'FOREIGN KEY ("col_types_id") REFERENCES "coltypes" ("f11")',
]
regex = SqliteMetadata.re_foreign_key
for test in user_id_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'user_id', 'users', 'id',
))
for test in fk_pk_tests:
match = re.search(regex, test, re.I)
self.assertEqual(match.groups(), (
'col_types_id', 'coltypes', 'f11',
))
def test_make_column_name(self):
tests = (
('Column', 'column'),
('Foo_iD', 'foo_id'),
('foo_id', 'foo_id'),
('foo_id_id', 'foo_id_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_id'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name), expected)
tests = (
('Foo_iD', 'foo'),
('foo_id', 'foo'),
('foo_id_id', 'foo_id'),
('foo', 'foo'),
('_id', '_id'),
('a123', 'a123'),
('and', 'and_'),
('Class', 'class_'),
('Class_ID', 'class_'),
)
for col_name, expected in tests:
self.assertEqual(
self.introspector.make_column_name(col_name, True), expected)
def test_make_model_name(self):
tests = (
('Table', 'Table'),
('table', 'Table'),
('table_baz', 'TableBaz'),
('foo__bar__baz2', 'FooBarBaz2'),
('foo12_3', 'Foo123'),
)
for table_name, expected in tests:
self.assertEqual(
self.introspector.make_model_name(table_name), expected)
def test_col_types(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', (BigIntegerField, IntegerField), False),
('f2', (BlobField, TextField), False),
('f3', (BooleanField, IntegerField), False),
('f4', CharField, False),
('f5', DateField, False),
('f6', DateTimeField, False),
('f7', DecimalField, False),
('f8', (DoubleField, FloatField), False),
('f9', FloatField, False),
('f10', IntegerField, False),
('f11', AutoField, False),
('f12', TextField, False),
('f13', TimeField, False))),
('rel_model', (
('col_types_id', ForeignKeyField, False),
('col_types_nullable_id', ForeignKeyField, True))),
('nugget', (
('category_id', ForeignKeyField, False),
('category', CharField, False))),
('nullable', (
('nullable_cf', CharField, True),
('nullable_if', IntegerField, True))),
('fkpk', (
('col_types_id', ForeignKeyField, False),)),
('underscores', (
('_id', AutoField, False),
('_name', CharField, False))),
('category', (
('name', CharField, False),
('parent_id', ForeignKeyField, True))),
)
for table_name, expected_columns in expected:
introspected_columns = columns[table_name]
for field_name, field_class, is_null in expected_columns:
if not isinstance(field_class, (list, tuple)):
field_class = (field_class,)
column = introspected_columns[field_name]
self.assertTrue(column.field_class in field_class,
"%s in %s" % (column.field_class, field_class))
self.assertEqual(column.nullable, is_null)
def test_foreign_keys(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
self.assertEqual(foreign_keys['col_types'], [])
rel_model = foreign_keys['rel_model']
self.assertEqual(len(rel_model), 2)
fkpk = foreign_keys['fkpk']
self.assertEqual(len(fkpk), 1)
fkpk_fk = fkpk[0]
self.assertEqual(fkpk_fk.table, 'fkpk')
self.assertEqual(fkpk_fk.column, 'col_types_id')
self.assertEqual(fkpk_fk.dest_table, 'col_types')
self.assertEqual(fkpk_fk.dest_column, 'f11')
category = foreign_keys['category']
self.assertEqual(len(category), 1)
category_fk = category[0]
self.assertEqual(category_fk.table, 'category')
self.assertEqual(category_fk.column, 'parent_id')
self.assertEqual(category_fk.dest_table, 'category')
self.assertEqual(category_fk.dest_column, 'id')
def test_table_names(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
names = (
('col_types', 'ColTypes'),
('nullable', 'Nullable'),
('rel_model', 'RelModel'),
('fkpk', 'Fkpk'))
for k, v in names:
self.assertEqual(model_names[k], v)
def test_column_meta(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
rel_model = columns['rel_model']
col_types_id = rel_model['col_types_id']
self.assertEqual(col_types_id.get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'field': "'f11'",
})
col_types_nullable_id = rel_model['col_types_nullable_id']
self.assertEqual(col_types_nullable_id.get_field_parameters(), {
'column_name': "'col_types_nullable_id'",
'null': True,
'backref': "'col_types_col_types_nullable_set'",
'model': 'ColTypes',
'field': "'f11'",
})
fkpk = columns['fkpk']
self.assertEqual(fkpk['col_types_id'].get_field_parameters(), {
'column_name': "'col_types_id'",
'model': 'ColTypes',
'primary_key': True,
'field': "'f11'"})
category = columns['category']
parent_id = category['parent_id']
self.assertEqual(parent_id.get_field_parameters(), {
'column_name': "'parent_id'",
'null': True,
'model': "'self'",
'field': "'id'",
})
nugget = columns['nugget']
category_fk = nugget['category_id']
self.assertEqual(category_fk.name, 'category_id')
self.assertEqual(category_fk.get_field_parameters(), {
'field': "'id'",
'model': 'Category',
'column_name': "'category_id'",
})
category = nugget['category']
self.assertEqual(category.name, 'category')
def test_get_field(self):
(columns,
primary_keys,
foreign_keys,
model_names,
indexes) = self.introspector.introspect()
expected = (
('col_types', (
('f1', ('f1 = BigIntegerField(index=True)',
'f1 = IntegerField(index=True)')),
('f2', ('f2 = BlobField()', 'f2 = TextField()')),
('f4', 'f4 = CharField()'),
('f5', 'f5 = DateField()'),
('f6', 'f6 = DateTimeField()'),
('f7', 'f7 = DecimalField()'),
('f10', 'f10 = IntegerField(unique=True)'),
('f11', 'f11 = AutoField()'),
('f12', ('f12 = TextField()', 'f12 = BlobField()')),
('f13', 'f13 = TimeField()'),
)),
('nullable', (
('nullable_cf', 'nullable_cf = '
'CharField(null=True)'),
('nullable_if', 'nullable_if = IntegerField(null=True)'),
)),
('fkpk', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes, "
'primary_key=True)'),
)),
('nugget', (
('category_id', 'category_id = ForeignKeyField('
"column_name='category_id', field='id', model=Category)"),
('category', 'category = CharField()'),
)),
('rel_model', (
('col_types_id', 'col_types = ForeignKeyField('
"column_name='col_types_id', field='f11', model=ColTypes)"),
('col_types_nullable_id', 'col_types_nullable = '
"ForeignKeyField(backref='col_types_col_types_nullable_set', "
"column_name='col_types_nullable_id', field='f11', "
'model=ColTypes, null=True)'),
)),
('underscores', (
('_id', '_id = AutoField()'),
('_name', '_name = CharField()'),
)),
('category', (
('name', 'name = CharField()'),
('parent_id', 'parent = ForeignKeyField('
"column_name='parent_id', field='id', model='self', "
'null=True)'),
)),
)
for table, field_data in expected:
for field_name, fields in field_data:
if not isinstance(fields, tuple):
fields = (fields,)
actual = columns[table][field_name].get_field()
self.assertTrue(actual in fields,
'%s not in %s' % (actual, fields))
class EventLog(TestModel):
data = CharField(constraints=[SQL('DEFAULT \'\'')])
timestamp = DateTimeField(constraints=[SQL('DEFAULT current_timestamp')])
flags = IntegerField(constraints=[SQL('DEFAULT 0')])
misc = TextField(constraints=[SQL('DEFAULT \'foo\'')])
class DefaultVals(TestModel):
key = CharField(constraints=[SQL('DEFAULT \'foo\'')])
value = IntegerField(constraints=[SQL('DEFAULT 0')])
class Meta:
primary_key = CompositeKey('key', 'value')
class TestReflectDefaultValues(BaseReflectionTestCase):
requires = [DefaultVals, EventLog]
@requires_sqlite
def test_default_values(self):
models = self.introspector.generate_models()
default_vals = models['default_vals']
create_table = (
'CREATE TABLE IF NOT EXISTS "default_vals" ('
'"key" VARCHAR(255) NOT NULL DEFAULT \'foo\', '
'"value" INTEGER NOT NULL DEFAULT 0, '
'PRIMARY KEY ("key", "value"))')
# Re-create table using the introspected schema.
self.assertSQL(default_vals._schema._create_table(), create_table, [])
default_vals.drop_table()
default_vals.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
default_vals = models['default_vals']
self.assertSQL(default_vals._schema._create_table(), create_table, [])
@requires_sqlite
def test_default_values_extended(self):
models = self.introspector.generate_models()
eventlog = models['event_log']
create_table = (
'CREATE TABLE IF NOT EXISTS "event_log" ('
'"id" INTEGER NOT NULL PRIMARY KEY, '
'"data" VARCHAR(255) NOT NULL DEFAULT \'\', '
'"timestamp" DATETIME NOT NULL DEFAULT current_timestamp, '
'"flags" INTEGER NOT NULL DEFAULT 0, '
'"misc" TEXT NOT NULL DEFAULT \'foo\')')
# Re-create table using the introspected schema.
self.assertSQL(eventlog._schema._create_table(), create_table, [])
eventlog.drop_table()
eventlog.create_table()
# Verify that the introspected schema has not changed.
models = self.introspector.generate_models()
eventlog = models['event_log']
self.assertSQL(eventlog._schema._create_table(), create_table, [])
class TestReflectionDependencies(BaseReflectionTestCase):
requires = [User, Tweet]
def test_generate_dependencies(self):
models = self.introspector.generate_models(table_names=['tweet'])
self.assertEqual(set(models), set(('users', 'tweet')))
IUser = models['users']
ITweet = models['tweet']
self.assertEqual(set(ITweet._meta.fields), set((
'id', 'user', 'content', 'timestamp')))
self.assertEqual(set(IUser._meta.fields), set(('id', 'username')))
self.assertTrue(ITweet.user.rel_model is IUser)
self.assertTrue(ITweet.user.rel_field is IUser.id)
def test_ignore_backrefs(self):
models = self.introspector.generate_models(table_names=['users'])
self.assertEqual(set(models), set(('users',)))
class Note(TestModel):
content = TextField()
timestamp = DateTimeField(default=datetime.datetime.now)
status = IntegerField()
class TestReflectViews(BaseReflectionTestCase):
requires = [Note]
def setUp(self):
super(TestReflectViews, self).setUp()
self.database.execute_sql('CREATE VIEW notes_public AS '
'SELECT content, timestamp FROM note '
'WHERE status = 1 ORDER BY timestamp DESC')
def tearDown(self):
self.database.execute_sql('DROP VIEW notes_public')
super(TestReflectViews, self).tearDown()
def test_views_ignored_default(self):
models = self.introspector.generate_models()
self.assertFalse('notes_public' in models)
def test_introspect_view(self):
models = self.introspector.generate_models(include_views=True)
self.assertTrue('notes_public' in models)
NotesPublic = models['notes_public']
self.assertEqual(sorted(NotesPublic._meta.fields),
['content', 'timestamp'])
self.assertTrue(isinstance(NotesPublic.content, TextField))
self.assertTrue(isinstance(NotesPublic.timestamp, DateTimeField))
@skip_if(IS_SQLITE_OLD)
def test_introspect_view_integration(self):
for i, (ct, st) in enumerate([('n1', 1), ('n2', 2), ('n3', 1)]):
Note.create(content=ct, status=st,
timestamp=datetime.datetime(2018, 1, 1 + i))
NP = self.introspector.generate_models(
table_names=['notes_public'], include_views=True)['notes_public']
self.assertEqual([(np.content, np.timestamp) for np in NP.select()], [
('n3', datetime.datetime(2018, 1, 3)),
('n1', datetime.datetime(2018, 1, 1))])
class Event(TestModel):
key = TextField()
timestamp = DateTimeField(index=True)
metadata = TextField(default='')
class TestInteractiveHelpers(ModelTestCase):
requires = [Category, Event]
def test_generate_models(self):
M = generate_models(self.database)
self.assertTrue('category' in M)
self.assertTrue('event' in M)
def assertFields(m, expected):
actual = [(f.name, f.field_type) for f in m._meta.sorted_fields]
self.assertEqual(actual, expected)
assertFields(M['category'], [('id', 'AUTO'), ('name', 'VARCHAR'),
('parent', 'INT')])
assertFields(M['event'], [
('id', 'AUTO'),
('key', 'TEXT'),
('timestamp', 'DATETIME'),
('metadata', 'TEXT')])
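# Hedged usage sketch (editorial addition, not part of the test suite): a
# minimal end-to-end run of generate_models() against an in-memory SQLite
# database, mirroring what TestInteractiveHelpers exercises above.
def _demo_generate_models():
    from peewee import SqliteDatabase
    from playhouse.reflection import generate_models as _generate_models
    db = SqliteDatabase(':memory:')
    db.execute_sql('CREATE TABLE note ('
                   '"id" INTEGER NOT NULL PRIMARY KEY, "content" TEXT NOT NULL)')
    models = _generate_models(db)
    Note = models['note']
    assert sorted(Note._meta.fields) == ['content', 'id']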
| true
| true
|
790b8bcb2e61535fc042fec9fe81c76d4f11fe74
| 4,968
|
py
|
Python
|
goal_prox/envs/fetch/custom_push.py
|
clvrai/goal_prox_il
|
7c809b2ee575a69a14997068db06f3c1f3c8bd08
|
[
"MIT"
] | 4
|
2021-11-17T20:19:34.000Z
|
2022-03-31T04:21:26.000Z
|
goal_prox/envs/fetch/custom_push.py
|
clvrai/goal_prox_il
|
7c809b2ee575a69a14997068db06f3c1f3c8bd08
|
[
"MIT"
] | null | null | null |
goal_prox/envs/fetch/custom_push.py
|
clvrai/goal_prox_il
|
7c809b2ee575a69a14997068db06f3c1f3c8bd08
|
[
"MIT"
] | null | null | null |
import os
from gym import utils
from gym.envs.robotics import fetch_env
import numpy as np
from goal_prox.envs.holdout_sampler import HoldoutSampler, LineHoldoutSampler
from goal_prox.envs.old_holdout_sampler import OldHoldoutSampler
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join('fetch', 'push.xml')
Y_NOISE = 0.02
X_NOISE = 0.05
OBJ_X_NOISE = 0.05
OFFSET = 0.10
class FetchPushEnvCustom(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self, reward_type='dense'):
initial_qpos = {
'robot0:slide0': 0.405,
'robot0:slide1': 0.48,
'robot0:slide2': 0.0,
'object0:joint': [1.25, 0.53, 0.4, 1., 0., 0., 0.],
}
self.coverage = 1.0
self.goal_noise = True
self.rnd_gen = False
self.set_noise_ratio(1.0, 1.0)
fetch_env.FetchEnv.__init__(
self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,
gripper_extra_height=0.0, target_in_the_air=False, target_offset=0,
# The ranges shouldn't matter because we sample ourselves
obj_range=0.1, target_range=0, distance_threshold=0.05,
initial_qpos=initial_qpos, reward_type=reward_type)
utils.EzPickle.__init__(self)
def set_noise_ratio(self, noise_ratio, goal_noise_ratio):
self.obj_sampler = OldHoldoutSampler([-noise_ratio * OBJ_X_NOISE, 0],
[noise_ratio * OBJ_X_NOISE, noise_ratio * Y_NOISE * 2], 4)
self.goal_sampler = OldHoldoutSampler(
[-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE * 2],
[goal_noise_ratio*X_NOISE, 0], 4)
# self.obj_sampler = OldHoldoutSampler([-noise_ratio * OBJ_X_NOISE, -noise_ratio * Y_NOISE],
# [noise_ratio * OBJ_X_NOISE, noise_ratio * Y_NOISE], 4)
# self.goal_sampler = OldHoldoutSampler(
# [-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE],
# [goal_noise_ratio*X_NOISE, goal_noise_ratio*Y_NOISE], 4)
def _get_obs(self):
obs = super()._get_obs()
obs['observation'] = np.concatenate([obs['observation'],
obs['desired_goal']])
return obs
def relabel_ob(self, ob_current, ob_future):
import torch
if isinstance(ob_current, torch.Tensor):
return torch.cat([ob_current[:-3], ob_future[-3:]])
return np.concatenate([ob_current[:-3], ob_future[-3:]])
def is_reached(self, ob):
import torch
if isinstance(ob, torch.Tensor):
ob = ob.cpu()
dist = np.linalg.norm(ob[-3:] - ob[3:6])
return float(dist < self.distance_threshold)
def _reset_sim(self):
self.sim.set_state(self.initial_state)
# Randomize start position of object.
if self.has_object:
object_xpos = self.initial_gripper_xpos[:2] + np.array([0.0, OFFSET])
object_xpos += self.obj_sampler.sample(self.coverage,
self.np_random)
object_qpos = self.sim.data.get_joint_qpos('object0:joint')
assert object_qpos.shape == (7,)
object_qpos[:2] = object_xpos
self.sim.data.set_joint_qpos('object0:joint', object_qpos)
self.sim.forward()
return True
def _sample_goal(self):
goal = self.initial_gripper_xpos[:3] + np.array([0.0, -1*OFFSET, 0.0])
if self.goal_noise:
            goal[:2] += self.goal_sampler.sample(self.coverage, self.np_random)
goal += self.target_offset
goal[2] = self.height_offset
return goal.copy()
def _viewer_setup(self):
body_id = self.sim.model.body_name2id('robot0:gripper_link')
lookat = self.sim.data.body_xpos[body_id]
lookat = [1.34193362, 0.74910034, 0.55472272]
for idx, value in enumerate(lookat):
self.viewer.cam.lookat[idx] = value
self.viewer.cam.distance = 1.3
self.viewer.cam.azimuth = 132
self.viewer.cam.elevation = -14.
def _render_callback(self):
# Visualize target.
sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
site_id = self.sim.model.site_name2id('target0')
self.sim.model.site_pos[site_id] = self.goal - sites_offset[0]
self.sim.forward()
class FetchDebugPushEnv(FetchPushEnvCustom):
def set_noise_ratio(self, noise_ratio, goal_noise_ratio):
noise_ratio *= 1
y_noise_scale = 0.15 / (noise_ratio * Y_NOISE)
#y_noise_scale = 1.0
self.obj_sampler = LineHoldoutSampler(
[-noise_ratio * OBJ_X_NOISE, -y_noise_scale*noise_ratio * Y_NOISE],
[noise_ratio * OBJ_X_NOISE, y_noise_scale*noise_ratio * Y_NOISE])
self.goal_sampler = HoldoutSampler(
[-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE],
[goal_noise_ratio*X_NOISE, goal_noise_ratio*Y_NOISE], 1, True)
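# Hedged usage sketch (editorial addition): constructing the env requires gym
# with mujoco-py and the Fetch robotics assets installed, so this is left as a
# function rather than module-level code.
def _demo_push_env():
    env = FetchPushEnvCustom(reward_type='dense')
    env.set_noise_ratio(0.5, 0.5)  # halve the object/goal sampling ranges
    obs = env.reset()
    # _get_obs above appends the 3-D desired goal to the observation vector.
    assert obs['observation'].shape[0] >= obs['desired_goal'].shape[0]
    return obs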
| 38.8125
| 100
| 0.636675
|
import os
from gym import utils
from gym.envs.robotics import fetch_env
import numpy as np
from goal_prox.envs.holdout_sampler import HoldoutSampler, LineHoldoutSampler
from goal_prox.envs.old_holdout_sampler import OldHoldoutSampler
MODEL_XML_PATH = os.path.join('fetch', 'push.xml')
Y_NOISE = 0.02
X_NOISE = 0.05
OBJ_X_NOISE = 0.05
OFFSET = 0.10
class FetchPushEnvCustom(fetch_env.FetchEnv, utils.EzPickle):
def __init__(self, reward_type='dense'):
initial_qpos = {
'robot0:slide0': 0.405,
'robot0:slide1': 0.48,
'robot0:slide2': 0.0,
'object0:joint': [1.25, 0.53, 0.4, 1., 0., 0., 0.],
}
self.coverage = 1.0
self.goal_noise = True
self.rnd_gen = False
self.set_noise_ratio(1.0, 1.0)
fetch_env.FetchEnv.__init__(
self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,
gripper_extra_height=0.0, target_in_the_air=False, target_offset=0,
obj_range=0.1, target_range=0, distance_threshold=0.05,
initial_qpos=initial_qpos, reward_type=reward_type)
utils.EzPickle.__init__(self)
def set_noise_ratio(self, noise_ratio, goal_noise_ratio):
self.obj_sampler = OldHoldoutSampler([-noise_ratio * OBJ_X_NOISE, 0],
[noise_ratio * OBJ_X_NOISE, noise_ratio * Y_NOISE * 2], 4)
self.goal_sampler = OldHoldoutSampler(
[-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE * 2],
[goal_noise_ratio*X_NOISE, 0], 4)
# self.obj_sampler = OldHoldoutSampler([-noise_ratio * OBJ_X_NOISE, -noise_ratio * Y_NOISE],
# [noise_ratio * OBJ_X_NOISE, noise_ratio * Y_NOISE], 4)
# self.goal_sampler = OldHoldoutSampler(
# [-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE],
# [goal_noise_ratio*X_NOISE, goal_noise_ratio*Y_NOISE], 4)
def _get_obs(self):
obs = super()._get_obs()
obs['observation'] = np.concatenate([obs['observation'],
obs['desired_goal']])
return obs
def relabel_ob(self, ob_current, ob_future):
import torch
if isinstance(ob_current, torch.Tensor):
return torch.cat([ob_current[:-3], ob_future[-3:]])
return np.concatenate([ob_current[:-3], ob_future[-3:]])
def is_reached(self, ob):
import torch
if isinstance(ob, torch.Tensor):
ob = ob.cpu()
dist = np.linalg.norm(ob[-3:] - ob[3:6])
return float(dist < self.distance_threshold)
def _reset_sim(self):
self.sim.set_state(self.initial_state)
# Randomize start position of object.
if self.has_object:
object_xpos = self.initial_gripper_xpos[:2] + np.array([0.0, OFFSET])
object_xpos += self.obj_sampler.sample(self.coverage,
self.np_random)
object_qpos = self.sim.data.get_joint_qpos('object0:joint')
assert object_qpos.shape == (7,)
object_qpos[:2] = object_xpos
self.sim.data.set_joint_qpos('object0:joint', object_qpos)
self.sim.forward()
return True
def _sample_goal(self):
goal = self.initial_gripper_xpos[:3] + np.array([0.0, -1*OFFSET, 0.0])
if self.goal_noise:
            goal[:2] += self.goal_sampler.sample(self.coverage, self.np_random)
goal += self.target_offset
goal[2] = self.height_offset
return goal.copy()
def _viewer_setup(self):
body_id = self.sim.model.body_name2id('robot0:gripper_link')
lookat = self.sim.data.body_xpos[body_id]
lookat = [1.34193362, 0.74910034, 0.55472272]
for idx, value in enumerate(lookat):
self.viewer.cam.lookat[idx] = value
self.viewer.cam.distance = 1.3
self.viewer.cam.azimuth = 132
self.viewer.cam.elevation = -14.
def _render_callback(self):
# Visualize target.
sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
site_id = self.sim.model.site_name2id('target0')
self.sim.model.site_pos[site_id] = self.goal - sites_offset[0]
self.sim.forward()
class FetchDebugPushEnv(FetchPushEnvCustom):
def set_noise_ratio(self, noise_ratio, goal_noise_ratio):
noise_ratio *= 1
y_noise_scale = 0.15 / (noise_ratio * Y_NOISE)
#y_noise_scale = 1.0
self.obj_sampler = LineHoldoutSampler(
[-noise_ratio * OBJ_X_NOISE, -y_noise_scale*noise_ratio * Y_NOISE],
[noise_ratio * OBJ_X_NOISE, y_noise_scale*noise_ratio * Y_NOISE])
self.goal_sampler = HoldoutSampler(
[-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE],
[goal_noise_ratio*X_NOISE, goal_noise_ratio*Y_NOISE], 1, True)
| true
| true
|
790b8e1a27e92a8bb8302238f047680e367050f9
| 1,380
|
py
|
Python
|
scripts/File.py
|
tanvirtin/Cloud-Backup
|
751c3e7ac4419729f25183e5dcb9fa4d230556ed
|
[
"MIT"
] | 4
|
2017-04-17T23:40:43.000Z
|
2020-04-24T03:31:56.000Z
|
scripts/File.py
|
tanvirtin/Google-Drive-File-System-Synchronization-with-Ubuntu
|
751c3e7ac4419729f25183e5dcb9fa4d230556ed
|
[
"MIT"
] | null | null | null |
scripts/File.py
|
tanvirtin/Google-Drive-File-System-Synchronization-with-Ubuntu
|
751c3e7ac4419729f25183e5dcb9fa4d230556ed
|
[
"MIT"
] | null | null | null |
'''
Class Name: File
Purpose: The purpose of this class is to represent data of a particular file
in a file system.
'''
class File:
def __init__(self, name = None, directory = None, date = None, fId = None, folderId = None, extension = ""):
self.__name = name
self.__directory = directory
self.__date = date
self.__id = fId
self.__folderId = folderId
self.__mimeType = extension
def __repr__(self):
return self.getName
'''
Name: getName
Purpose: A getter method for the name of the file.
return: private attribute __name
'''
@property
def getName(self):
return self.__name
'''
Name: getDir
Purpose: a getter method for the name of the directory the file is in.
return: private attribute __directory
'''
@property
def getDir(self):
return self.__directory
'''
Name: getLastModified
Purpose: a getter method for the date that the file was last modified at
return: private attribute __date
'''
@property
def getLastModified(self):
return self.__date
'''
Name: getDetails
Purpose: Returns the full file address of a file object.
return: a string representing the full file details
'''
def getDetails(self):
return self.getDir + self.getName
@property
def getFileId(self):
return self.__id
@property
def getFolderId(self):
return self.__folderId
@property
def getMimeType(self):
return self.__mimeType
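'''
Hedged usage sketch (editorial addition): builds a File record with
illustrative values and reads it back through the getters defined above.
'''
def _demo_file():
    f = File(name='report.txt', directory='/home/user/docs/',
             date='2017-04-17', fId='abc123', folderId='root',
             extension='text/plain')
    assert f.getDetails() == '/home/user/docs/report.txt'
    assert f.getMimeType == 'text/plain'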
| 23
| 109
| 0.717391
|
class File:
def __init__(self, name = None, directory = None, date = None, fId = None, folderId = None, extension = ""):
self.__name = name
self.__directory = directory
self.__date = date
self.__id = fId
self.__folderId = folderId
self.__mimeType = extension
def __repr__(self):
return self.getName
@property
def getName(self):
return self.__name
@property
def getDir(self):
return self.__directory
@property
def getLastModified(self):
return self.__date
def getDetails(self):
return self.getDir + self.getName
@property
def getFileId(self):
return self.__id
@property
def getFolderId(self):
return self.__folderId
@property
def getMimeType(self):
return self.__mimeType
| true
| true
|
790b8e49c61be79832743ad5b30bc3222f22bcf8
| 34,574
|
py
|
Python
|
openmdao/utils/general_utils.py
|
andrewellis55/OpenMDAO
|
390956b787c22805e126145f0358b79fad54af47
|
[
"Apache-2.0"
] | null | null | null |
openmdao/utils/general_utils.py
|
andrewellis55/OpenMDAO
|
390956b787c22805e126145f0358b79fad54af47
|
[
"Apache-2.0"
] | 10
|
2019-12-31T19:15:07.000Z
|
2022-03-31T23:00:21.000Z
|
openmdao/utils/general_utils.py
|
DKilkenny/OpenMDAO
|
d01fd526e71add4a203b7d32c534e1eab07dafaf
|
[
"Apache-2.0"
] | null | null | null |
"""Some miscellaneous utility functions."""
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
# note: this is a Python 3.3 change, clean this up for OpenMDAO 3.x
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
# Certain command line tools can make use of this to allow visualization of models when errors
# are present that would normally cause setup to abort.
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
"""
Convert name of auto_ivc val to promoted input name.
Parameters
----------
conns_dict : dict
Dictionary of global connections.
name : str
Name of auto_ivc to be found.
Returns
-------
str
Promoted input name.
"""
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
"""
Disable certain errors that will prevent setup from completing.
Parameters
----------
flag : bool or None
If not None, set the value of _ignore_errors to this value.
Returns
-------
bool
The current value of _ignore_errors.
"""
global _ignore_errors
if flag is not None:
_ignore_errors = flag
return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
"""
Raise an exception or issue a warning, depending on the value of _ignore_errors.
Parameters
----------
msg : str
The error/warning message.
exc : Exception class
This exception class is used to create the exception to be raised.
category : warning class
This category is the class of warning to be issued.
err : bool
If None, use ignore_errors(), otherwise use value of err to determine whether to
raise an exception (err=True) or issue a warning (err=False).
"""
if (err is None and ignore_errors()) or err is False:
issue_warning(msg, category=category)
else:
raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
"""
Set ignore_errors to the given flag in this context.
Parameters
----------
flag : bool
If not None, set ignore_errors to this value.
Yields
------
None
"""
save = ignore_errors()
ignore_errors(flag)
try:
yield
finally:
ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
"""
Display a simple warning message without the annoying extra line showing the warning call.
Parameters
----------
msg : str
The warning message.
category : class
The warning class.
stacklevel : int
Number of levels up the stack to identify as the warning location.
"""
warn_deprecation('simple_warning is deprecated. '
'Use openmdao.utils.om_warnings.issue_warning instead.')
old_format = warnings.formatwarning
warnings.formatwarning = _warn_simple_format
try:
warnings.warn(msg, category, stacklevel)
finally:
warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
"""
Make value compatible with the specified shape or the shape of indices.
Parameters
----------
name : str
The name of the value.
value : float or list or tuple or ndarray or Iterable
The value of a variable.
shape : int or tuple or list or None
The expected or desired shape of the value.
indices : Indexer or None
The indices into a source variable.
Returns
-------
ndarray
The value in a shape compatible with the specified shape and/or indices.
tuple
The resulting shape of the value.
Raises
------
ValueError
If value cannot be made to conform to shape or if shape and indices
are incompatible.
"""
if isinstance(value, Iterable):
value = np.asarray(value)
# if shape is not given, infer from value (if not scalar) or indices
if shape is not None:
if isinstance(shape, numbers.Integral):
shape = (shape,)
elif isinstance(shape, list):
shape = tuple(shape)
elif not np.isscalar(value):
shape = np.atleast_1d(value).shape
if indices is not None:
if not indices._flat_src and shape is None:
raise RuntimeError("src_indices for '%s' is not flat, so its input "
"shape must be provided." % name)
try:
indshape = indices.indexed_src_shape
except (RuntimeError, ValueError, TypeError):
pass # use shape provided or shape of value and check vs. shape of indices later
else:
if shape is not None and np.product(indshape) != np.product(shape):
raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
(indshape, shape, name))
if shape is None:
shape = indshape
if shape is None:
# shape is not determined, assume the shape of value was intended
value = np.atleast_1d(value)
shape = value.shape
else:
# shape is determined, if value is scalar assign it to array of shape
# otherwise make sure value is an array of the determined shape
if np.isscalar(value) or value.shape == (1,):
value = np.ones(shape) * value
else:
value = np.atleast_1d(value).astype(np.float64)
if value.shape != shape:
raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
(name, shape, value.shape))
return value, shape
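# Hedged usage sketch (editorial addition): ensure_compatible broadcasts a
# scalar to the requested shape and infers the shape from array-like values.
def _demo_ensure_compatible():
    val, shp = ensure_compatible('x', 5.0, shape=3)
    assert shp == (3,) and np.all(val == 5.0)
    val, shp = ensure_compatible('y', [1.0, 2.0])
    assert shp == (2,) and val.shape == (2,)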
def determine_adder_scaler(ref0, ref, adder, scaler):
r"""
Determine proper values of adder and scaler based on user arguments.
Adder and Scaler are used internally because the transformation is
slightly more efficient.
Parameters
----------
ref0 : float or ndarray, optional
Value of response variable that scales to 0.0 in the driver.
ref : float or ndarray, optional
Value of response variable that scales to 1.0 in the driver.
adder : float or ndarray, optional
Value to add to the model value to get the scaled value. Adder
is first in precedence.
scaler : float or ndarray, optional
Value to multiply the model value to get the scaled value. Scaler
is second in precedence.
Returns
-------
tuple
Adder and scaler, properly formatted and based on ref/ref0 if provided.
Raises
------
ValueError
If both ref/ref0 and adder/scaler were provided.
Notes
-----
The response can be scaled using ref and ref0.
The argument :code:`ref0` represents the physical value when the scaled value is 0.
The argument :code:`ref` represents the physical value when the scaled value is 1.
"""
# Affine scaling cannot be used with scalers/adders
if ref0 is not None or ref is not None:
if scaler is not None or adder is not None:
raise ValueError('Inputs ref/ref0 are mutually exclusive '
'with scaler/adder')
if ref is None:
ref = 1.0
if ref0 is None:
ref0 = 0.0
# Convert ref/ref0 to scaler/adder so we can scale the bounds
adder = -ref0
scaler = 1.0 / (ref + adder)
else:
if scaler is None:
scaler = 1.0
if adder is None:
adder = 0.0
adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
return adder, scaler
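# Hedged worked example (editorial addition): with ref0=10 and ref=110, the
# physical range [10, 110] maps onto the scaled range [0, 1], so
# adder = -ref0 = -10 and scaler = 1 / (ref + adder) = 1 / 100.
def _demo_determine_adder_scaler():
    adder, scaler = determine_adder_scaler(10.0, 110.0, None, None)
    assert adder == -10.0 and scaler == 0.01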
def set_pyoptsparse_opt(optname, fallback=True):
"""
For testing, sets the pyoptsparse optimizer using the given optimizer name.
This may be modified based on the value of OPENMDAO_FORCE_PYOPTSPARSE_OPT.
This can be used on systems that have SNOPT installed to force them to use
SLSQP in order to mimic our test machines on travis and appveyor.
Parameters
----------
optname : str
Name of pyoptsparse optimizer that is requested by the test.
fallback : bool
If True, fall back to SLSQP if optname can't be found.
Returns
-------
object
Pyoptsparse optimizer instance.
str
Pyoptsparse optimizer string.
"""
OPT = None
opt = None
OPTIMIZER = None
force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
if force:
optname = force
from unittest.mock import Mock
try:
from pyoptsparse import OPT
try:
opt = OPT(optname)
OPTIMIZER = optname
except Exception:
if fallback and optname != 'SLSQP':
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
else:
if fallback and isinstance(opt, Mock):
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
except Exception:
pass
if isinstance(opt, Mock):
OPT = OPTIMIZER = None
if not fallback and OPTIMIZER != optname:
raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
"""
Format array option values.
Checks that the given array values are either None, float, or an iterable
of numeric values. On output all iterables of numeric values are
converted to a flat np.ndarray. If values is scalar, it is converted
to float.
Parameters
----------
name : str
The path of the variable relative to the current system.
values : float or numpy ndarray or Iterable
Values of the array option to be formatted to the expected form.
val_if_none : float or numpy ndarray
The default value for the option if values is None.
flatten : bool
Set to True to flatten any ndarray return.
Returns
-------
float or np.ndarray
Values transformed to the expected form.
Raises
------
ValueError
If values is Iterable but cannot be converted to a numpy ndarray
TypeError
If values is scalar, not None, and not a Number.
"""
# Convert adder to ndarray/float as necessary
if isinstance(values, np.ndarray):
if flatten:
values = values.flatten()
elif not isinstance(values, str) \
and isinstance(values, Iterable):
values = np.asarray(values, dtype=float)
if flatten:
values = values.flatten()
elif values is None:
values = val_if_none
elif values == float('inf'):
values = INF_BOUND
elif values == -float('inf'):
values = -INF_BOUND
elif isinstance(values, numbers.Number):
values = float(values)
else:
raise TypeError('Expected values of {0} to be an Iterable of '
'numeric values, or a scalar numeric value. '
'Got {1} instead.'.format(name, values))
return values
class ContainsAll(object):
"""
A fake dictionary that always reports __contains__(name) to be True.
"""
def __contains__(self, name):
"""
Return if the named object is contained.
Parameters
----------
name : str
Name of the object being looked up.
Returns
-------
bool
Always returns True.
"""
return True
def all_ancestors(pathname, delim='.'):
"""
Return a generator of pathnames of the starting object and all of its parents.
Pathnames are ordered from longest to shortest.
Parameters
----------
pathname : str
Pathname of starting object.
delim : str
Delimiter used to split the name.
Yields
------
str
"""
parts = pathname.split(delim)
for i in range(len(parts), 0, -1):
yield delim.join(parts[:i])
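# Hedged usage sketch (editorial addition): ancestors come out longest first.
def _demo_all_ancestors():
    assert list(all_ancestors('model.sub.comp')) == [
        'model.sub.comp', 'model.sub', 'model']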
def find_matches(pattern, var_list):
"""
Return list of variable names that match given pattern.
Parameters
----------
pattern : str
Glob pattern or variable name.
var_list : list of str
List of variable names to search for pattern.
Returns
-------
list
Variable names that match pattern.
"""
if pattern == '*':
return var_list
elif pattern in var_list:
return [pattern]
return [name for name in var_list if fnmatchcase(name, pattern)]
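# Hedged usage sketch (editorial addition): '*' short-circuits to the whole
# list; otherwise fnmatch-style glob matching is applied per name.
def _demo_find_matches():
    names = ['x0', 'y0', 'x1']
    assert find_matches('*', names) == names
    assert find_matches('x*', names) == ['x0', 'x1']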
def pad_name(name, pad_num=10, quotes=False):
"""
Pad a string so that they all line up when stacked.
Parameters
----------
name : str
The string to pad.
pad_num : int
The number of total spaces the string should take up.
quotes : bool
If name should be quoted.
Returns
-------
str
Padded string.
"""
l_name = len(name)
quotes_len = 2 if quotes else 0
if l_name + quotes_len < pad_num:
pad = pad_num - (l_name + quotes_len)
if quotes:
pad_str = "'{name}'{sep:<{pad}}"
else:
pad_str = "{name}{sep:<{pad}}"
pad_name = pad_str.format(name=name, sep='', pad=pad)
return pad_name
else:
if quotes:
return "'{0}'".format(name)
else:
return '{0}'.format(name)
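# Hedged usage sketch (editorial addition): names shorter than pad_num are
# right-padded with spaces; quoting consumes two of the pad columns.
def _demo_pad_name():
    assert pad_name('x', 4) == 'x   '
    assert pad_name('x', 4, quotes=True) == "'x' "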
def run_model(prob, ignore_exception=False):
"""
Call `run_model` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
ignore_exception : bool
Set to True to ignore an exception of any kind.
Returns
-------
string
Output from calling `run_model` on the Problem, captured from stdout.
"""
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
prob.run_model()
except Exception as err:
if not ignore_exception:
raise err
finally:
sys.stdout = stdout
return strout.getvalue()
def run_driver(prob):
"""
Call `run_driver` on problem and capture output.
Parameters
----------
prob : Problem
An instance of Problem.
Returns
-------
bool
        Failure flag; True if failed to converge, False if successful.
string
Output from calling `run_driver` on the Problem, captured from stdout.
"""
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
failed = prob.run_driver()
finally:
sys.stdout = stdout
return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
"""
Context manager for setting numpy print options.
Set print options for the scope of the `with` block, and restore the old
options at the end. See `numpy.set_printoptions` for the full description of
available options. If any invalid options are specified, they will be ignored.
>>> with printoptions(precision=2):
    ...     print(np.array([2.0]) / 3)
[0.67]
The `as`-clause of the `with`-statement gives the current print options:
>>> with printoptions(precision=2) as opts:
... assert_equal(opts, np.get_printoptions())
Parameters
----------
*args : list
Variable-length argument list.
**kwds : dict
Arbitrary keyword arguments.
Yields
------
str or int
See Also
--------
set_printoptions, get_printoptions
"""
opts = np.get_printoptions()
# ignore any keyword args that are not valid in this version of numpy
# e.g. numpy <=1.13 does not have the 'floatmode' option
kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)
try:
np.set_printoptions(*args, **kw_opts)
yield np.get_printoptions()
finally:
np.set_printoptions(**opts)
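# Hedged usage sketch (editorial addition): options are restored when the
# context exits, even if the body raises.
def _demo_printoptions():
    before = np.get_printoptions()['precision']
    with printoptions(precision=2):
        assert str(np.array([1.0 / 3.0])) == '[0.33]'
    assert np.get_printoptions()['precision'] == before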
def _nothing():
yield None
def do_nothing_context():
"""
Do nothing.
Useful when you have a block of code that only requires a context manager sometimes,
and you don't want to repeat the context managed block.
Returns
-------
contextmanager
A do nothing context manager.
"""
return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
"""
Remove white-space characters from the given string.
If neither right nor left is specified (the default),
then all white-space is removed.
Parameters
----------
s : str
The string to be modified.
right : bool
If True, remove white-space from the end of the string.
left : bool
If True, remove white-space from the beginning of the string.
Returns
-------
str
The string with white-space removed.
"""
if not left and not right:
return re.sub(r"\s+", "", s, flags=re.UNICODE)
elif right and left:
return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
elif right:
return re.sub(r"\s+$", "", s, flags=re.UNICODE)
else: # left
return re.sub(r"^\s+", "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
"""
Translate a given string into a valid python variable name.
Parameters
----------
s : str
The string to be translated.
Returns
-------
str
The valid python name string.
"""
return s.translate(_transtab)
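# Hedged usage sketch (editorial addition): every character in _badtab is
# mapped to an underscore, so dotted/bracketed names become identifiers.
def _demo_str2valid_python_name():
    assert str2valid_python_name('comp.out:y[0]') == 'comp_out_y_0_'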
_container_classes = (list, tuple, set)
def make_serializable(o):
"""
Recursively convert numpy types to native types for JSON serialization.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
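# Hedged usage sketch (editorial addition): numpy containers and scalars are
# converted to plain Python types suitable for json.dumps.
def _demo_make_serializable():
    data = {'a': np.array([1, 2]), 'b': np.float64(3.0)}
    assert make_serializable(data) == {'a': [1, 2], 'b': 3.0}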
def make_serializable_key(o):
"""
Recursively convert numpy types to native types for JSON serialization.
    This function is for making serializable dictionary keys, so no containers.
This function should NOT be passed into json.dump or json.dumps as the 'default' arg.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, str):
return o
elif isinstance(o, np.number):
return o.item()
elif hasattr(o, '__dict__'):
return o.__class__.__name__
else:
return str(o)
def default_noraise(o):
"""
Try to convert some extra types during JSON serialization.
This is intended to be passed to json.dump or json.dumps as the 'default' arg. It will
attempt to convert values if possible, but if no conversion works, will return
'unserializable object (<type>)' instead of raising a TypeError.
Parameters
----------
o : object
The object to be converted.
Returns
-------
object
The converted object.
"""
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
"""
Construct a set containing the specified character strings.
Parameters
----------
str_data : None, str, or list of strs
Character string(s) to be included in the set.
name : str, optional
A name to be used in error messages.
Returns
-------
set
A set of character strings.
"""
if not str_data:
return set()
elif isinstance(str_data, str):
return {str_data}
elif isinstance(str_data, (set, list)):
for item in str_data:
if not isinstance(item, str):
typ = type(item).__name__
msg = f"Items in tags should be of type string, but type '{typ}' was found."
raise TypeError(msg)
if isinstance(str_data, set):
return str_data
elif isinstance(str_data, list):
return set(str_data)
elif name:
raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
else:
raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
"""
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
"""
# Process excludes
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern):
return False
# Process includes
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern):
return True
return False
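# Hedged usage sketch (editorial addition): excludes are checked first, so a
# name matching both an include and an exclude pattern is filtered out.
def _demo_match_includes_excludes():
    assert match_includes_excludes('x.y', includes=['x*'], excludes=['*.z'])
    assert not match_includes_excludes('x.z', includes=['x*'], excludes=['*.z'])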
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
"""
Check to see if the variable names pass through the includes and excludes filter.
Parameters
----------
name : str
Unpromoted variable name to be checked for match.
prom_name : str
Promoted variable name to be checked for match.
includes : iter of str or None
Glob patterns for name to include in the filtering. None, the default, means
to include all.
excludes : iter of str or None
Glob patterns for name to exclude in the filtering.
Returns
-------
bool
Return True if the name passes through the filtering of includes and excludes.
"""
diff = name != prom_name
# Process excludes
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return False
# Process includes
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return True
return False
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
"""
Return True if the given environment variable is 'truthy'.
Parameters
----------
env_var : str
The name of the environment variable.
Returns
-------
bool
True if the specified environment variable is 'truthy'.
"""
return os.environ.get(env_var, '0').lower() not in _falsey
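# Hedged usage sketch (editorial addition): the variable name below is made up
# for the demo; unset variables default to '0', which is falsey.
def _demo_env_truthy():
    os.environ['OM_DEMO_FLAG'] = 'yes'
    assert env_truthy('OM_DEMO_FLAG')
    os.environ['OM_DEMO_FLAG'] = 'false'
    assert not env_truthy('OM_DEMO_FLAG')
    del os.environ['OM_DEMO_FLAG']
    assert not env_truthy('OM_DEMO_FLAG')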
def common_subpath(pathnames):
"""
Return the common dotted subpath found in all of the given dotted pathnames.
Parameters
----------
pathnames : iter of str
Dotted pathnames of systems.
Returns
-------
str
Common dotted subpath. Returns '' if no common subpath is found.
"""
if len(pathnames) == 1:
return pathnames[0]
if pathnames:
npaths = len(pathnames)
splits = [p.split('.') for p in pathnames]
minlen = np.min([len(s) for s in splits])
for common_loc in range(minlen):
p0 = splits[0][common_loc]
for i in range(1, npaths):
if p0 != splits[i][common_loc]:
break
else:
continue
break
else:
common_loc += 1
return '.'.join(splits[0][:common_loc])
return ''
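# Hedged usage sketch (editorial addition): the common prefix is computed on
# dotted components, not on characters.
def _demo_common_subpath():
    assert common_subpath(['a.b.c', 'a.b.d']) == 'a.b'
    assert common_subpath(['a.b', 'c.d']) == ''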
def _is_slicer_op(indices):
"""
Check if an indexer contains a slice or ellipsis operator.
Parameters
----------
indices : ndarray
Indices to check.
Returns
-------
bool
Returns True if indices contains a colon or ellipsis operator.
"""
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
"""
Return an index array based on a slice or slice tuple and the array size and shape.
Parameters
----------
slicer : slice or tuple containing slices
Slice object to slice array
arr_size : int
Size of output array
arr_shape : tuple
Tuple of output array shape
Returns
-------
array
Returns the sliced indices.
"""
if isinstance(slicer, slice):
# for a simple slice we can use less memory
start, stop, step = slicer.start, slicer.stop, slicer.step
if start is None:
start = 0
if stop is None:
stop = arr_size
if step is None:
step = 1
return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
else:
return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
def _prom2ivc_src_name_iter(prom_dict):
"""
Yield keys from prom_dict with promoted input names converted to ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
str
name
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
"""
Yield items from prom_dict with promoted input names converted to ivc source names.
The result is that all names are absolute.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Yields
------
tuple
name, metadata
"""
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
"""
Convert a dictionary with promoted input names into one with ivc source names.
Parameters
----------
prom_dict : dict
Original dict with some promoted paths.
Returns
-------
dict
New dict with ivc source pathnames.
"""
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
"""
Compute lower level src_indices based on parent src_indices.
Parameters
----------
parent_src_inds : ndarray
Parent src_indices.
parent_src_shape : tuple
Shape of source expected by parent.
my_src_inds : ndarray or fancy index
Src_indices at the current system level, before conversion.
my_src_shape : tuple
Expected source shape at the current system level.
Returns
-------
ndarray
Final src_indices based on those of the parent.
"""
if parent_src_inds is None:
return my_src_inds
elif my_src_inds is None:
return parent_src_inds
if my_src_inds._flat_src:
return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
else:
return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
"""
Return shape as a tuple.
Parameters
----------
shape : int or tuple
The given shape.
Returns
-------
tuple
The shape as a tuple.
"""
if isinstance(shape, Number):
return (shape,)
elif shape is None:
return shape
return tuple(shape)
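# Hedged usage sketch (editorial addition): ints become 1-tuples, iterables
# become tuples, and None passes through unchanged.
def _demo_shape2tuple():
    assert shape2tuple(4) == (4,)
    assert shape2tuple([2, 3]) == (2, 3)
    assert shape2tuple(None) is None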
def get_connection_owner(system, tgt):
"""
Return (owner, promoted_src, promoted_tgt) for the given connected target.
    Note: this is not speedy. It's intended for use only in error messages.
Parameters
----------
system : System
Any System. The search always goes from the model level down.
tgt : str
Absolute pathname of the target variable.
Returns
-------
tuple
        (owning group, promoted source name, promoted target name).
"""
from openmdao.core.group import Group
model = system._problem_meta['model_ref']()
src = model._conn_global_abs_in2out[tgt]
abs2prom = model._var_allprocs_abs2prom
    if src in abs2prom['output'] and tgt in abs2prom['input']:
if abs2prom['input'][tgt] != abs2prom['output'][src]:
# connection is explicit
for g in model.system_iter(include_self=True, recurse=True, typ=Group):
if g._manual_connections:
tprom = g._var_allprocs_abs2prom['input'][tgt]
if tprom in g._manual_connections:
return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom
return None, None, None
def wing_dbg():
"""
Make import of wingdbstub contingent on value of WING_DBG environment variable.
Also will import wingdbstub from the WINGHOME directory.
"""
if env_truthy('WING_DBG'):
import sys
import os
save = sys.path
new = sys.path[:] + [os.environ['WINGHOME']]
sys.path = new
try:
import wingdbstub
finally:
sys.path = save
class LocalRangeIterable(object):
"""
Iterable object yielding local indices while iterating over local or distributed vars.
The number of iterations for a distributed variable will be the full distributed size of the
variable but None will be returned for any indices that are not local to the given rank.
Parameters
----------
system : System
Containing System.
vname : str
Name of the variable.
use_vec_offset : bool
If True, return indices for the given variable within its vector, else just return
indices within the variable itself, i.e. range(var_size).
Attributes
----------
_inds : ndarray
Variable indices (unused for distributed variables).
_dist_size : int
Full size of distributed variable.
_start : int
Starting index of distributed variable on this rank.
_end : int
Last index + 1 of distributed variable on this rank.
_offset : int
        Offset of this variable into the local vector.
_iter : method
The iteration method used.
"""
def __init__(self, system, vname, use_vec_offset=True):
"""
Initialize the iterator.
"""
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if vname in abs2meta:
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = self._start + sizes[rank, var_idx]
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range(slices[vname].stop - slices[vname].start)
def _serial_iter(self):
"""
Iterate over a local non-distributed variable.
Yields
------
int
Variable index.
"""
yield from self._inds
def _dist_iter(self):
"""
Iterate over a distributed variable.
Yields
------
int or None
Variable index or None if index is not local to this rank.
"""
start = self._start
end = self._end
for i in range(self._dist_size):
if i >= start and i < end:
yield i - start + self._offset
else:
yield None
def __iter__(self):
"""
Return an iterator.
Returns
-------
iterator
An iterator over our indices.
"""
return self._iter()
| 27.180818
| 97
| 0.607161
|
from contextlib import contextmanager
import os
import re
import sys
import warnings
import unittest
from fnmatch import fnmatchcase
from io import StringIO
from numbers import Number
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import numpy as np
from openmdao.core.constants import INT_DTYPE, INF_BOUND
from openmdao.utils.om_warnings import issue_warning, _warn_simple_format, warn_deprecation
_ignore_errors = False
def _convert_auto_ivc_to_conn_name(conns_dict, name):
for key, val in conns_dict.items():
if val == name:
return key
def ignore_errors(flag=None):
global _ignore_errors
if flag is not None:
_ignore_errors = flag
return _ignore_errors
def conditional_error(msg, exc=RuntimeError, category=UserWarning, err=None):
if (err is None and ignore_errors()) or err is False:
issue_warning(msg, category=category)
else:
raise exc(msg)
@contextmanager
def ignore_errors_context(flag=True):
save = ignore_errors()
ignore_errors(flag)
try:
yield
finally:
ignore_errors(save)
def simple_warning(msg, category=UserWarning, stacklevel=2):
warn_deprecation('simple_warning is deprecated. '
'Use openmdao.utils.om_warnings.issue_warning instead.')
old_format = warnings.formatwarning
warnings.formatwarning = _warn_simple_format
try:
warnings.warn(msg, category, stacklevel)
finally:
warnings.formatwarning = old_format
def ensure_compatible(name, value, shape=None, indices=None):
if isinstance(value, Iterable):
value = np.asarray(value)
if shape is not None:
if isinstance(shape, numbers.Integral):
shape = (shape,)
elif isinstance(shape, list):
shape = tuple(shape)
elif not np.isscalar(value):
shape = np.atleast_1d(value).shape
if indices is not None:
if not indices._flat_src and shape is None:
raise RuntimeError("src_indices for '%s' is not flat, so its input "
"shape must be provided." % name)
try:
indshape = indices.indexed_src_shape
except (RuntimeError, ValueError, TypeError):
pass
else:
if shape is not None and np.product(indshape) != np.product(shape):
raise ValueError("Shape of indices %s does not match shape of %s for '%s'." %
(indshape, shape, name))
if shape is None:
shape = indshape
if shape is None:
value = np.atleast_1d(value)
shape = value.shape
else:
if np.isscalar(value) or value.shape == (1,):
value = np.ones(shape) * value
else:
value = np.atleast_1d(value).astype(np.float64)
if value.shape != shape:
raise ValueError("Incompatible shape for '%s': Expected %s but got %s." %
(name, shape, value.shape))
return value, shape
def determine_adder_scaler(ref0, ref, adder, scaler):
if ref0 is not None or ref is not None:
if scaler is not None or adder is not None:
raise ValueError('Inputs ref/ref0 are mutually exclusive '
'with scaler/adder')
if ref is None:
ref = 1.0
if ref0 is None:
ref0 = 0.0
adder = -ref0
scaler = 1.0 / (ref + adder)
else:
if scaler is None:
scaler = 1.0
if adder is None:
adder = 0.0
adder = format_as_float_or_array('adder', adder, val_if_none=0.0, flatten=True)
scaler = format_as_float_or_array('scaler', scaler, val_if_none=1.0, flatten=True)
return adder, scaler
def set_pyoptsparse_opt(optname, fallback=True):
OPT = None
opt = None
OPTIMIZER = None
force = os.environ.get('OPENMDAO_FORCE_PYOPTSPARSE_OPT')
if force:
optname = force
from unittest.mock import Mock
try:
from pyoptsparse import OPT
try:
opt = OPT(optname)
OPTIMIZER = optname
except Exception:
if fallback and optname != 'SLSQP':
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
else:
if fallback and isinstance(opt, Mock):
try:
opt = OPT('SLSQP')
OPTIMIZER = 'SLSQP'
except Exception:
pass
except Exception:
pass
if isinstance(opt, Mock):
OPT = OPTIMIZER = None
if not fallback and OPTIMIZER != optname:
raise unittest.SkipTest("pyoptsparse is not providing %s" % optname)
return OPT, OPTIMIZER
def format_as_float_or_array(name, values, val_if_none=0.0, flatten=False):
if isinstance(values, np.ndarray):
if flatten:
values = values.flatten()
elif not isinstance(values, str) \
and isinstance(values, Iterable):
values = np.asarray(values, dtype=float)
if flatten:
values = values.flatten()
elif values is None:
values = val_if_none
elif values == float('inf'):
values = INF_BOUND
elif values == -float('inf'):
values = -INF_BOUND
elif isinstance(values, numbers.Number):
values = float(values)
else:
raise TypeError('Expected values of {0} to be an Iterable of '
'numeric values, or a scalar numeric value. '
'Got {1} instead.'.format(name, values))
return values
class ContainsAll(object):
def __contains__(self, name):
return True
def all_ancestors(pathname, delim='.'):
parts = pathname.split(delim)
for i in range(len(parts), 0, -1):
yield delim.join(parts[:i])
def find_matches(pattern, var_list):
if pattern == '*':
return var_list
elif pattern in var_list:
return [pattern]
return [name for name in var_list if fnmatchcase(name, pattern)]
def pad_name(name, pad_num=10, quotes=False):
l_name = len(name)
quotes_len = 2 if quotes else 0
if l_name + quotes_len < pad_num:
pad = pad_num - (l_name + quotes_len)
if quotes:
pad_str = "'{name}'{sep:<{pad}}"
else:
pad_str = "{name}{sep:<{pad}}"
pad_name = pad_str.format(name=name, sep='', pad=pad)
return pad_name
else:
if quotes:
return "'{0}'".format(name)
else:
return '{0}'.format(name)
def run_model(prob, ignore_exception=False):
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
prob.run_model()
except Exception as err:
if not ignore_exception:
raise err
finally:
sys.stdout = stdout
return strout.getvalue()
def run_driver(prob):
stdout = sys.stdout
strout = StringIO()
sys.stdout = strout
try:
failed = prob.run_driver()
finally:
sys.stdout = stdout
return failed, strout.getvalue()
@contextmanager
def printoptions(*args, **kwds):
opts = np.get_printoptions()
kw_opts = dict((key, val) for key, val in kwds.items() if key in opts)
try:
np.set_printoptions(*args, **kw_opts)
yield np.get_printoptions()
finally:
np.set_printoptions(**opts)
def _nothing():
yield None
def do_nothing_context():
return contextmanager(_nothing)()
def remove_whitespace(s, right=False, left=False):
if not left and not right:
return re.sub(r"\s+", "", s, flags=re.UNICODE)
elif right and left:
return re.sub(r"^\s+|\s+$", "", s, flags=re.UNICODE)
elif right:
return re.sub(r"\s+$", "", s, flags=re.UNICODE)
else:
return re.sub(r"^\s+", "", s, flags=re.UNICODE)
_badtab = r'`~@#$%^&*()[]{}-+=|\/?<>,.:;'
_transtab = str.maketrans(_badtab, '_' * len(_badtab))
def str2valid_python_name(s):
return s.translate(_transtab)
_container_classes = (list, tuple, set)
def make_serializable(o):
if isinstance(o, _container_classes):
return [make_serializable(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [make_serializable(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
try:
return o.to_json()
except AttributeError:
return o.__class__.__name__
else:
return o
def make_serializable_key(o):
if isinstance(o, str):
return o
elif isinstance(o, np.number):
return o.item()
elif hasattr(o, '__dict__'):
return o.__class__.__name__
else:
return str(o)
def default_noraise(o):
if isinstance(o, _container_classes):
return [default_noraise(item) for item in o]
elif isinstance(o, dict):
s_key = [make_serializable_key(item) for item in o.keys()]
s_val = [default_noraise(item) for item in o.values()]
return dict(zip(s_key, s_val))
elif isinstance(o, np.ndarray):
return o.tolist()
elif isinstance(o, np.number):
return o.item()
elif isinstance(o, (str, float, int)):
return o
elif isinstance(o, bool) or isinstance(o, complex):
return str(o)
elif hasattr(o, '__dict__'):
return o.__class__.__name__
elif o is None:
return None
else:
return f"unserializable object ({type(o).__name__})"
def make_set(str_data, name=None):
if not str_data:
return set()
elif isinstance(str_data, str):
return {str_data}
elif isinstance(str_data, (set, list)):
for item in str_data:
if not isinstance(item, str):
typ = type(item).__name__
msg = f"Items in tags should be of type string, but type '{typ}' was found."
raise TypeError(msg)
if isinstance(str_data, set):
return str_data
elif isinstance(str_data, list):
return set(str_data)
elif name:
raise TypeError("The {} argument should be str, set, or list: {}".format(name, str_data))
else:
raise TypeError("The argument should be str, set, or list: {}".format(str_data))
def match_includes_excludes(name, includes=None, excludes=None):
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern):
return False
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern):
return True
return False
def match_prom_or_abs(name, prom_name, includes=None, excludes=None):
diff = name != prom_name
if excludes is not None:
for pattern in excludes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return False
if includes is None:
return True
else:
for pattern in includes:
if fnmatchcase(name, pattern) or (diff and fnmatchcase(prom_name, pattern)):
return True
return False
_falsey = {'0', 'false', 'no', ''}
def env_truthy(env_var):
return os.environ.get(env_var, '0').lower() not in _falsey
def common_subpath(pathnames):
if len(pathnames) == 1:
return pathnames[0]
if pathnames:
npaths = len(pathnames)
splits = [p.split('.') for p in pathnames]
minlen = np.min([len(s) for s in splits])
for common_loc in range(minlen):
p0 = splits[0][common_loc]
for i in range(1, npaths):
if p0 != splits[i][common_loc]:
break
else:
continue
break
else:
common_loc += 1
return '.'.join(splits[0][:common_loc])
return ''
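# --- Editor's addition: illustrative sketch only (paths are invented).
# common_subpath returns the longest shared dotted prefix of the given names.
def _example_common_subpath():
    return common_subpath(['model.sub.comp1.x', 'model.sub.comp2.y'])  # -> 'model.sub'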
def _is_slicer_op(indices):
if isinstance(indices, tuple):
return any(isinstance(i, slice) or i is ... for i in indices)
return isinstance(indices, slice)
def _slice_indices(slicer, arr_size, arr_shape):
if isinstance(slicer, slice):
start, stop, step = slicer.start, slicer.stop, slicer.step
if start is None:
start = 0
if stop is None:
stop = arr_size
if step is None:
step = 1
return np.arange(start, stop, step, dtype=INT_DTYPE).reshape(arr_shape)
else:
return np.arange(arr_size, dtype=INT_DTYPE).reshape(arr_shape)[slicer]
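# --- Editor's addition: illustrative sketch only (shape and slices are invented).
# _slice_indices resolves a slice (or slice tuple) into the concrete integer
# indices it selects within an array of the given size and shape.
def _example_slice_indices():
    return _slice_indices((slice(None), slice(0, 2)), 6, (2, 3))
    # -> array([[0, 1],
    #           [3, 4]])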
def _prom2ivc_src_name_iter(prom_dict):
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source']
else:
yield name
def _prom2ivc_src_item_iter(prom_dict):
for name, meta in prom_dict.items():
if meta['ivc_source'] is not None:
yield meta['ivc_source'], meta
else:
yield name, meta
def _prom2ivc_src_dict(prom_dict):
return {name: meta for name, meta in _prom2ivc_src_item_iter(prom_dict)}
def convert_src_inds(parent_src_inds, parent_src_shape, my_src_inds, my_src_shape):
if parent_src_inds is None:
return my_src_inds
elif my_src_inds is None:
return parent_src_inds
if my_src_inds._flat_src:
return parent_src_inds.shaped_array(flat=True)[my_src_inds.flat()]
else:
return parent_src_inds.shaped_array(flat=False).reshape(my_src_shape)[my_src_inds()]
def shape2tuple(shape):
if isinstance(shape, Number):
return (shape,)
elif shape is None:
return shape
return tuple(shape)
def get_connection_owner(system, tgt):
from openmdao.core.group import Group
model = system._problem_meta['model_ref']()
src = model._conn_global_abs_in2out[tgt]
abs2prom = model._var_allprocs_abs2prom
    if src in abs2prom['output'] and tgt in abs2prom['input']:
if abs2prom['input'][tgt] != abs2prom['output'][src]:
for g in model.system_iter(include_self=True, recurse=True, typ=Group):
if g._manual_connections:
tprom = g._var_allprocs_abs2prom['input'][tgt]
if tprom in g._manual_connections:
return g.pathname, g._var_allprocs_abs2prom['output'][src], tprom
return None, None, None
def wing_dbg():
if env_truthy('WING_DBG'):
import sys
import os
save = sys.path
new = sys.path[:] + [os.environ['WINGHOME']]
sys.path = new
try:
import wingdbstub
finally:
sys.path = save
class LocalRangeIterable(object):
def __init__(self, system, vname, use_vec_offset=True):
self._dist_size = 0
abs2meta = system._var_allprocs_abs2meta['output']
if vname in abs2meta:
sizes = system._var_sizes['output']
slices = system._outputs.get_slice_dict()
else:
abs2meta = system._var_allprocs_abs2meta['input']
sizes = system._var_sizes['input']
slices = system._inputs.get_slice_dict()
if abs2meta[vname]['distributed']:
var_idx = system._var_allprocs_abs2idx[vname]
rank = system.comm.rank
self._offset = np.sum(sizes[rank, :var_idx]) if use_vec_offset else 0
self._iter = self._dist_iter
self._start = np.sum(sizes[:rank, var_idx])
self._end = self._start + sizes[rank, var_idx]
self._dist_size = np.sum(sizes[:, var_idx])
else:
self._iter = self._serial_iter
if use_vec_offset:
self._inds = range(slices[vname].start, slices[vname].stop)
else:
self._inds = range(slices[vname].stop - slices[vname].start)
def _serial_iter(self):
yield from self._inds
def _dist_iter(self):
start = self._start
end = self._end
for i in range(self._dist_size):
if i >= start and i < end:
yield i - start + self._offset
else:
yield None
def __iter__(self):
return self._iter()
| true
| true
|
790b8ed7a9d90b116983f2c1e6805773324e94d8
| 909
|
py
|
Python
|
twlived/events.py
|
tausackhn/twlived
|
e065fe5efc479ad2ec0ee0053994cba857e39ae2
|
[
"MIT"
] | 11
|
2017-04-11T13:09:36.000Z
|
2021-11-27T22:14:34.000Z
|
twlived/events.py
|
tausackhn/twlived
|
e065fe5efc479ad2ec0ee0053994cba857e39ae2
|
[
"MIT"
] | 1
|
2017-09-07T10:29:53.000Z
|
2017-09-07T16:01:01.000Z
|
twlived/events.py
|
tausackhn/twlived
|
e065fe5efc479ad2ec0ee0053994cba857e39ae2
|
[
"MIT"
] | 1
|
2021-04-15T16:07:58.000Z
|
2021-04-15T16:07:58.000Z
|
from pydantic import BaseModel
from .utils import BaseEvent
class MainPublisherEvent(BaseEvent):
pass
class CheckStatus(MainPublisherEvent):
channel: str
class WaitLiveVideo(MainPublisherEvent):
pass
class WaitStream(MainPublisherEvent):
time: int
class DownloaderEvent(BaseEvent):
pass
class StartDownloading(DownloaderEvent):
id: str
class PlaylistUpdate(DownloaderEvent):
total_size: int
to_load: int
class DownloadedChunk(DownloaderEvent):
pass
class StopDownloading(DownloaderEvent):
pass
class DownloadingProgress(BaseModel): # type: ignore
total_segments: int = 0
total_downloaded_segments: int = 0
last_chunk_size: int = 0
downloaded_segments: int = 0
def chunk_loaded(self) -> None:
self.downloaded_segments += 1
self.total_downloaded_segments += 1
class ExceptionEvent(BaseEvent):
message: str
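# --- Editor's addition: illustrative usage sketch (values are invented).
# Shows how a downloader might track progress with DownloadingProgress; the
# guard keeps the example from running on import.
if __name__ == "__main__":
    _progress = DownloadingProgress(total_segments=10)
    for _ in range(3):
        _progress.chunk_loaded()
    print(_progress.downloaded_segments, _progress.total_downloaded_segments)  # 3 3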
| 16.232143
| 53
| 0.733773
|
from pydantic import BaseModel
from .utils import BaseEvent
class MainPublisherEvent(BaseEvent):
pass
class CheckStatus(MainPublisherEvent):
channel: str
class WaitLiveVideo(MainPublisherEvent):
pass
class WaitStream(MainPublisherEvent):
time: int
class DownloaderEvent(BaseEvent):
pass
class StartDownloading(DownloaderEvent):
id: str
class PlaylistUpdate(DownloaderEvent):
total_size: int
to_load: int
class DownloadedChunk(DownloaderEvent):
pass
class StopDownloading(DownloaderEvent):
pass
class DownloadingProgress(BaseModel):
total_segments: int = 0
total_downloaded_segments: int = 0
last_chunk_size: int = 0
downloaded_segments: int = 0
def chunk_loaded(self) -> None:
self.downloaded_segments += 1
self.total_downloaded_segments += 1
class ExceptionEvent(BaseEvent):
message: str
| true
| true
|
790b8f0decceafeb9f1f47457358a8dcf0fa3542
| 237
|
py
|
Python
|
05_tcp_ip_tricks/Sniffer Detection.py
|
mumbo-pro/network-penetration
|
30fcc70a0bdb094e2339951785d4d72b0373a71f
|
[
"MIT"
] | 3
|
2020-07-25T13:36:02.000Z
|
2021-06-03T19:59:13.000Z
|
05_tcp_ip_tricks/Sniffer Detection.py
|
mumbo-pro/understanding-network-hacks
|
30fcc70a0bdb094e2339951785d4d72b0373a71f
|
[
"MIT"
] | null | null | null |
05_tcp_ip_tricks/Sniffer Detection.py
|
mumbo-pro/understanding-network-hacks
|
30fcc70a0bdb094e2339951785d4d72b0373a71f
|
[
"MIT"
] | 2
|
2019-07-12T10:04:23.000Z
|
2019-07-18T17:57:59.000Z
|
ifconfig -a | grep PROMISC
cat /var/log/messages |grep promisc
#!/usr/bin/python

import sys
from scapy.all import promiscping

if len(sys.argv) < 2:
    print sys.argv[0] + " <net>"
    sys.exit()

promiscping(sys.argv[1])
| 47.4
| 171
| 0.691983
|
ifconfig -a | grep PROMISC
cat /var/log/messages |grep promisc
1
| false
| true
|
790b8fb7a4fa3fba8749496732d8f5fe914da627
| 905
|
py
|
Python
|
OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv/Affine Transformation/shearing.py
|
Payal197bhadra/ComputerVision
|
d66b5037ece99b6189dd4306b2c9be67cffd14af
|
[
"MIT"
] | null | null | null |
OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv/Affine Transformation/shearing.py
|
Payal197bhadra/ComputerVision
|
d66b5037ece99b6189dd4306b2c9be67cffd14af
|
[
"MIT"
] | null | null | null |
OpenCV-Computer-Vision-Examples-with-Python-A-Complete-Guide-for-Dummies-master/Source Code/opencv/Affine Transformation/shearing.py
|
Payal197bhadra/ComputerVision
|
d66b5037ece99b6189dd4306b2c9be67cffd14af
|
[
"MIT"
] | null | null | null |
import cv2
import matplotlib.pyplot as plt
import numpy as np
img= cv2.imread("img.png")
img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.axis('off')
# show the image
plt.imshow(img)
plt.show()
# get the image shape
rows, cols, dim = img.shape
rows, cols, dim = img.shape
# transformation matrix for Shearing
# shearing applied to x-axis
M1 = np.float32([[1, 0.5, 0],
[0, 1 , 0],
[0, 0 , 1]])
# shearing applied to y-axis
M2 = np.float32([[1, 0, 0],
[0.5, 1, 0],
[0, 0, 1]])
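# Editor's note (added, illustrative): in homogeneous coordinates these matrices act as
#   M1: (x, y) -> (x + 0.5*y, y)   shear along the x-axis
#   M2: (x, y) -> (x, y + 0.5*x)   shear along the y-axis
# e.g. the point (10, 20) maps to (20, 20) under M1 and to (10, 25) under M2,
# which is why the output canvas below is enlarged to 1.5x the original size.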
# apply a perspective transformation to the image
sheared_img_in_x = cv2.warpPerspective(img,M1,(int(cols*1.5),int(rows*1.5)))
sheared_img_in_y = cv2.warpPerspective(img,M2,(int(cols*1.5),int(rows*1.5)))
# disable x & y axis
plt.axis('off')
# show the resulting image
plt.subplot(121)
plt.imshow(sheared_img_in_x)
plt.subplot(122)
plt.imshow(sheared_img_in_y)
plt.show()
| 27.424242
| 76
| 0.658564
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
img= cv2.imread("img.png")
img=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.axis('off')
plt.imshow(img)
plt.show()
rows, cols, dim = img.shape
rows, cols, dim = img.shape
M1 = np.float32([[1, 0.5, 0],
[0, 1 , 0],
[0, 0 , 1]])
M2 = np.float32([[1, 0, 0],
[0.5, 1, 0],
[0, 0, 1]])
sheared_img_in_x = cv2.warpPerspective(img,M1,(int(cols*1.5),int(rows*1.5)))
sheared_img_in_y = cv2.warpPerspective(img,M2,(int(cols*1.5),int(rows*1.5)))
plt.axis('off')
plt.subplot(121)
plt.imshow(sheared_img_in_x)
plt.subplot(122)
plt.imshow(sheared_img_in_y)
plt.show()
| true
| true
|
790b8fbd0e53a9a03d4b620835dbc1a50bfd23cc
| 6,553
|
py
|
Python
|
datameta_client_lib/model/staged_meta_data_sets.py
|
ghga-de/datameta-client-lib
|
85c8900c26d092a929db6c5b0bd6b89cdea9a176
|
[
"Apache-2.0"
] | null | null | null |
datameta_client_lib/model/staged_meta_data_sets.py
|
ghga-de/datameta-client-lib
|
85c8900c26d092a929db6c5b0bd6b89cdea9a176
|
[
"Apache-2.0"
] | 1
|
2021-03-15T18:42:36.000Z
|
2021-03-15T18:42:36.000Z
|
datameta_client_lib/model/staged_meta_data_sets.py
|
ghga-de/datameta-client-lib
|
85c8900c26d092a929db6c5b0bd6b89cdea9a176
|
[
"Apache-2.0"
] | null | null | null |
"""
DataMeta
DataMeta # noqa: E501
The version of the OpenAPI document: 1.4.0
Contact: leon.kuchenbecker@uni-tuebingen.de
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from datameta_client_lib.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class StagedMetaDataSets(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'metadataset_ids': ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'metadataset_ids': 'metadatasetIds', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, metadataset_ids, *args, **kwargs): # noqa: E501
"""StagedMetaDataSets - a model defined in OpenAPI
Args:
metadataset_ids ([str]):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.metadataset_ids = metadataset_ids
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
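# --- Editor's addition: illustrative usage sketch (the ids are invented).
# Minimal example of constructing this generated model directly; guarded so it
# never runs on import.
if __name__ == "__main__":
    staged = StagedMetaDataSets(metadataset_ids=["mds_001", "mds_002"])
    print(staged.metadataset_ids)  # -> ['mds_001', 'mds_002']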
| 38.321637
| 110
| 0.584923
|
import re
import sys
from datameta_client_lib.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class StagedMetaDataSets(ModelNormal):
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
return {
'metadataset_ids': ([str],),
}
@cached_property
def discriminator():
return None
attribute_map = {
'metadataset_ids': 'metadatasetIds',
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, metadataset_ids, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.metadataset_ids = metadataset_ids
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
continue
setattr(self, var_name, var_value)
| true
| true
|
790b907d340166b927df243cbe28d86d3deb503e
| 25,162
|
py
|
Python
|
src/bot.py
|
SergeyKonnov/walbot
|
28923523299bd18b47074915c8209833683d0b8c
|
[
"MIT"
] | null | null | null |
src/bot.py
|
SergeyKonnov/walbot
|
28923523299bd18b47074915c8209833683d0b8c
|
[
"MIT"
] | null | null | null |
src/bot.py
|
SergeyKonnov/walbot
|
28923523299bd18b47074915c8209833683d0b8c
|
[
"MIT"
] | null | null | null |
import asyncio
import datetime
import importlib
import itertools
import os
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import zipfile
import discord
import psutil
from src import const
from src.algorithms import levenshtein_distance
from src.bc import DoNotUpdateFlag
from src.bot_cache import BotCache
from src.bot_instance import BotInstance
from src.config import Command, Config, GuildSettings, SecretConfig, User, bc
from src.embed import DiscordEmbed
from src.emoji import get_clock_emoji
from src.ff import FF
from src.info import BotInfo
from src.log import log
from src.mail import Mail
from src.markov import Markov
from src.message import Msg
from src.reminder import Reminder
from src.repl import Repl
from src.utils import Util
from src.voice import VoiceRoutine
class WalBot(discord.Client):
def __init__(self, name: str, config: Config, secret_config: SecretConfig, intents: discord.Intents) -> None:
super().__init__(intents=intents)
self.repl = None
bc.instance_name = self.instance_name = name
self.config = config
self.secret_config = secret_config
self.bot_cache = BotCache(True)
self.loop.create_task(self._process_reminders())
self.loop.create_task(VoiceRoutine(self.bot_cache).start())
self.loop.create_task(self._repl_routine())
bc.config = self.config
bc.commands = self.config.commands
bc.background_loop = self.loop
bc.latency = lambda: self.latency
bc.change_status = self._change_status
bc.change_presence = self.change_presence
bc.close = self.close
bc.secret_config = self.secret_config
bc.info = BotInfo()
bc.plugin_manager.register()
bc.fetch_channel = self.fetch_channel
if not bc.args.fast_start:
log.debug("Started Markov model checks...")
if bc.markov.check():
log.info("Markov model has passed all checks")
else:
log.info("Markov model has not passed checks, but all errors were fixed")
async def _bot_runner_task(self, *args, **kwargs):
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def run(self, *args, **kwargs):
        # Slightly patched implementation from discord.py discord.Client (parent) class
# Reference: https://github.com/Rapptz/discord.py/blob/master/discord/client.py
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
        asyncio.ensure_future(self._bot_runner_task(*args, **kwargs), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
log.info('Received signal to terminate bot and event loop')
log.info("Shutting down the bot...")
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
for task in tasks:
if not task.cancelled():
log.error("Asynchronous task cancel failed!")
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(self._on_shutdown())
loop.close()
log.info("Bot is shut down!")
async def _on_shutdown(self) -> None:
if self.repl is not None:
self.repl.stop()
for event in bc.background_events:
event.cancel()
bc.background_loop = None
await bc.plugin_manager.broadcast_command("close")
@Mail.send_exception_info_to_admin_emails_async
async def _precompile(self) -> None:
log.debug("Started precompiling functions...")
levenshtein_distance("", "")
log.debug("Finished precompiling functions")
async def _change_status(self, string: str, type_: discord.ActivityType) -> None:
await self.change_presence(activity=discord.Activity(name=string, type=type_))
async def _config_autosave(self) -> None:
await self.wait_until_ready()
index = 1
while not self.is_closed():
await asyncio.sleep(self.config.saving["period"] * 60)
if index % self.config.saving["backup"]["period"] == 0:
self.config.backup(const.CONFIG_PATH, const.MARKOV_PATH)
self.config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH)
index += 1
async def _process_reminders_iteration(self) -> None:
log.debug3("Reminder processing iteration has started")
now = datetime.datetime.now().replace(second=0).strftime(const.REMINDER_DATETIME_FORMAT)
to_remove = []
to_append = []
reminder_do_not_update_flag = False
for key, rem in self.config.reminders.items():
for i in range(len(rem.prereminders_list)):
prereminder = rem.prereminders_list[i]
used_prereminder = rem.used_prereminders_list[i]
if prereminder == 0 or used_prereminder:
continue
prereminder_time = (
datetime.datetime.now().replace(second=0) + datetime.timedelta(minutes=prereminder))
if rem == prereminder_time.strftime(const.REMINDER_DATETIME_FORMAT):
channel = self.get_channel(rem.channel_id)
e = DiscordEmbed()
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e.title(f"{prereminder} minutes left until reminder")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(
datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=prereminder))
e.footer(text=rem.author)
await channel.send("", embed=e.get())
rem.used_prereminders_list[i] = True
if rem == now:
channel = self.get_channel(rem.channel_id)
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e = DiscordEmbed()
e.title(f"{clock_emoji} You asked to remind")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(datetime.datetime.now(datetime.timezone.utc))
e.footer(text=rem.author)
await channel.send(' '.join(rem.ping_users if rem.ping_users else ""), embed=e.get())
for user_id in rem.whisper_users:
await Msg.send_direct_message(
self.get_user(user_id), f"You asked to remind at {now} -> {rem.message}", False)
if rem.email_users:
mail = Mail(self.secret_config)
mail.send(
rem.email_users,
f"Reminder: {rem.message}",
f"You asked to remind at {now} -> {rem.message}")
if rem.repeat_after > 0:
new_time = datetime.datetime.now().replace(second=0, microsecond=0) + rem.get_next_event_delta()
new_time = new_time.strftime(const.REMINDER_DATETIME_FORMAT)
to_append.append(
Reminder(str(new_time), rem.message, rem.channel_id, rem.author, rem.time_created))
to_append[-1].repeat_after = rem.repeat_after
to_append[-1].repeat_interval_measure = rem.repeat_interval_measure
to_append[-1].prereminders_list = rem.prereminders_list
to_append[-1].used_prereminders_list = [False] * len(rem.prereminders_list)
to_append[-1].notes = rem.notes
log.debug2(f"Scheduled renew of recurring reminder - old id: {key}")
to_remove.append(key)
elif rem < now:
log.debug2(f"Scheduled reminder with id {key} removal")
to_remove.append(key)
else:
prereminders_delay = 0
if rem.prereminders_list:
prereminders_delay = max(rem.prereminders_list)
if ((datetime.datetime.strptime(rem.time, const.REMINDER_DATETIME_FORMAT) - datetime.datetime.now())
< datetime.timedelta(minutes=(5 + prereminders_delay / 60))):
reminder_do_not_update_flag = True
bc.do_not_update[DoNotUpdateFlag.REMINDER] = reminder_do_not_update_flag
for key in to_remove:
self.config.reminders.pop(key)
for item in to_append:
key = self.config.ids["reminder"]
self.config.reminders[key] = item
self.config.ids["reminder"] += 1
log.debug3("Reminder processing iteration has finished")
@Mail.send_exception_info_to_admin_emails_async
async def _process_reminders(self) -> None:
await self.wait_until_ready()
while not self.is_closed():
await self._process_reminders_iteration()
await asyncio.sleep(const.REMINDER_POLLING_INTERVAL)
async def _repl_routine(self) -> None:
self.repl = Repl(self.config.repl["port"])
await self.repl.start()
@Mail.send_exception_info_to_admin_emails_async
async def on_ready(self) -> None:
self._load_plugins()
log.info(
f"Logged in as: {self.user.name} {self.user.id} ({self.__class__.__name__}), "
f"instance: {self.instance_name}")
self.bot_cache.update({
"ready": True,
})
self.bot_cache.dump_to_file()
bc.guilds = self.guilds
for guild in self.guilds:
if guild.id not in self.config.guilds.keys():
self.config.guilds[guild.id] = GuildSettings(guild.id)
bc.bot_user = self.user
self.loop.create_task(self._config_autosave())
self.loop.create_task(self._precompile())
def _load_plugins(self) -> None:
for plugin_name in bc.plugin_manager.get_plugins_list():
if plugin_name not in self.config.plugins.keys():
self.config.plugins[plugin_name] = {
"autostart": False,
}
for plugin_name, plugin_state in self.config.plugins.items():
if plugin_state["autostart"]:
asyncio.create_task(bc.plugin_manager.send_command(plugin_name, "init"))
@Mail.send_exception_info_to_admin_emails_async
async def on_message(self, message: discord.Message) -> None:
await bc.plugin_manager.broadcast_command("on_message", message)
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
else:
await self._process_regular_message(message)
await self._process_repetitions(message)
@Mail.send_exception_info_to_admin_emails_async
async def on_message_edit(self, old_message: discord.Message, message: discord.Message) -> None:
if message.embeds != old_message.embeds:
log.info(f"<{message.id}> (edit, embed update) {message.author} -> {message.content}")
return
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> (edit) {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
async def _process_repetitions(self, message: discord.Message) -> None:
m = tuple(bc.message_buffer.get(message.channel.id, i) for i in range(3))
if (all(m) and m[0].content and m[0].content == m[1].content == m[2].content and
(m[0].author.id != self.user.id and
m[1].author.id != self.user.id and
m[2].author.id != self.user.id)):
await message.channel.send(m[0].content)
async def _process_regular_message(self, message: discord.Message) -> None:
channel_id = message.channel.id
if isinstance(message.channel, discord.Thread): # Inherit parent channel settings for threads
channel_id = message.channel.parent_id
if (self.user.mentioned_in(message) or self.user.id in [
member.id for member in list(
itertools.chain(*[role.members for role in message.role_mentions]))]):
if channel_id in self.config.guilds[message.channel.guild.id].markov_responses_whitelist:
result = await self.config.disable_pings_in_response(message, bc.markov.generate())
await message.channel.send(message.author.mention + ' ' + result)
elif channel_id in self.config.guilds[message.channel.guild.id].markov_logging_whitelist:
needs_to_be_added = True
for ignored_prefix in bc.markov.ignored_prefixes.values():
if message.content.startswith(ignored_prefix):
needs_to_be_added = False
break
if needs_to_be_added:
bc.markov.add_string(message.content)
if channel_id in self.config.guilds[message.channel.guild.id].responses_whitelist:
responses_count = 0
for response in self.config.responses.values():
if responses_count >= const.MAX_BOT_RESPONSES_ON_ONE_MESSAGE:
break
if re.search(response.regex, message.content):
text = await Command.process_subcommands(
response.text, message, self.config.users[message.author.id])
await Msg.reply(message, text, False)
responses_count += 1
if channel_id in self.config.guilds[message.channel.guild.id].reactions_whitelist:
for reaction in self.config.reactions.values():
if re.search(reaction.regex, message.content):
log.info("Added reaction " + reaction.emoji)
try:
await message.add_reaction(reaction.emoji)
except discord.HTTPException:
pass
async def _process_command(self, message: discord.Message) -> None:
command = message.content.split(' ')
command = list(filter(None, command))
command[0] = command[0][1:]
if not command[0]:
return log.debug("Ignoring empty command")
if command[0] not in self.config.commands.data.keys():
if command[0] in self.config.commands.aliases.keys():
command[0] = self.config.commands.aliases[command[0]]
else:
await message.channel.send(
f"Unknown command '{command[0]}', "
f"probably you meant '{self._suggest_similar_command(command[0])}'")
return
if command[0] not in (
"poll",
"timer",
"stopwatch",
"vqpush",
):
timeout_error, _ = await Util.run_function_with_time_limit(
self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id]),
const.MAX_COMMAND_EXECUTION_TIME)
if command[0] not in (
"silent",
) and timeout_error:
await message.channel.send(f"Command '{' '.join(command)}' took too long to execute")
else:
await self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id])
def _suggest_similar_command(self, unknown_command: str) -> str:
min_dist = 100000
suggestion = ""
for command in self.config.commands.data.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
for command in self.config.commands.aliases.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
return suggestion
async def on_raw_message_edit(self, payload: discord.RawMessageUpdateEvent) -> None:
try:
log.info(f"<{payload.message_id}> (raw_edit) {payload.data['author']['username']}#"
f"{payload.data['author']['discriminator']} -> {payload.data['content']}")
except KeyError:
pass
async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent) -> None:
log.info(f"<{payload.message_id}> (delete)")
class DiscordBotInstance(BotInstance):
def start(self, args, main_bot=True):
# Check whether bot is already running
bot_cache = BotCache(main_bot).parse()
if bot_cache is not None:
pid = bot_cache["pid"]
if pid is not None and psutil.pid_exists(pid):
return log.error("Bot is already running!")
# Some variable initializations
config = None
secret_config = None
bc.restart_flag = False
bc.args = args
# Handle --nohup flag
if sys.platform in ("linux", "darwin") and args.nohup:
fd = os.open(const.NOHUP_FILE_PATH, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
log.info(f"Output is redirected to {const.NOHUP_FILE_PATH}")
os.dup2(fd, sys.stdout.fileno())
os.dup2(sys.stdout.fileno(), sys.stderr.fileno())
os.close(fd)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
# Selecting YAML parser
bc.yaml_loader, bc.yaml_dumper = Util.get_yaml(verbose=True)
        # Saving application pid in order to safely stop it later
BotCache(main_bot).dump_to_file()
# Executing patch tool if it is necessary
if args.patch:
cmd = f"'{sys.executable}' '{os.path.dirname(__file__) + '/../tools/patch.py'}' all"
log.info("Executing patch tool: " + cmd)
subprocess.call(cmd)
# Read configuration files
config = Util.read_config_file(const.CONFIG_PATH)
if config is None:
config = Config()
secret_config = Util.read_config_file(const.SECRET_CONFIG_PATH)
if secret_config is None:
secret_config = SecretConfig()
bc.markov = Util.read_config_file(const.MARKOV_PATH)
if bc.markov is None and os.path.isdir("backup"):
# Check available backups
markov_backups = sorted(
[x for x in os.listdir("backup") if x.startswith("markov_") and x.endswith(".zip")])
if markov_backups:
# Restore Markov model from backup
with zipfile.ZipFile("backup/" + markov_backups[-1], 'r') as zip_ref:
zip_ref.extractall(".")
log.info(f"Restoring Markov model from backup/{markov_backups[-1]}")
shutil.move(markov_backups[-1][:-4], "markov.yaml")
bc.markov = Util.read_config_file(const.MARKOV_PATH)
if bc.markov is None:
bc.markov = Markov()
log.warning("Failed to restore Markov model from backup. Creating new Markov model...")
if bc.markov is None:
bc.markov = Markov()
log.info("Created empty Markov model")
# Check config versions
ok = True
ok &= Util.check_version(
"discord.py", discord.__version__, const.DISCORD_LIB_VERSION,
solutions=[
"execute: python -m pip install -r requirements.txt",
])
ok &= Util.check_version(
"Config", config.version, const.CONFIG_VERSION,
solutions=[
"run patch tool",
"remove config.yaml (settings will be lost!)",
])
ok &= Util.check_version(
"Markov config", bc.markov.version, const.MARKOV_CONFIG_VERSION,
solutions=[
"run patch tool",
"remove markov.yaml (Markov model will be lost!)",
])
ok &= Util.check_version(
"Secret config", secret_config.version, const.SECRET_CONFIG_VERSION,
solutions=[
"run patch tool",
"remove secret.yaml (your Discord authentication token will be lost!)",
])
if main_bot and not ok:
sys.exit(const.ExitStatus.CONFIG_FILE_ERROR)
config.commands.update()
# Checking authentication token
if secret_config.token is None:
secret_config = SecretConfig()
if not FF.is_enabled("WALBOT_FEATURE_NEW_CONFIG"):
secret_config.token = input("Enter your token: ")
# Constructing bot instance
if main_bot:
intents = discord.Intents.all()
walbot = WalBot(args.name, config, secret_config, intents=intents)
else:
walbot = importlib.import_module("src.minibot").MiniWalBot(args.name, config, secret_config, args.message)
# Starting the bot
try:
walbot.run(secret_config.token)
except discord.errors.PrivilegedIntentsRequired:
log.error("Privileged Gateway Intents are not enabled! Shutting down the bot...")
# After stopping the bot
log.info("Bot is disconnected!")
if main_bot:
config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH, wait=True)
BotCache(main_bot).remove()
if bc.restart_flag:
cmd = f"'{sys.executable}' '{os.path.dirname(os.path.dirname(__file__)) + '/walbot.py'}' start"
log.info("Calling: " + cmd)
if sys.platform in ("linux", "darwin"):
fork = os.fork()
if fork == 0:
subprocess.call(cmd)
elif fork > 0:
log.info("Stopping current instance of the bot")
sys.exit(const.ExitStatus.NO_ERROR)
else:
subprocess.call(cmd)
def stop(self, _, main_bot=True):
if not BotCache(main_bot).exists():
return log.error("Could not stop the bot (cache file does not exist)")
bot_cache = BotCache(main_bot).parse()
pid = bot_cache["pid"]
if pid is None:
return log.error("Could not stop the bot (cache file does not contain pid)")
if psutil.pid_exists(pid):
if sys.platform == "win32":
# Reference to the original solution:
# https://stackoverflow.com/a/64357453
import ctypes
kernel = ctypes.windll.kernel32
kernel.FreeConsole()
kernel.AttachConsole(pid)
kernel.SetConsoleCtrlHandler(None, 1)
kernel.GenerateConsoleCtrlEvent(0, 0)
else:
os.kill(pid, signal.SIGINT)
while psutil.pid_exists(pid):
log.debug("Bot is still running. Please, wait...")
time.sleep(0.5)
log.info("Bot is stopped!")
else:
log.error("Could not stop the bot (bot is not running)")
BotCache(main_bot).remove()
| 46.596296
| 118
| 0.604443
|
import asyncio
import datetime
import importlib
import itertools
import os
import random
import re
import shutil
import signal
import subprocess
import sys
import time
import zipfile
import discord
import psutil
from src import const
from src.algorithms import levenshtein_distance
from src.bc import DoNotUpdateFlag
from src.bot_cache import BotCache
from src.bot_instance import BotInstance
from src.config import Command, Config, GuildSettings, SecretConfig, User, bc
from src.embed import DiscordEmbed
from src.emoji import get_clock_emoji
from src.ff import FF
from src.info import BotInfo
from src.log import log
from src.mail import Mail
from src.markov import Markov
from src.message import Msg
from src.reminder import Reminder
from src.repl import Repl
from src.utils import Util
from src.voice import VoiceRoutine
class WalBot(discord.Client):
def __init__(self, name: str, config: Config, secret_config: SecretConfig, intents: discord.Intents) -> None:
super().__init__(intents=intents)
self.repl = None
bc.instance_name = self.instance_name = name
self.config = config
self.secret_config = secret_config
self.bot_cache = BotCache(True)
self.loop.create_task(self._process_reminders())
self.loop.create_task(VoiceRoutine(self.bot_cache).start())
self.loop.create_task(self._repl_routine())
bc.config = self.config
bc.commands = self.config.commands
bc.background_loop = self.loop
bc.latency = lambda: self.latency
bc.change_status = self._change_status
bc.change_presence = self.change_presence
bc.close = self.close
bc.secret_config = self.secret_config
bc.info = BotInfo()
bc.plugin_manager.register()
bc.fetch_channel = self.fetch_channel
if not bc.args.fast_start:
log.debug("Started Markov model checks...")
if bc.markov.check():
log.info("Markov model has passed all checks")
else:
log.info("Markov model has not passed checks, but all errors were fixed")
async def _bot_runner_task(self, *args, **kwargs):
try:
await self.start(*args, **kwargs)
finally:
if not self.is_closed():
await self.close()
def run(self, *args, **kwargs):
loop = self.loop
try:
loop.add_signal_handler(signal.SIGINT, lambda: loop.stop())
loop.add_signal_handler(signal.SIGTERM, lambda: loop.stop())
except NotImplementedError:
pass
        asyncio.ensure_future(self._bot_runner_task(*args, **kwargs), loop=loop)
try:
loop.run_forever()
except KeyboardInterrupt:
loop.stop()
log.info('Received signal to terminate bot and event loop')
log.info("Shutting down the bot...")
tasks = {t for t in asyncio.all_tasks(loop=loop) if not t.done()}
for task in tasks:
task.cancel()
loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
for task in tasks:
if not task.cancelled():
log.error("Asynchronous task cancel failed!")
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(self._on_shutdown())
loop.close()
log.info("Bot is shut down!")
async def _on_shutdown(self) -> None:
if self.repl is not None:
self.repl.stop()
for event in bc.background_events:
event.cancel()
bc.background_loop = None
await bc.plugin_manager.broadcast_command("close")
@Mail.send_exception_info_to_admin_emails_async
async def _precompile(self) -> None:
log.debug("Started precompiling functions...")
levenshtein_distance("", "")
log.debug("Finished precompiling functions")
async def _change_status(self, string: str, type_: discord.ActivityType) -> None:
await self.change_presence(activity=discord.Activity(name=string, type=type_))
async def _config_autosave(self) -> None:
await self.wait_until_ready()
index = 1
while not self.is_closed():
await asyncio.sleep(self.config.saving["period"] * 60)
if index % self.config.saving["backup"]["period"] == 0:
self.config.backup(const.CONFIG_PATH, const.MARKOV_PATH)
self.config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH)
index += 1
async def _process_reminders_iteration(self) -> None:
log.debug3("Reminder processing iteration has started")
now = datetime.datetime.now().replace(second=0).strftime(const.REMINDER_DATETIME_FORMAT)
to_remove = []
to_append = []
reminder_do_not_update_flag = False
for key, rem in self.config.reminders.items():
for i in range(len(rem.prereminders_list)):
prereminder = rem.prereminders_list[i]
used_prereminder = rem.used_prereminders_list[i]
if prereminder == 0 or used_prereminder:
continue
prereminder_time = (
datetime.datetime.now().replace(second=0) + datetime.timedelta(minutes=prereminder))
if rem == prereminder_time.strftime(const.REMINDER_DATETIME_FORMAT):
channel = self.get_channel(rem.channel_id)
e = DiscordEmbed()
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e.title(f"{prereminder} minutes left until reminder")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(
datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=prereminder))
e.footer(text=rem.author)
await channel.send("", embed=e.get())
rem.used_prereminders_list[i] = True
if rem == now:
channel = self.get_channel(rem.channel_id)
clock_emoji = get_clock_emoji(datetime.datetime.now().strftime("%H:%M"))
e = DiscordEmbed()
e.title(f"{clock_emoji} You asked to remind")
e.description(rem.message + "\n" + rem.notes)
e.color(random.randint(0x000000, 0xffffff))
e.timestamp(datetime.datetime.now(datetime.timezone.utc))
e.footer(text=rem.author)
await channel.send(' '.join(rem.ping_users if rem.ping_users else ""), embed=e.get())
for user_id in rem.whisper_users:
await Msg.send_direct_message(
self.get_user(user_id), f"You asked to remind at {now} -> {rem.message}", False)
if rem.email_users:
mail = Mail(self.secret_config)
mail.send(
rem.email_users,
f"Reminder: {rem.message}",
f"You asked to remind at {now} -> {rem.message}")
if rem.repeat_after > 0:
new_time = datetime.datetime.now().replace(second=0, microsecond=0) + rem.get_next_event_delta()
new_time = new_time.strftime(const.REMINDER_DATETIME_FORMAT)
to_append.append(
Reminder(str(new_time), rem.message, rem.channel_id, rem.author, rem.time_created))
to_append[-1].repeat_after = rem.repeat_after
to_append[-1].repeat_interval_measure = rem.repeat_interval_measure
to_append[-1].prereminders_list = rem.prereminders_list
to_append[-1].used_prereminders_list = [False] * len(rem.prereminders_list)
to_append[-1].notes = rem.notes
log.debug2(f"Scheduled renew of recurring reminder - old id: {key}")
to_remove.append(key)
elif rem < now:
log.debug2(f"Scheduled reminder with id {key} removal")
to_remove.append(key)
else:
prereminders_delay = 0
if rem.prereminders_list:
prereminders_delay = max(rem.prereminders_list)
if ((datetime.datetime.strptime(rem.time, const.REMINDER_DATETIME_FORMAT) - datetime.datetime.now())
< datetime.timedelta(minutes=(5 + prereminders_delay / 60))):
reminder_do_not_update_flag = True
bc.do_not_update[DoNotUpdateFlag.REMINDER] = reminder_do_not_update_flag
for key in to_remove:
self.config.reminders.pop(key)
for item in to_append:
key = self.config.ids["reminder"]
self.config.reminders[key] = item
self.config.ids["reminder"] += 1
log.debug3("Reminder processing iteration has finished")
@Mail.send_exception_info_to_admin_emails_async
async def _process_reminders(self) -> None:
await self.wait_until_ready()
while not self.is_closed():
await self._process_reminders_iteration()
await asyncio.sleep(const.REMINDER_POLLING_INTERVAL)
async def _repl_routine(self) -> None:
self.repl = Repl(self.config.repl["port"])
await self.repl.start()
@Mail.send_exception_info_to_admin_emails_async
async def on_ready(self) -> None:
self._load_plugins()
log.info(
f"Logged in as: {self.user.name} {self.user.id} ({self.__class__.__name__}), "
f"instance: {self.instance_name}")
self.bot_cache.update({
"ready": True,
})
self.bot_cache.dump_to_file()
bc.guilds = self.guilds
for guild in self.guilds:
if guild.id not in self.config.guilds.keys():
self.config.guilds[guild.id] = GuildSettings(guild.id)
bc.bot_user = self.user
self.loop.create_task(self._config_autosave())
self.loop.create_task(self._precompile())
def _load_plugins(self) -> None:
for plugin_name in bc.plugin_manager.get_plugins_list():
if plugin_name not in self.config.plugins.keys():
self.config.plugins[plugin_name] = {
"autostart": False,
}
for plugin_name, plugin_state in self.config.plugins.items():
if plugin_state["autostart"]:
asyncio.create_task(bc.plugin_manager.send_command(plugin_name, "init"))
@Mail.send_exception_info_to_admin_emails_async
async def on_message(self, message: discord.Message) -> None:
await bc.plugin_manager.broadcast_command("on_message", message)
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
else:
await self._process_regular_message(message)
await self._process_repetitions(message)
@Mail.send_exception_info_to_admin_emails_async
async def on_message_edit(self, old_message: discord.Message, message: discord.Message) -> None:
if message.embeds != old_message.embeds:
log.info(f"<{message.id}> (edit, embed update) {message.author} -> {message.content}")
return
if self.config.guilds[message.channel.guild.id].ignored:
return
bc.message_buffer.push(message)
log.info(f"<{message.id}> (edit) {message.author} -> {message.content}")
if message.author.id == self.user.id:
return
if isinstance(message.channel, discord.DMChannel):
return
if message.channel.guild.id is None:
return
if self.config.guilds[message.channel.guild.id].is_whitelisted:
if message.channel.id not in self.config.guilds[message.channel.guild.id].whitelist:
return
if message.author.id not in self.config.users.keys():
self.config.users[message.author.id] = User(message.author.id)
if self.config.users[message.author.id].permission_level < 0:
return
if message.content.startswith(self.config.commands_prefix):
await self._process_command(message)
async def _process_repetitions(self, message: discord.Message) -> None:
m = tuple(bc.message_buffer.get(message.channel.id, i) for i in range(3))
if (all(m) and m[0].content and m[0].content == m[1].content == m[2].content and
(m[0].author.id != self.user.id and
m[1].author.id != self.user.id and
m[2].author.id != self.user.id)):
await message.channel.send(m[0].content)
async def _process_regular_message(self, message: discord.Message) -> None:
channel_id = message.channel.id
if isinstance(message.channel, discord.Thread):
channel_id = message.channel.parent_id
if (self.user.mentioned_in(message) or self.user.id in [
member.id for member in list(
itertools.chain(*[role.members for role in message.role_mentions]))]):
if channel_id in self.config.guilds[message.channel.guild.id].markov_responses_whitelist:
result = await self.config.disable_pings_in_response(message, bc.markov.generate())
await message.channel.send(message.author.mention + ' ' + result)
elif channel_id in self.config.guilds[message.channel.guild.id].markov_logging_whitelist:
needs_to_be_added = True
for ignored_prefix in bc.markov.ignored_prefixes.values():
if message.content.startswith(ignored_prefix):
needs_to_be_added = False
break
if needs_to_be_added:
bc.markov.add_string(message.content)
if channel_id in self.config.guilds[message.channel.guild.id].responses_whitelist:
responses_count = 0
for response in self.config.responses.values():
if responses_count >= const.MAX_BOT_RESPONSES_ON_ONE_MESSAGE:
break
if re.search(response.regex, message.content):
text = await Command.process_subcommands(
response.text, message, self.config.users[message.author.id])
await Msg.reply(message, text, False)
responses_count += 1
if channel_id in self.config.guilds[message.channel.guild.id].reactions_whitelist:
for reaction in self.config.reactions.values():
if re.search(reaction.regex, message.content):
log.info("Added reaction " + reaction.emoji)
try:
await message.add_reaction(reaction.emoji)
except discord.HTTPException:
pass
async def _process_command(self, message: discord.Message) -> None:
command = message.content.split(' ')
command = list(filter(None, command))
command[0] = command[0][1:]
if not command[0]:
return log.debug("Ignoring empty command")
if command[0] not in self.config.commands.data.keys():
if command[0] in self.config.commands.aliases.keys():
command[0] = self.config.commands.aliases[command[0]]
else:
await message.channel.send(
f"Unknown command '{command[0]}', "
f"probably you meant '{self._suggest_similar_command(command[0])}'")
return
if command[0] not in (
"poll",
"timer",
"stopwatch",
"vqpush",
):
timeout_error, _ = await Util.run_function_with_time_limit(
self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id]),
const.MAX_COMMAND_EXECUTION_TIME)
if command[0] not in (
"silent",
) and timeout_error:
await message.channel.send(f"Command '{' '.join(command)}' took too long to execute")
else:
await self.config.commands.data[command[0]].run(message, command, self.config.users[message.author.id])
def _suggest_similar_command(self, unknown_command: str) -> str:
min_dist = 100000
suggestion = ""
for command in self.config.commands.data.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
for command in self.config.commands.aliases.keys():
dist = levenshtein_distance(unknown_command, command)
if dist < min_dist:
suggestion = command
min_dist = dist
return suggestion
async def on_raw_message_edit(self, payload: discord.RawMessageUpdateEvent) -> None:
try:
log.info(f"<{payload.message_id}> (raw_edit) {payload.data['author']['username']}#"
f"{payload.data['author']['discriminator']} -> {payload.data['content']}")
except KeyError:
pass
async def on_raw_message_delete(self, payload: discord.RawMessageDeleteEvent) -> None:
log.info(f"<{payload.message_id}> (delete)")
class DiscordBotInstance(BotInstance):
def start(self, args, main_bot=True):
bot_cache = BotCache(main_bot).parse()
if bot_cache is not None:
pid = bot_cache["pid"]
if pid is not None and psutil.pid_exists(pid):
return log.error("Bot is already running!")
config = None
secret_config = None
bc.restart_flag = False
bc.args = args
if sys.platform in ("linux", "darwin") and args.nohup:
fd = os.open(const.NOHUP_FILE_PATH, os.O_WRONLY | os.O_CREAT | os.O_APPEND)
log.info(f"Output is redirected to {const.NOHUP_FILE_PATH}")
os.dup2(fd, sys.stdout.fileno())
os.dup2(sys.stdout.fileno(), sys.stderr.fileno())
os.close(fd)
signal.signal(signal.SIGHUP, signal.SIG_IGN)
bc.yaml_loader, bc.yaml_dumper = Util.get_yaml(verbose=True)
BotCache(main_bot).dump_to_file()
if args.patch:
cmd = f"'{sys.executable}' '{os.path.dirname(__file__) + '/../tools/patch.py'}' all"
log.info("Executing patch tool: " + cmd)
subprocess.call(cmd)
config = Util.read_config_file(const.CONFIG_PATH)
if config is None:
config = Config()
secret_config = Util.read_config_file(const.SECRET_CONFIG_PATH)
if secret_config is None:
secret_config = SecretConfig()
bc.markov = Util.read_config_file(const.MARKOV_PATH)
if bc.markov is None and os.path.isdir("backup"):
markov_backups = sorted(
[x for x in os.listdir("backup") if x.startswith("markov_") and x.endswith(".zip")])
if markov_backups:
with zipfile.ZipFile("backup/" + markov_backups[-1], 'r') as zip_ref:
zip_ref.extractall(".")
log.info(f"Restoring Markov model from backup/{markov_backups[-1]}")
shutil.move(markov_backups[-1][:-4], "markov.yaml")
bc.markov = Util.read_config_file(const.MARKOV_PATH)
if bc.markov is None:
bc.markov = Markov()
log.warning("Failed to restore Markov model from backup. Creating new Markov model...")
if bc.markov is None:
bc.markov = Markov()
log.info("Created empty Markov model")
ok = True
ok &= Util.check_version(
"discord.py", discord.__version__, const.DISCORD_LIB_VERSION,
solutions=[
"execute: python -m pip install -r requirements.txt",
])
ok &= Util.check_version(
"Config", config.version, const.CONFIG_VERSION,
solutions=[
"run patch tool",
"remove config.yaml (settings will be lost!)",
])
ok &= Util.check_version(
"Markov config", bc.markov.version, const.MARKOV_CONFIG_VERSION,
solutions=[
"run patch tool",
"remove markov.yaml (Markov model will be lost!)",
])
ok &= Util.check_version(
"Secret config", secret_config.version, const.SECRET_CONFIG_VERSION,
solutions=[
"run patch tool",
"remove secret.yaml (your Discord authentication token will be lost!)",
])
if main_bot and not ok:
sys.exit(const.ExitStatus.CONFIG_FILE_ERROR)
config.commands.update()
if secret_config.token is None:
secret_config = SecretConfig()
if not FF.is_enabled("WALBOT_FEATURE_NEW_CONFIG"):
secret_config.token = input("Enter your token: ")
if main_bot:
intents = discord.Intents.all()
walbot = WalBot(args.name, config, secret_config, intents=intents)
else:
walbot = importlib.import_module("src.minibot").MiniWalBot(args.name, config, secret_config, args.message)
try:
walbot.run(secret_config.token)
except discord.errors.PrivilegedIntentsRequired:
log.error("Privileged Gateway Intents are not enabled! Shutting down the bot...")
log.info("Bot is disconnected!")
if main_bot:
config.save(const.CONFIG_PATH, const.MARKOV_PATH, const.SECRET_CONFIG_PATH, wait=True)
BotCache(main_bot).remove()
if bc.restart_flag:
cmd = f"'{sys.executable}' '{os.path.dirname(os.path.dirname(__file__)) + '/walbot.py'}' start"
log.info("Calling: " + cmd)
if sys.platform in ("linux", "darwin"):
fork = os.fork()
if fork == 0:
subprocess.call(cmd)
elif fork > 0:
log.info("Stopping current instance of the bot")
sys.exit(const.ExitStatus.NO_ERROR)
else:
subprocess.call(cmd)
def stop(self, _, main_bot=True):
if not BotCache(main_bot).exists():
return log.error("Could not stop the bot (cache file does not exist)")
bot_cache = BotCache(main_bot).parse()
pid = bot_cache["pid"]
if pid is None:
return log.error("Could not stop the bot (cache file does not contain pid)")
if psutil.pid_exists(pid):
if sys.platform == "win32":
import ctypes
kernel = ctypes.windll.kernel32
kernel.FreeConsole()
kernel.AttachConsole(pid)
kernel.SetConsoleCtrlHandler(None, 1)
kernel.GenerateConsoleCtrlEvent(0, 0)
else:
os.kill(pid, signal.SIGINT)
while psutil.pid_exists(pid):
log.debug("Bot is still running. Please, wait...")
time.sleep(0.5)
log.info("Bot is stopped!")
else:
log.error("Could not stop the bot (bot is not running)")
BotCache(main_bot).remove()
| true
| true
|
790b908662cae9042a56ea78e88cdb535fd5fe3b
| 8,577
|
py
|
Python
|
test/testutils.py
|
idoby/SimpleParsing
|
ed8170a32e7765bd98ed42831a428f0cdb645b67
|
[
"MIT"
] | null | null | null |
test/testutils.py
|
idoby/SimpleParsing
|
ed8170a32e7765bd98ed42831a428f0cdb645b67
|
[
"MIT"
] | null | null | null |
test/testutils.py
|
idoby/SimpleParsing
|
ed8170a32e7765bd98ed42831a428f0cdb645b67
|
[
"MIT"
] | null | null | null |
import shlex
import string
import sys
from contextlib import contextmanager
from typing import Any, Callable, Generic, List, Optional, Tuple, Type, TypeVar, cast
import pytest
import simple_parsing
from simple_parsing import ConflictResolution, DashVariant, ParsingError
from simple_parsing.utils import camel_case
from simple_parsing.wrappers.field_wrapper import ArgumentGenerationMode, NestedMode
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
def xfail_param(*args, reason: str):
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
return pytest.param(*args, marks=pytest.mark.xfail(reason=reason))
Dataclass = TypeVar("Dataclass")
@contextmanager
def raises(exception=ParsingError, match=None, code: int = None):
with pytest.raises(exception, match=match):
yield
from io import StringIO
from contextlib import redirect_stderr
@contextmanager
def exits_and_writes_to_stderr(match: str = ""):
s = StringIO()
with redirect_stderr(s), raises(SystemExit):
yield
s.seek(0)
err_string = s.read()
if match:
assert match in err_string, err_string
else:
assert err_string, err_string
@contextmanager
def raises_missing_required_arg():
with exits_and_writes_to_stderr("the following arguments are required"):
yield
@contextmanager
def raises_expected_n_args(n: int):
with exits_and_writes_to_stderr(f"expected {n} arguments"):
yield
@contextmanager
def raises_unrecognized_args(*args: str):
with exits_and_writes_to_stderr("unrecognized arguments: " + " ".join(args or [])):
yield
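# Illustrative sketch of the helpers above in a test (the parser is a plain
# argparse stand-in, not part of this module):
def _example_stderr_helpers():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--name", required=True)
    with raises_missing_required_arg():
        parser.parse_args([])  # exits, writing "the following arguments are required"
    with raises_unrecognized_args("--bogus"):
        parser.parse_args(["--name", "a", "--bogus"])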
def assert_help_output_equals(actual: str, expected: str) -> None:
# Replace the start with `prog`, since the test runner might not always be
# `pytest`, could also be __main__ when debugging with VSCode
prog = sys.argv[0].split("/")[-1]
if prog != "pytest":
expected = expected.replace("usage: pytest", f"usage: {prog}")
remove = string.punctuation + string.whitespace
if "optional arguments" in expected and sys.version_info[:2] >= (3, 10):
expected = expected.replace("optional arguments", "options")
actual_str = "".join(actual.split())
    actual_str = actual_str.translate(str.maketrans("", "", remove))
expected_str = expected.translate(str.maketrans("", "", remove))
assert actual_str == expected_str, "\n" + "\n".join([actual_str, expected_str])
T = TypeVar("T")
class TestParser(simple_parsing.ArgumentParser, Generic[T]):
    """ A parser subclass just used for testing.
    Makes the retrieval of the arguments a bit easier to read.
    """
    __test__ = False
def __init__(self, *args, **kwargs):
self._current_dest = None
self._current_dataclass = None
super().__init__(*args, **kwargs)
def add_arguments(self, dataclass: Type, dest, prefix="", default=None):
if self._current_dest == dest and self._current_dataclass == dataclass:
return # already added arguments for that dataclass.
self._current_dest = dest
self._current_dataclass = dataclass
return super().add_arguments(dataclass, dest, prefix=prefix, default=default)
def __call__(self, args: str) -> T:
namespace = self.parse_args(shlex.split(args))
value = getattr(namespace, self._current_dest)
value = cast(T, value)
return value
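# Minimal sketch of TestParser in action (the Config dataclass below is
# hypothetical, for illustration only):
def _example_test_parser():
    from dataclasses import dataclass
    @dataclass
    class Config:
        seed: int = 0
    parser: TestParser[Config] = TestParser()
    parser.add_arguments(Config, dest="config")
    assert parser("--seed 123") == Config(seed=123)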
class TestSetup:
@classmethod
def setup(
cls: Type[Dataclass],
arguments: Optional[str] = "",
dest: Optional[str] = None,
default: Optional[Dataclass] = None,
conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
add_option_string_dash_variants: DashVariant = DashVariant.AUTO,
parse_known_args: bool = False,
attempt_to_reorder: bool = False,
*,
argument_generation_mode: ArgumentGenerationMode = ArgumentGenerationMode.FLAT,
nested_mode: NestedMode = NestedMode.DEFAULT,
) -> Dataclass:
"""Basic setup for a test.
Keyword Arguments:
arguments {Optional[str]} -- The arguments to pass to the parser (default: {""})
dest {Optional[str]} -- the attribute where the argument should be stored. (default: {None})
Returns:
            {cls} -- an instance of the dataclass, parsed from the given arguments.
"""
parser = simple_parsing.ArgumentParser(
conflict_resolution=conflict_resolution_mode,
add_option_string_dash_variants=add_option_string_dash_variants,
argument_generation_mode=argument_generation_mode,
nested_mode=nested_mode,
)
if dest is None:
dest = camel_case(cls.__name__)
parser.add_arguments(cls, dest=dest, default=default)
if arguments is None:
if parse_known_args:
args = parser.parse_known_args(attempt_to_reorder=attempt_to_reorder)
else:
args = parser.parse_args()
else:
splits = shlex.split(arguments)
if parse_known_args:
args, unknown_args = parser.parse_known_args(
splits, attempt_to_reorder=attempt_to_reorder
)
else:
args = parser.parse_args(splits)
assert hasattr(args, dest), f"attribute '{dest}' not found in args {args}"
instance: Dataclass = getattr(args, dest) # type: ignore
delattr(args, dest)
# If there are subgroups, we can allow an extra "subgroups" attribute, otherwise we don't
# expect any other arguments.
args_dict = vars(args).copy()
args_dict.pop("subgroups", None)
assert not args_dict, f"Namespace has leftover garbage values (besides subgroups): {args}"
instance = cast(Dataclass, instance)
return instance
@classmethod
def setup_multiple(
cls: Type[Dataclass], num_to_parse: int, arguments: Optional[str] = ""
) -> Tuple[Dataclass, ...]:
conflict_resolution_mode: ConflictResolution = ConflictResolution.ALWAYS_MERGE
parser = simple_parsing.ArgumentParser(conflict_resolution=conflict_resolution_mode)
class_name = camel_case(cls.__name__)
for i in range(num_to_parse):
parser.add_arguments(cls, f"{class_name}_{i}")
if arguments is None:
args = parser.parse_args()
else:
splits = shlex.split(arguments)
args = parser.parse_args(splits)
return tuple(getattr(args, f"{class_name}_{i}") for i in range(num_to_parse))
@classmethod
def get_help_text(
cls,
argv: Optional[str] = None,
multiple=False,
conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
add_option_string_dash_variants=DashVariant.AUTO,
**parser_kwargs,
) -> str:
import contextlib
from io import StringIO
f = StringIO()
if argv is None:
argv = "--help"
elif not argv.endswith("--help"):
argv = argv + " --help"
with contextlib.suppress(SystemExit), contextlib.redirect_stdout(f):
_ = cls.setup(
argv,
conflict_resolution_mode=conflict_resolution_mode,
add_option_string_dash_variants=add_option_string_dash_variants,
**parser_kwargs,
)
s = f.getvalue()
return s
ListFormattingFunction = Callable[[List[Any]], str]
ListOfListsFormattingFunction = Callable[[List[List[Any]]], str]
def format_list_using_spaces(value_list: List[Any]) -> str:
return " ".join(str(p) for p in value_list)
def format_list_using_brackets(value_list: List[Any]) -> str:
return f"[{','.join(str(p) for p in value_list)}]"
def format_list_using_single_quotes(value_list: List[Any]) -> str:
return f"'{format_list_using_spaces(value_list)}'"
def format_list_using_double_quotes(value_list: List[Any]) -> str:
return f'"{format_list_using_spaces(value_list)}"'
def format_lists_using_brackets(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_brackets(value_list) for value_list in list_of_lists)
def format_lists_using_double_quotes(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_double_quotes(value_list) for value_list in list_of_lists)
def format_lists_using_single_quotes(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_single_quotes(value_list) for value_list in list_of_lists)
| 33.767717
| 104
| 0.670164
|
import shlex
import string
import sys
from contextlib import contextmanager
from typing import Any, Callable, Generic, List, Optional, Tuple, Type, TypeVar, cast
import pytest
import simple_parsing
from simple_parsing import ConflictResolution, DashVariant, ParsingError
from simple_parsing.utils import camel_case
from simple_parsing.wrappers.field_wrapper import ArgumentGenerationMode, NestedMode
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
def xfail_param(*args, reason: str):
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
return pytest.param(*args, marks=pytest.mark.xfail(reason=reason))
Dataclass = TypeVar("Dataclass")
@contextmanager
def raises(exception=ParsingError, match=None, code: int = None):
with pytest.raises(exception, match=match):
yield
from io import StringIO
from contextlib import redirect_stderr
@contextmanager
def exits_and_writes_to_stderr(match: str = ""):
s = StringIO()
with redirect_stderr(s), raises(SystemExit):
yield
s.seek(0)
err_string = s.read()
if match:
assert match in err_string, err_string
else:
assert err_string, err_string
@contextmanager
def raises_missing_required_arg():
with exits_and_writes_to_stderr("the following arguments are required"):
yield
@contextmanager
def raises_expected_n_args(n: int):
with exits_and_writes_to_stderr(f"expected {n} arguments"):
yield
@contextmanager
def raises_unrecognized_args(*args: str):
with exits_and_writes_to_stderr("unrecognized arguments: " + " ".join(args or [])):
yield
def assert_help_output_equals(actual: str, expected: str) -> None:
prog = sys.argv[0].split("/")[-1]
if prog != "pytest":
expected = expected.replace("usage: pytest", f"usage: {prog}")
remove = string.punctuation + string.whitespace
if "optional arguments" in expected and sys.version_info[:2] >= (3, 10):
expected = expected.replace("optional arguments", "options")
actual_str = "".join(actual.split())
    actual_str = actual_str.translate(str.maketrans("", "", remove))
expected_str = expected.translate(str.maketrans("", "", remove))
assert actual_str == expected_str, "\n" + "\n".join([actual_str, expected_str])
T = TypeVar("T")
class TestParser(simple_parsing.ArgumentParser, Generic[T]):
__test__ = False
def __init__(self, *args, **kwargs):
self._current_dest = None
self._current_dataclass = None
super().__init__(*args, **kwargs)
def add_arguments(self, dataclass: Type, dest, prefix="", default=None):
if self._current_dest == dest and self._current_dataclass == dataclass:
return
self._current_dest = dest
self._current_dataclass = dataclass
return super().add_arguments(dataclass, dest, prefix=prefix, default=default)
def __call__(self, args: str) -> T:
namespace = self.parse_args(shlex.split(args))
value = getattr(namespace, self._current_dest)
value = cast(T, value)
return value
class TestSetup:
@classmethod
def setup(
cls: Type[Dataclass],
arguments: Optional[str] = "",
dest: Optional[str] = None,
default: Optional[Dataclass] = None,
conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
add_option_string_dash_variants: DashVariant = DashVariant.AUTO,
parse_known_args: bool = False,
attempt_to_reorder: bool = False,
*,
argument_generation_mode: ArgumentGenerationMode = ArgumentGenerationMode.FLAT,
nested_mode: NestedMode = NestedMode.DEFAULT,
) -> Dataclass:
parser = simple_parsing.ArgumentParser(
conflict_resolution=conflict_resolution_mode,
add_option_string_dash_variants=add_option_string_dash_variants,
argument_generation_mode=argument_generation_mode,
nested_mode=nested_mode,
)
if dest is None:
dest = camel_case(cls.__name__)
parser.add_arguments(cls, dest=dest, default=default)
if arguments is None:
if parse_known_args:
args = parser.parse_known_args(attempt_to_reorder=attempt_to_reorder)
else:
args = parser.parse_args()
else:
splits = shlex.split(arguments)
if parse_known_args:
args, unknown_args = parser.parse_known_args(
splits, attempt_to_reorder=attempt_to_reorder
)
else:
args = parser.parse_args(splits)
assert hasattr(args, dest), f"attribute '{dest}' not found in args {args}"
instance: Dataclass = getattr(args, dest)
delattr(args, dest)
# expect any other arguments.
args_dict = vars(args).copy()
args_dict.pop("subgroups", None)
assert not args_dict, f"Namespace has leftover garbage values (besides subgroups): {args}"
instance = cast(Dataclass, instance)
return instance
@classmethod
def setup_multiple(
cls: Type[Dataclass], num_to_parse: int, arguments: Optional[str] = ""
) -> Tuple[Dataclass, ...]:
conflict_resolution_mode: ConflictResolution = ConflictResolution.ALWAYS_MERGE
parser = simple_parsing.ArgumentParser(conflict_resolution=conflict_resolution_mode)
class_name = camel_case(cls.__name__)
for i in range(num_to_parse):
parser.add_arguments(cls, f"{class_name}_{i}")
if arguments is None:
args = parser.parse_args()
else:
splits = shlex.split(arguments)
args = parser.parse_args(splits)
return tuple(getattr(args, f"{class_name}_{i}") for i in range(num_to_parse))
@classmethod
def get_help_text(
cls,
argv: Optional[str] = None,
multiple=False,
conflict_resolution_mode: ConflictResolution = ConflictResolution.AUTO,
add_option_string_dash_variants=DashVariant.AUTO,
**parser_kwargs,
) -> str:
import contextlib
from io import StringIO
f = StringIO()
if argv is None:
argv = "--help"
elif not argv.endswith("--help"):
argv = argv + " --help"
with contextlib.suppress(SystemExit), contextlib.redirect_stdout(f):
_ = cls.setup(
argv,
conflict_resolution_mode=conflict_resolution_mode,
add_option_string_dash_variants=add_option_string_dash_variants,
**parser_kwargs,
)
s = f.getvalue()
return s
ListFormattingFunction = Callable[[List[Any]], str]
ListOfListsFormattingFunction = Callable[[List[List[Any]]], str]
def format_list_using_spaces(value_list: List[Any]) -> str:
return " ".join(str(p) for p in value_list)
def format_list_using_brackets(value_list: List[Any]) -> str:
return f"[{','.join(str(p) for p in value_list)}]"
def format_list_using_single_quotes(value_list: List[Any]) -> str:
return f"'{format_list_using_spaces(value_list)}'"
def format_list_using_double_quotes(value_list: List[Any]) -> str:
return f'"{format_list_using_spaces(value_list)}"'
def format_lists_using_brackets(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_brackets(value_list) for value_list in list_of_lists)
def format_lists_using_double_quotes(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_double_quotes(value_list) for value_list in list_of_lists)
def format_lists_using_single_quotes(list_of_lists: List[List[Any]]) -> str:
return " ".join(format_list_using_single_quotes(value_list) for value_list in list_of_lists)
| true
| true
|
790b90b30d402f9cc6bdb0c0b8cce8daf19fb973
| 7,732
|
py
|
Python
|
test/functional/abandonconflict.py
|
youngseoka/youngseokcoin_half
|
ae63c8ec8e94d19197c3c365a436d70a22a0cfeb
|
[
"MIT"
] | null | null | null |
test/functional/abandonconflict.py
|
youngseoka/youngseokcoin_half
|
ae63c8ec8e94d19197c3c365a436d70a22a0cfeb
|
[
"MIT"
] | 1
|
2020-05-21T00:57:53.000Z
|
2020-05-21T00:57:53.000Z
|
test/functional/abandonconflict.py
|
youngseoka/youngseokcoin_half
|
ae63c8ec8e94d19197c3c365a436d70a22a0cfeb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Youngseokcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the abandontransaction RPC.
The abandontransaction RPC marks a transaction and all its in-wallet
descendants as abandoned which allows their inputs to be respent. It can be
used to replace "stuck" or evicted transactions. It only works on transactions
which are not included in a block and are not currently in the mempool. It has
no effect on transactions which are already conflicted or abandoned.
"""
from test_framework.test_framework import YoungseokcoinTestFramework
from test_framework.util import *
class AbandonConflictTest(YoungseokcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001")) #no more than fees lost
balance = newbalance
# Disconnect nodes so node0's transactions don't get into node1's mempool
disconnect_nodes(self.nodes[0], 1)
# Identify the 10ysc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10ysc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998ysc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Not in mempool txs from self should only reduce balance
# inputs are still spent, but change not received
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
# Unconfirmed received funds that are not in mempool, also shouldn't show
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
# Abandon original transaction and verify inputs are available again
# including that the child tx was also abandoned
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
# But if its received again then it is unabandoned
# And since now in mempool, the change is available
# But its child tx remains abandoned
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
# Send child tx again so its unabandoned
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
# Remove using high relay fee again
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 YSC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
# Invalidate the block with the double spend and B's 10 YSC output should no longer be available
# Don't think C's should either
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
#assert_equal(newbalance, balance - Decimal("10"))
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
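# Hedged sketch of the abandon flow exercised above, outside the test framework
# (`node` is a hypothetical RPC proxy to a running youngseokcoind):
def abandon_stuck_tx(node, txid):
    # abandontransaction only applies to a tx that is neither mined nor in the
    # mempool; it marks the tx and its in-wallet descendants abandoned so that
    # their inputs can be respent
    assert txid not in node.getrawmempool()
    node.abandontransaction(txid)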
| 48.628931
| 137
| 0.662442
|
from test_framework.test_framework import YoungseokcoinTestFramework
from test_framework.util import *
class AbandonConflictTest(YoungseokcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001"], []]
def run_test(self):
self.nodes[1].generate(100)
sync_blocks(self.nodes)
balance = self.nodes[0].getbalance()
txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
sync_mempools(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
newbalance = self.nodes[0].getbalance()
assert(balance - newbalance < Decimal("0.001"))
balance = newbalance
disconnect_nodes(self.nodes[0], 1)
# Identify the 10ysc outputs
nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
inputs =[]
# spend 10ysc outputs from txA and txB
inputs.append({"txid":txA, "vout":nA})
inputs.append({"txid":txB, "vout":nB})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
outputs[self.nodes[1].getnewaddress()] = Decimal("5")
signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
# Identify the 14.99998ysc output
nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
#Create a child tx spending AB1 and C
inputs = []
inputs.append({"txid":txAB1, "vout":nAB})
inputs.append({"txid":txC, "vout":nC})
outputs = {}
outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
# In mempool txs from self should increase balance from change
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
balance = newbalance
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
# TODO: redo with eviction
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
# Verify txs no longer in either node's mempool
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
# up in unconfirmed balance
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
assert_equal(unconfbalance, newbalance)
# Also shouldn't show up in listunspent
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
balance = newbalance
self.nodes[0].abandontransaction(txAB1)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(self.nodes[0].getbalance(), balance)
self.nodes[0].sendrawtransaction(signed["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
balance = newbalance
self.nodes[0].sendrawtransaction(signed2["hex"])
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
balance = newbalance
self.stop_node(0)
self.start_node(0, extra_args=["-minrelaytxfee=0.0001"])
assert_equal(len(self.nodes[0].getrawmempool()), 0)
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
# Mine double spend from node 1
inputs =[]
inputs.append({"txid":txA, "vout":nA})
outputs = {}
outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
tx = self.nodes[0].createrawtransaction(inputs, outputs)
signed = self.nodes[0].signrawtransaction(tx)
self.nodes[1].sendrawtransaction(signed["hex"])
self.nodes[1].generate(1)
connect_nodes(self.nodes[0], 1)
sync_blocks(self.nodes)
# Verify that B and C's 10 YSC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
# Invalidate the block with the double spend and B's 10 YSC output should no longer be available
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
newbalance = self.nodes[0].getbalance()
self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
self.log.info(str(balance) + " -> " + str(newbalance) + " ?")
if __name__ == '__main__':
AbandonConflictTest().main()
| true
| true
|
790b91273bac667d95710a4535755a287da3ae43
| 5,444
|
py
|
Python
|
camper/twit/models.py
|
drinks/camper
|
82d9f1342886d91bf6787c1bcdb1a7cb62e53ca3
|
[
"BSD-3-Clause"
] | null | null | null |
camper/twit/models.py
|
drinks/camper
|
82d9f1342886d91bf6787c1bcdb1a7cb62e53ca3
|
[
"BSD-3-Clause"
] | null | null | null |
camper/twit/models.py
|
drinks/camper
|
82d9f1342886d91bf6787c1bcdb1a7cb62e53ca3
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import models
from django.template.defaultfilters import truncatechars
from django.utils import timezone
from camper.sked.models import Event, Session
from camper.twit.threads import SendTweetThread
class TweetTooLongError(Exception):
def __init__(self, msg=None):
self.msg = msg
if not self.msg:
self.msg = 'Adding this session would result in a tweet longer than 140 characters.'
class AlreadyAssignedError(Exception):
def __init__(self, msg=None):
self.msg = msg
if not self.msg:
self.msg = 'This session already belongs to a tweet in this sequence.'
class Tweet(models.Model):
sent_at = models.DateTimeField(blank=True, null=True)
class Meta:
abstract = True
def send(self):
        '''This is weird. It can only be called from the first tweet in
        a series, raising NotImplementedError if called on a non-initial tweet.
        It spins off a thread to make the actual api calls, which
        manages state within the series.
        '''
if self.previous:
raise NotImplementedError('Serial tweets can only be sent from the beginning.')
SendTweetThread(self).start()
@property
def is_sent(self):
return self.sent_at is not None
class SessionBlockTweetManager(models.Manager):
def unsent(qs):
return qs.filter(sent_at=None, previous=None)
class SessionBlockTweet(Tweet):
timeslot = models.DateTimeField()
event = models.ForeignKey(Event, related_name="session_tweets")
session_ids = models.CommaSeparatedIntegerField(max_length=128,
blank=True, default="")
previous = models.OneToOneField('SessionBlockTweet', blank=True,
null=True, unique=True, related_name="next")
objects = SessionBlockTweetManager()
class Meta:
ordering = ('-timeslot', 'id')
def __unicode__(self):
try:
return 'Tweet %s of %s for %s at %s' % (
self.index + 1, self.total, self.timeslot, self.event)
        except Exception:
return 'Tweet for %s at %s' % (self.timeslot, self.event)
def touch(self):
self._seq = None
self._sessions = None
def get_sequence(self):
try:
if self._seq is not None:
return self._seq
except AttributeError:
pass
seq = []
cursor = self
while cursor.previous:
cursor = cursor.previous
seq.append(cursor)
while True:
try:
cursor = cursor.next
seq.append(cursor)
except SessionBlockTweet.DoesNotExist:
break
self._seq = seq
return self.get_sequence()
def first_in_sequence(self):
seq = self.get_sequence()
return seq[0]
def get_session_ids(self):
try:
return [int(id) for id in self.session_ids.split(',')]
        except Exception:
return []
def add_session(self, session):
if self.length < 140:
assigned = [id for tweet in self.get_sequence() for id in tweet.get_session_ids()]
if session.id in assigned:
raise AlreadyAssignedError()
locally_assigned = self.get_session_ids()
locally_assigned.append(session.id)
self.session_ids = ','.join([str(id) for id in locally_assigned])
self.touch()
if self.length > 140:
if self.sessions.count() > 1:
self.remove_session(session)
raise TweetTooLongError()
else:
raise TweetTooLongError()
def remove_session(self, session):
self.session_ids = ','.join([str(id) for
id in self.get_session_ids() if
id != session.id])
self.touch()
@property
def sessions(self):
try:
if self._sessions is not None:
return self._sessions
except AttributeError:
pass
try:
self._sessions = Session.objects.filter(id__in=self.get_session_ids())
except ValueError:
self._sessions = Session.objects.none()
return self.sessions
@property
def index(self):
seq = self.get_sequence()
return seq.index(self)
@property
def is_first(self):
return self.previous is None
@property
def is_last(self):
try:
return self.next is None
except SessionBlockTweet.DoesNotExist:
return True
@property
def total(self):
seq = self.get_sequence()
return len(seq)
@property
def text(self):
txt = u''
if self.is_first:
txt += u'Coming up at %s: ' % (self.timeslot
.astimezone(timezone.get_current_timezone())
.strftime('%-I:%M'))
txt += u', '.join(['%s (%s)' % (truncatechars(s.title, 120) if
                                        self.sessions.count() == 1 else
s.title, s.location.name) for
s in self.sessions])
return txt
@property
def length(self):
return len(self.text)
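# Distilled, framework-free sketch of the previous/next walk performed by
# get_sequence() above (plain objects stand in for the Django model):
class _Node(object):
    def __init__(self, previous=None):
        self.previous = previous
        self.next = None
        if previous is not None:
            previous.next = self
def _sequence(node):
    while node.previous:            # rewind to the first tweet in the chain
        node = node.previous
    seq = [node]
    while node.next is not None:    # then walk forward, collecting the rest
        node = node.next
        seq.append(node)
    return seq
# e.g.: a = _Node(); b = _Node(a); c = _Node(b); _sequence(b) == [a, b, c]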
| 30.58427
| 96
| 0.560434
|
from django.db import models
from django.template.defaultfilters import truncatechars
from django.utils import timezone
from camper.sked.models import Event, Session
from camper.twit.threads import SendTweetThread
class TweetTooLongError(Exception):
def __init__(self, msg=None):
self.msg = msg
if not self.msg:
self.msg = 'Adding this session would result in a tweet longer than 140 characters.'
class AlreadyAssignedError(Exception):
def __init__(self, msg=None):
self.msg = msg
if not self.msg:
self.msg = 'This session already belongs to a tweet in this sequence.'
class Tweet(models.Model):
sent_at = models.DateTimeField(blank=True, null=True)
class Meta:
abstract = True
def send(self):
if self.previous:
raise NotImplementedError('Serial tweets can only be sent from the beginning.')
SendTweetThread(self).start()
@property
def is_sent(self):
return self.sent_at is not None
class SessionBlockTweetManager(models.Manager):
def unsent(qs):
return qs.filter(sent_at=None, previous=None)
class SessionBlockTweet(Tweet):
timeslot = models.DateTimeField()
event = models.ForeignKey(Event, related_name="session_tweets")
session_ids = models.CommaSeparatedIntegerField(max_length=128,
blank=True, default="")
previous = models.OneToOneField('SessionBlockTweet', blank=True,
null=True, unique=True, related_name="next")
objects = SessionBlockTweetManager()
class Meta:
ordering = ('-timeslot', 'id')
def __unicode__(self):
try:
return 'Tweet %s of %s for %s at %s' % (
self.index + 1, self.total, self.timeslot, self.event)
        except Exception:
return 'Tweet for %s at %s' % (self.timeslot, self.event)
def touch(self):
self._seq = None
self._sessions = None
def get_sequence(self):
try:
if self._seq is not None:
return self._seq
except AttributeError:
pass
seq = []
cursor = self
while cursor.previous:
cursor = cursor.previous
seq.append(cursor)
while True:
try:
cursor = cursor.next
seq.append(cursor)
except SessionBlockTweet.DoesNotExist:
break
self._seq = seq
return self.get_sequence()
def first_in_sequence(self):
seq = self.get_sequence()
return seq[0]
def get_session_ids(self):
try:
return [int(id) for id in self.session_ids.split(',')]
        except Exception:
return []
def add_session(self, session):
if self.length < 140:
assigned = [id for tweet in self.get_sequence() for id in tweet.get_session_ids()]
if session.id in assigned:
raise AlreadyAssignedError()
locally_assigned = self.get_session_ids()
locally_assigned.append(session.id)
self.session_ids = ','.join([str(id) for id in locally_assigned])
self.touch()
if self.length > 140:
if self.sessions.count() > 1:
self.remove_session(session)
raise TweetTooLongError()
else:
raise TweetTooLongError()
def remove_session(self, session):
self.session_ids = ','.join([str(id) for
id in self.get_session_ids() if
id != session.id])
self.touch()
@property
def sessions(self):
try:
if self._sessions is not None:
return self._sessions
except AttributeError:
pass
try:
self._sessions = Session.objects.filter(id__in=self.get_session_ids())
except ValueError:
self._sessions = Session.objects.none()
return self.sessions
@property
def index(self):
seq = self.get_sequence()
return seq.index(self)
@property
def is_first(self):
return self.previous is None
@property
def is_last(self):
try:
return self.next is None
except SessionBlockTweet.DoesNotExist:
return True
@property
def total(self):
seq = self.get_sequence()
return len(seq)
@property
def text(self):
txt = u''
if self.is_first:
txt += u'Coming up at %s: ' % (self.timeslot
.astimezone(timezone.get_current_timezone())
.strftime('%-I:%M'))
txt += u', '.join(['%s (%s)' % (truncatechars(s.title, 120) if
                                        self.sessions.count() == 1 else
s.title, s.location.name) for
s in self.sessions])
return txt
@property
def length(self):
return len(self.text)
| true
| true
|
790b9400e23a410d37cd19c544b89676d2c7a2ae
| 1,319
|
py
|
Python
|
beanborg/bb_mover.py
|
StefanD986/beanborg
|
5578b8e6f68deb3f7cf0c7e5fce396f681fe99bc
|
[
"BSD-2-Clause"
] | null | null | null |
beanborg/bb_mover.py
|
StefanD986/beanborg
|
5578b8e6f68deb3f7cf0c7e5fce396f681fe99bc
|
[
"BSD-2-Clause"
] | null | null | null |
beanborg/bb_mover.py
|
StefanD986/beanborg
|
5578b8e6f68deb3f7cf0c7e5fce396f681fe99bc
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__copyright__ = "Copyright (C) 2021 Luciano Fiandesio"
__license__ = "GNU GPLv2"
import argparse
import os
import sys
import glob
import yaml
from config import *
from arg_parser import *
def main():
args = eval_args('Move bank csv file to processing folder')
config = init_config(args.file, args.debug)
if not os.path.isdir(config.csv.download_path):
print("folder: %s does not exist!"%(config.csv.download_path))
sys.exit(-1)
if not os.path.isdir(config.csv.target):
os.mkdir(config.csv.target)
# count number of files starting with:
file_count = len(glob.glob1(config.csv.download_path, config.csv.name + "*"))
if file_count > 1:
print("more than one file starting with %s found in %s. Can not continue."%(config.csv.name,config.csv.download_path))
sys.exit(-1)
if file_count == 0:
print("No file found in %s with name starting with: %s"%(config.csv.download_path, config.csv.name))
sys.exit(-1)
for f in os.listdir(config.csv.download_path):
if f.startswith(config.csv.name):
os.rename(config.csv.download_path + "/" + f, config.csv.target + "/" + config.csv.ref + ".csv")
print("Done :) ")
if __name__ == "__main__":
main()
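# Hedged sketch of the move step above in isolation (paths and names are
# hypothetical; the real values come from the config loaded by init_config):
def move_first_match(download_path, name, target, ref):
    matches = glob.glob1(download_path, name + "*")
    if len(matches) != 1:
        sys.exit("expected exactly one file starting with %s in %s" % (name, download_path))
    os.rename(os.path.join(download_path, matches[0]),
              os.path.join(target, ref + ".csv"))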
| 27.479167
| 126
| 0.648976
|
__copyright__ = "Copyright (C) 2021 Luciano Fiandesio"
__license__ = "GNU GPLv2"
import argparse
import os
import sys
import glob
import yaml
from config import *
from arg_parser import *
def main():
args = eval_args('Move bank csv file to processing folder')
config = init_config(args.file, args.debug)
if not os.path.isdir(config.csv.download_path):
print("folder: %s does not exist!"%(config.csv.download_path))
sys.exit(-1)
if not os.path.isdir(config.csv.target):
os.mkdir(config.csv.target)
file_count = len(glob.glob1(config.csv.download_path, config.csv.name + "*"))
if file_count > 1:
print("more than one file starting with %s found in %s. Can not continue."%(config.csv.name,config.csv.download_path))
sys.exit(-1)
if file_count == 0:
print("No file found in %s with name starting with: %s"%(config.csv.download_path, config.csv.name))
sys.exit(-1)
for f in os.listdir(config.csv.download_path):
if f.startswith(config.csv.name):
os.rename(config.csv.download_path + "/" + f, config.csv.target + "/" + config.csv.ref + ".csv")
print("Done :) ")
if __name__ == "__main__":
main()
| true
| true
|
790b940ed96894b131d6fbb1b7bb18fae26e8fdf
| 3,245
|
py
|
Python
|
salt/transport/__init__.py
|
otrempe/salt
|
28d3ecc261f3528a830ae60b715469f2894123df
|
[
"Apache-2.0"
] | null | null | null |
salt/transport/__init__.py
|
otrempe/salt
|
28d3ecc261f3528a830ae60b715469f2894123df
|
[
"Apache-2.0"
] | null | null | null |
salt/transport/__init__.py
|
otrempe/salt
|
28d3ecc261f3528a830ae60b715469f2894123df
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Encapsulate the different transports available to Salt. Currently this is only ZeroMQ.
'''
import salt.payload
import salt.auth
class Channel(object):
@staticmethod
def factory(opts, **kwargs):
# Default to ZeroMQ for now
ttype = 'zeromq'
if 'transport_type' in opts:
ttype = opts['transport_type']
elif 'transport_type' in opts.get('pillar', {}).get('master', {}):
ttype = opts['pillar']['master']['transport_type']
if ttype == 'zeromq':
return ZeroMQChannel(opts, **kwargs)
else:
raise Exception("Channels are only defined for ZeroMQ")
# return NewKindOfChannel(opts, **kwargs)
class ZeroMQChannel(Channel):
'''
Encapsulate sending routines to ZeroMQ.
ZMQ Channels default to 'crypt=aes'
'''
def __init__(self, opts, **kwargs):
self.opts = opts
self.ttype = 'zeromq'
# crypt defaults to 'aes'
self.crypt = kwargs['crypt'] if 'crypt' in kwargs else 'aes'
self.serial = salt.payload.Serial(opts)
if self.crypt != 'clear':
if 'auth' in kwargs:
self.auth = kwargs['auth']
else:
self.auth = salt.crypt.SAuth(opts)
if 'master_uri' in kwargs:
master_uri = kwargs['master_uri']
else:
master_uri = opts['master_uri']
self.sreq = salt.payload.SREQ(master_uri)
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
ret = self.sreq.send('aes', self.auth.crypticle.dumps(load), tries, timeout)
key = self.auth.get_keys()
aes = key.private_decrypt(ret['key'], 4)
pcrypt = salt.crypt.Crypticle(self.opts, aes)
return pcrypt.loads(ret[dictkey])
def _crypted_transfer(self, load, tries=3, timeout=60):
'''
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
'''
def _do_transfer():
data = self.sreq.send(
self.crypt,
self.auth.crypticle.dumps(load),
tries,
timeout)
# we may not have always data
# as for example for saltcall ret submission, this is a blind
# communication, we do not subscribe to return events, we just
# upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
return data
try:
return _do_transfer()
except salt.crypt.AuthenticationError:
self.auth = salt.crypt.SAuth(self.opts)
return _do_transfer()
def _uncrypted_transfer(self, load, tries=3, timeout=60):
return self.sreq.send(self.crypt, load, tries, timeout)
def send(self, load, tries=3, timeout=60):
if self.crypt != 'clear':
return self._crypted_transfer(load, tries, timeout)
else:
return self._uncrypted_transfer(load, tries, timeout)
# Do we ever do non-crypted transfers?
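# Illustrative sketch of the factory in use (the opts dict is hypothetical and
# far smaller than a real minion config):
def _send_clear_example(load):
    opts = {'transport_type': 'zeromq', 'master_uri': 'tcp://127.0.0.1:4506'}
    channel = Channel.factory(opts, crypt='clear')  # 'clear' skips SAuth setup
    return channel.send(load, tries=1, timeout=30)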
| 33.453608
| 89
| 0.589522
|
import salt.payload
import salt.auth
class Channel(object):
@staticmethod
def factory(opts, **kwargs):
ttype = 'zeromq'
if 'transport_type' in opts:
ttype = opts['transport_type']
elif 'transport_type' in opts.get('pillar', {}).get('master', {}):
ttype = opts['pillar']['master']['transport_type']
if ttype == 'zeromq':
return ZeroMQChannel(opts, **kwargs)
else:
raise Exception("Channels are only defined for ZeroMQ")
class ZeroMQChannel(Channel):
def __init__(self, opts, **kwargs):
self.opts = opts
self.ttype = 'zeromq'
self.crypt = kwargs['crypt'] if 'crypt' in kwargs else 'aes'
self.serial = salt.payload.Serial(opts)
if self.crypt != 'clear':
if 'auth' in kwargs:
self.auth = kwargs['auth']
else:
self.auth = salt.crypt.SAuth(opts)
if 'master_uri' in kwargs:
master_uri = kwargs['master_uri']
else:
master_uri = opts['master_uri']
self.sreq = salt.payload.SREQ(master_uri)
def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
ret = self.sreq.send('aes', self.auth.crypticle.dumps(load), tries, timeout)
key = self.auth.get_keys()
aes = key.private_decrypt(ret['key'], 4)
pcrypt = salt.crypt.Crypticle(self.opts, aes)
return pcrypt.loads(ret[dictkey])
def _crypted_transfer(self, load, tries=3, timeout=60):
def _do_transfer():
data = self.sreq.send(
self.crypt,
self.auth.crypticle.dumps(load),
tries,
timeout)
if data:
data = self.auth.crypticle.loads(data)
return data
try:
return _do_transfer()
except salt.crypt.AuthenticationError:
self.auth = salt.crypt.SAuth(self.opts)
return _do_transfer()
def _uncrypted_transfer(self, load, tries=3, timeout=60):
return self.sreq.send(self.crypt, load, tries, timeout)
def send(self, load, tries=3, timeout=60):
if self.crypt != 'clear':
return self._crypted_transfer(load, tries, timeout)
else:
return self._uncrypted_transfer(load, tries, timeout)
| true
| true
|
790b949f5954819ff37e1d8839a955e82e4336d0
| 6,918
|
py
|
Python
|
playback/templates/linuxbridge_agent_ini.py
|
jiasir/playback
|
58b2a5d669dcfaa8cad50c544a4b068dcacf9b69
|
[
"MIT"
] | 6
|
2015-05-09T10:39:54.000Z
|
2017-07-02T21:19:42.000Z
|
playback/templates/linuxbridge_agent_ini.py
|
jiasir/playback
|
58b2a5d669dcfaa8cad50c544a4b068dcacf9b69
|
[
"MIT"
] | 20
|
2015-06-10T05:02:42.000Z
|
2022-03-29T21:54:07.000Z
|
playback/templates/linuxbridge_agent_ini.py
|
jiasir/playback
|
58b2a5d669dcfaa8cad50c544a4b068dcacf9b69
|
[
"MIT"
] | 6
|
2015-03-25T06:13:38.000Z
|
2016-04-08T02:22:05.000Z
|
conf_linuxbridge_agent_ini = """[DEFAULT]
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
#debug = false
# If set to false, the logging level will be set to WARNING instead of the
# default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or
# removed this handler will open a new log file with specified path
# instantaneously. It makes sense only if log_file option is specified and
# Linux platform is used. This option is ignored if log_config_append is set.
# (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message
# is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[linux_bridge]
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_interface> tuples mapping physical
# network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical
# networks listed in network_vlan_ranges on the server should have
# mappings to appropriate interfaces on each agent.
#
physical_interface_mappings = provider:{{ public_interface }}
# Example: physical_interface_mappings = physnet1:eth1
[vxlan]
# (BoolOpt) enable VXLAN on the agent
# VXLAN support can be enabled when agent is managed by ml2 plugin using
# linuxbridge mechanism driver.
enable_vxlan = True
#
# (IntOpt) use specific TTL for vxlan interface protocol packets
# ttl =
#
# (IntOpt) use specific TOS for vxlan interface protocol packets
# tos =
#
# (StrOpt) multicast group or group range to use for broadcast emulation.
# Specifying a range allows different VNIs to use different group addresses,
# reducing or eliminating spurious broadcast traffic to the tunnel endpoints.
# Ranges are specified by using CIDR notation. To reserve a unique group for
# each possible (24-bit) VNI, use a /8 such as 239.0.0.0/8.
# This setting must be the same on all the agents.
# vxlan_group = 224.0.0.1
#
# (StrOpt) Local IP address to use for VXLAN endpoints (required)
local_ip = {{ local_ip }}
#
# (BoolOpt) Flag to enable l2population extension. This option should be used
# in conjunction with ml2 plugin l2population mechanism driver (in that case,
# both linuxbridge and l2population mechanism drivers should be loaded).
# It enables plugin to populate VXLAN forwarding table, in order to limit
# the use of broadcast emulation (multicast will be turned off if kernel and
# iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10)
l2_population = True
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (IntOpt) Set new timeout in seconds for new rpc calls after agent receives
# SIGTERM. If value is set to 0, rpc timeout won't be changed.
#
# quitting_rpc_timeout = 10
prevent_arp_spoofing = True
[securitygroup]
# Firewall driver for realizing neutron security group function
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
"""
| 42.703704
| 414
| 0.775513
|
conf_linuxbridge_agent_ini = """[DEFAULT]
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
#debug = false
# If set to false, the logging level will be set to WARNING instead of the
# default INFO level. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#verbose = true
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when logging
# configuration files are used then all logging configuration is set in the
# configuration file and other logging configuration options are ignored (for
# example, logging_context_format_string). (string value)
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %%(asctime)s in log records. Default:
# %(default)s . This option is ignored if log_config_append is set. (string
# value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses logging handler designed to watch file system. When log file is moved or
# removed this handler will open a new log file with specified path
# instantaneously. It makes sense only if log_file option is specified and
# Linux platform is used. This option is ignored if log_config_append is set.
# (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = true
# Format string to use for log messages with context. (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. (string
# value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message
# is DEBUG. (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. (string value)
#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
[linux_bridge]
# (ListOpt) Comma-separated list of
# <physical_network>:<physical_interface> tuples mapping physical
# network names to the agent's node-specific physical network
# interfaces to be used for flat and VLAN networks. All physical
# networks listed in network_vlan_ranges on the server should have
# mappings to appropriate interfaces on each agent.
#
physical_interface_mappings = provider:{{ public_interface }}
# Example: physical_interface_mappings = physnet1:eth1
[vxlan]
# (BoolOpt) enable VXLAN on the agent
# VXLAN support can be enabled when agent is managed by ml2 plugin using
# linuxbridge mechanism driver.
enable_vxlan = True
#
# (IntOpt) use specific TTL for vxlan interface protocol packets
# ttl =
#
# (IntOpt) use specific TOS for vxlan interface protocol packets
# tos =
#
# (StrOpt) multicast group or group range to use for broadcast emulation.
# Specifying a range allows different VNIs to use different group addresses,
# reducing or eliminating spurious broadcast traffic to the tunnel endpoints.
# Ranges are specified by using CIDR notation. To reserve a unique group for
# each possible (24-bit) VNI, use a /8 such as 239.0.0.0/8.
# This setting must be the same on all the agents.
# vxlan_group = 224.0.0.1
#
# (StrOpt) Local IP address to use for VXLAN endpoints (required)
local_ip = {{ local_ip }}
#
# (BoolOpt) Flag to enable l2population extension. This option should be used
# in conjunction with ml2 plugin l2population mechanism driver (in that case,
# both linuxbridge and l2population mechanism drivers should be loaded).
# It enables plugin to populate VXLAN forwarding table, in order to limit
# the use of broadcast emulation (multicast will be turned off if kernel and
# iproute2 supports unicast flooding - requires 3.11 kernel and iproute2 3.10)
l2_population = True
[agent]
# Agent's polling interval in seconds
# polling_interval = 2
# (IntOpt) Set new timeout in seconds for new rpc calls after agent receives
# SIGTERM. If value is set to 0, rpc timeout won't be changed.
#
# quitting_rpc_timeout = 10
prevent_arp_spoofing = True
[securitygroup]
# Firewall driver for realizing neutron security group function
# firewall_driver = neutron.agent.firewall.NoopFirewallDriver
# Example: firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# Controls if neutron security group is enabled or not.
# It should be false when you use nova security group.
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
"""
| true
| true
|
790b94a900934e0ad8a2a66407e25cbac9504749
| 2,233
|
py
|
Python
|
src/data/prepare_dataset.py
|
vinodrajendran001/aerial_segmentation
|
44ce909fe1f6f218930c50825ce7452c8029b20f
|
[
"FTL"
] | null | null | null |
src/data/prepare_dataset.py
|
vinodrajendran001/aerial_segmentation
|
44ce909fe1f6f218930c50825ce7452c8029b20f
|
[
"FTL"
] | 10
|
2021-03-30T14:17:16.000Z
|
2022-03-12T00:50:30.000Z
|
src/data/prepare_dataset.py
|
vinodrajendran001/aerial_segmentation
|
44ce909fe1f6f218930c50825ce7452c8029b20f
|
[
"FTL"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
from glob import glob
import json
import random
import shutil
if __name__ == '__main__':
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#prepare dataset
with open(os.path.join(ROOT_DIR, 'config.json')) as json_file:
config = json.load(json_file)
train_test_split = config['data']['train_percentage']
process_path = os.path.join(ROOT_DIR, 'data', 'processed')
# train path
image_train_path = os.path.join(process_path, 'train', 'images')
label_train_path = os.path.join(process_path, 'train', 'labels')
# test path
image_test_path = os.path.join(process_path, 'test', 'images')
label_test_path = os.path.join(process_path, 'test', 'labels')
# create directories
if not os.path.exists(image_train_path):
os.makedirs(image_train_path)
if not os.path.exists(label_train_path):
os.makedirs(label_train_path)
if not os.path.exists(image_test_path):
os.makedirs(image_test_path)
if not os.path.exists(label_test_path):
os.makedirs(label_test_path)
# check existence of interim dataset
images_interim_path = os.path.join(ROOT_DIR, 'data', 'interim', 'images')
labels_interim_path = os.path.join(ROOT_DIR, 'data', 'interim', 'labels')
if not os.path.exists(images_interim_path) and not os.path.exists(labels_interim_path):
print ("Please run the make_dataset script to process the dataset before run this script")
else:
dl_image_path = glob(images_interim_path + "/*.png")
dl_label_path = glob(labels_interim_path + "/*.png")
pairs = list(zip(dl_image_path, dl_label_path))
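# The split below is sequential; if a randomized split is desired, the pairs
# could be shuffled first (sketch, using the `random` import above):
# random.shuffle(pairs)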
split = len(dl_image_path) * (train_test_split/100)
train_set = pairs[:int(split)]
test_set = pairs[int(split):]
for train_file in train_set:
shutil.copy(train_file[0], image_train_path)
shutil.copy(train_file[1], label_train_path)
for test_file in test_set:
shutil.copy(test_file[0], image_test_path)
shutil.copy(test_file[1], label_test_path)
print ("Train and Test set are prepared successfully")
| 31.9
| 98
| 0.673086
|
import os
from glob import glob
import json
import random
import shutil
if __name__ == '__main__':
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
with open(os.path.join(ROOT_DIR, 'config.json')) as json_file:
config = json.load(json_file)
train_test_split = config['data']['train_percentage']
process_path = os.path.join(ROOT_DIR, 'data', 'processed')
image_train_path = os.path.join(process_path, 'train', 'images')
label_train_path = os.path.join(process_path, 'train', 'labels')
image_test_path = os.path.join(process_path, 'test', 'images')
label_test_path = os.path.join(process_path, 'test', 'labels')
if not os.path.exists(image_train_path):
os.makedirs(image_train_path)
if not os.path.exists(label_train_path):
os.makedirs(label_train_path)
if not os.path.exists(image_test_path):
os.makedirs(image_test_path)
if not os.path.exists(label_test_path):
os.makedirs(label_test_path)
images_interim_path = os.path.join(ROOT_DIR, 'data', 'interim', 'images')
labels_interim_path = os.path.join(ROOT_DIR, 'data', 'interim', 'labels')
if not os.path.exists(images_interim_path) and not os.path.exists(labels_interim_path):
print ("Please run the make_dataset script to process the dataset before run this script")
else:
dl_image_path = glob(images_interim_path + "/*.png")
dl_label_path = glob(labels_interim_path + "/*.png")
pairs = list(zip(dl_image_path, dl_label_path))
split = len(dl_image_path) * (train_test_split/100)
train_set = pairs[:int(split)]
test_set = pairs[int(split):]
for train_file in train_set:
shutil.copy(train_file[0], image_train_path)
shutil.copy(train_file[1], label_train_path)
for test_file in test_set:
shutil.copy(test_file[0], image_test_path)
shutil.copy(test_file[1], label_test_path)
print ("Train and Test set are prepared successfully")
| true
| true
|
790b955027ed9a346ae2c965e9a7544530060b67
| 1,019
|
py
|
Python
|
cli/actions/factory.py
|
daneshvar-amrollahi/polar
|
b72254e1a8354e6a10135cd3990b8edfda02559e
|
[
"MIT"
] | 1
|
2021-11-14T05:52:21.000Z
|
2021-11-14T05:52:21.000Z
|
cli/actions/factory.py
|
daneshvar-amrollahi/polar
|
b72254e1a8354e6a10135cd3990b8edfda02559e
|
[
"MIT"
] | null | null | null |
cli/actions/factory.py
|
daneshvar-amrollahi/polar
|
b72254e1a8354e6a10135cd3990b8edfda02559e
|
[
"MIT"
] | null | null | null |
from argparse import Namespace
from .simulation_action import Action, SimulationAction
from .plot_action import PlotAction
from .gram_charlier_action import GramCharlierAction
from .cornish_fisher_action import CornishFisherAction
from .mc_combination_action import MCCombinationAction
from .print_benchmark_action import PrintBenchmarkAction
from .goals_action import GoalsAction
class ActionFactory:
@classmethod
def create_action(cls, cli_args: Namespace) -> Action:
if cli_args.simulate:
return SimulationAction(cli_args)
if cli_args.goals or cli_args.invariants:
return GoalsAction(cli_args)
if cli_args.plot:
return PlotAction(cli_args)
if cli_args.gram_charlier:
return GramCharlierAction(cli_args)
if cli_args.cornish_fisher:
return CornishFisherAction(cli_args)
if cli_args.mc_comb is not None:
return MCCombinationAction(cli_args)
return PrintBenchmarkAction(cli_args)
| 35.137931
| 58
| 0.747792
|
from argparse import Namespace
from .simulation_action import Action, SimulationAction
from .plot_action import PlotAction
from .gram_charlier_action import GramCharlierAction
from .cornish_fisher_action import CornishFisherAction
from .mc_combination_action import MCCombinationAction
from .print_benchmark_action import PrintBenchmarkAction
from .goals_action import GoalsAction
class ActionFactory:
@classmethod
def create_action(cls, cli_args: Namespace) -> Action:
if cli_args.simulate:
return SimulationAction(cli_args)
if cli_args.goals or cli_args.invariants:
return GoalsAction(cli_args)
if cli_args.plot:
return PlotAction(cli_args)
if cli_args.gram_charlier:
return GramCharlierAction(cli_args)
if cli_args.cornish_fisher:
return CornishFisherAction(cli_args)
if cli_args.mc_comb is not None:
return MCCombinationAction(cli_args)
return PrintBenchmarkAction(cli_args)
| true
| true
|
790b955bc79139bf552cbd934667ff270d4d1e1c
| 12,479
|
py
|
Python
|
run/cmu_runner.py
|
Droliven/MSRGCN
|
5d8d8e3365d3b23ca2ac734ace7e84135a6e3a9e
|
[
"MIT"
] | 28
|
2021-08-21T12:02:12.000Z
|
2022-03-07T03:54:55.000Z
|
run/cmu_runner.py
|
Droliven/MSRGCN
|
5d8d8e3365d3b23ca2ac734ace7e84135a6e3a9e
|
[
"MIT"
] | 6
|
2021-09-07T03:05:51.000Z
|
2022-02-24T03:00:04.000Z
|
run/cmu_runner.py
|
Droliven/MSRGCN
|
5d8d8e3365d3b23ca2ac734ace7e84135a6e3a9e
|
[
"MIT"
] | 6
|
2021-08-21T12:02:16.000Z
|
2021-11-22T14:22:57.000Z
|
#!/usr/bin/env python
# encoding: utf-8
'''
@project : MSRGCN
@file : cmu_runner.py
@author : Droliven
@contact : droliven@163.com
@ide : PyCharm
@time : 2021-07-28 13:29
'''
from datas import CMUMotionDataset, get_dct_matrix, reverse_dct_torch, define_actions_cmu, draw_pic_gt_pred
from nets import MSRGCN, MSRGCNShortTerm
from configs.config import Config
from torch.utils.data import DataLoader
import torch.optim as optim
import torch
import os
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from pprint import pprint
def L2NormLoss_test(gt, out, frame_ids):  # (batch size, feature dim, seq len)
'''
gt: B, 66, 25
'''
t_3d = np.zeros(len(frame_ids))
batch_size, features, seq_len = gt.shape
gt = gt.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3) # B, 25, 22, 3
out = out.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3) # B, 25, 22, 3
for k in np.arange(0, len(frame_ids)):
j = frame_ids[k]
t_3d[k] = torch.mean(torch.norm(gt[:, j, :, :].contiguous().view(-1, 3) - out[:, j, :, :].contiguous().view(-1, 3), 2, 1)).cpu().data.numpy() * batch_size
return t_3d
def L2NormLoss_train(gt, out):
'''
(batch size, feature dim, seq len)
Equivalent to mpjpe_error_p3d().
'''
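# Sketch of the computation (shapes as in the docstring above): reshape
# (B, J*3, T) -> (B, T, J, 3), then average the per-joint L2 norms.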
batch_size, _, seq_len = gt.shape
gt = gt.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
out = out.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
loss = torch.mean(torch.norm(gt - out, 2, dim=-1))
return loss
def lr_decay(optimizer, lr_now, gamma):
lr = lr_now * gamma
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
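# Usage sketch: run() below calls this every other epoch, so the learning
# rate roughly follows lr_0 * gamma ** (epoch // 2).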
class CMURunner():
def __init__(self, exp_name="cmu", input_n=10, output_n=10, dct_n=15, device="cuda:0", num_works=0, test_manner="all", debug_step=1):
super(CMURunner, self).__init__()
# parameters
self.start_epoch = 1
self.best_accuracy = 1e15
self.cfg = Config(exp_name=exp_name, input_n=input_n, output_n=output_n, dct_n=dct_n, device=device, num_works=num_works, test_manner=test_manner)
print("\n================== Configs =================")
pprint(vars(self.cfg), indent=4)
print("==========================================\n")
with open(os.path.join(self.cfg.ckpt_dir, "config.txt"), 'w', encoding='utf-8') as f:
f.write(str(self.cfg.__dict__))
# model
if self.cfg.output_n == 25:
self.model = MSRGCN(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)
elif self.cfg.output_n == 10:
self.model = MSRGCNShortTerm(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)
if self.cfg.device != "cpu":
self.model.cuda(self.cfg.device)
print(">>> total params: {:.2f}M\n".format(
sum(p.numel() for p in self.model.parameters()) / 1000000.0))
self.lr = self.cfg.lr
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
# data
dct_m, i_dct_m = get_dct_matrix(self.cfg.seq_len)
self.dct_m = torch.from_numpy(dct_m).float()
self.i_dct_m = torch.from_numpy(i_dct_m).float()
if self.cfg.device != "cpu":
self.dct_m = self.dct_m.cuda(self.cfg.device, non_blocking=True)
self.i_dct_m = self.i_dct_m.cuda(self.cfg.device, non_blocking=True)
train_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions="all", mode_name="train", input_n=self.cfg.input_n, output_n=self.cfg.output_n,
dct_used=self.cfg.dct_n, split=0, sample_rate=2,
down_key=[('p22', 'p12', self.cfg.Index2212),
('p12', 'p7', self.cfg.Index127),
('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=0, global_min=0, device=self.cfg.device, debug_step=debug_step)
print("train data shape {}".format(train_dataset.gt_all_scales['p32'].shape[0]))
self.train_loader = DataLoader(
dataset=train_dataset,
batch_size=self.cfg.train_batch_size,
shuffle=True,
num_workers=self.cfg.num_works,
pin_memory=True)
self.global_max = train_dataset.global_max
self.global_min = train_dataset.global_min
self.test_loader = dict()
for act in define_actions_cmu("all"):
test_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions=act, mode_name="test", input_n=self.cfg.input_n, output_n=self.cfg.output_n,
dct_used=self.cfg.dct_n, split=1, sample_rate=2,
down_key=[('p22', 'p12', self.cfg.Index2212),
('p12', 'p7', self.cfg.Index127),
('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=self.global_max, global_min=self.global_min, device=self.cfg.device, debug_step=debug_step)
self.test_loader[act] = DataLoader(
dataset=test_dataset,
batch_size=self.cfg.test_batch_size,
shuffle=False,
num_workers=self.cfg.num_works,
pin_memory=True)
print(">>> test {} data {}".format(act, test_dataset.gt_all_scales['p32'].shape[0]))
self.summary = SummaryWriter(self.cfg.ckpt_dir)
def save(self, checkpoint_path, best_err, curr_err):
state = {
"lr": self.lr,
"best_err": best_err,
"curr_err": curr_err,
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
torch.save(state, checkpoint_path)
def restore(self, checkpoint_path):
state = torch.load(checkpoint_path, map_location=self.cfg.device)
self.model.load_state_dict(state["model"])
self.optimizer.load_state_dict(state["optimizer"])
self.lr = state["lr"]
best_err = state['best_err']
curr_err = state["curr_err"]
print("load from lr {}, curr_avg {}, best_avg {}.".format(state["lr"], curr_err, best_err))
def train(self, epoch):
self.model.train()
average_loss = 0
for i, (inputs, gts) in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):
b, cv, t_len = inputs[list(inputs.keys())[0]].shape
# skip the batch if it contains only one sample (required by batch-norm layers)
if b == 1:
continue
self.global_step = (epoch - 1) * len(self.train_loader) + i + 1
for k in inputs:
inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)
gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)
outputs = self.model(inputs)
losses = None
for k in outputs:
# undo normalization: map [-1, 1] back to [global_min, global_max]
outputs[k] = (outputs[k] + 1) / 2
outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min
# inverse DCT back to the time domain
outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)
# loss
loss_curr = L2NormLoss_train(gts[k], outputs[k])
if losses is None:
losses = loss_curr
else:
losses = losses + loss_curr
self.summary.add_scalar(f"Loss/{k}", loss_curr, self.global_step)
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
average_loss += losses.cpu().data.numpy()
average_loss /= (i + 1)
return average_loss
def test(self, epoch=0):
self.model.eval()
frame_ids = self.cfg.frame_ids
total_loss = np.zeros((len(define_actions_cmu("all")), len(frame_ids)))
for act_idx, act in enumerate(define_actions_cmu("all")):
count = 0
for i, (inputs, gts) in enumerate(self.test_loader[act]):
b, cv, t_len = inputs[list(inputs.keys())[0]].shape
for k in inputs:
inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)
gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)
with torch.no_grad():
outputs = self.model(inputs)
# undo normalization: map [-1, 1] back to [global_min, global_max]
for k in outputs:
outputs[k] = (outputs[k] + 1) / 2
outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min
# inverse DCT back to the time domain
outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)
# compute the error: map the 22-joint output back into the 32-joint skeleton
mygt = gts['p32'].view(-1, self.cfg.origin_noden, 3, self.cfg.seq_len).clone()
myout = outputs['p22'].view(-1, self.cfg.final_out_noden, 3, self.cfg.seq_len)
mygt[:, self.cfg.dim_used_3d, :, :] = myout
mygt[:, self.cfg.dim_repeat_32, :, :] = myout[:, self.cfg.dim_repeat_22, :, :]
mygt = mygt.view(-1, self.cfg.origin_noden*3, self.cfg.seq_len)
loss = L2NormLoss_test(gts['p32'][:, :, self.cfg.input_n:], mygt[:, :, self.cfg.input_n:], self.cfg.frame_ids)
total_loss[act_idx] += loss
# count += 1
count += mygt.shape[0]
# ************ plot ground truth vs. prediction
if act_idx == 0 and i == 0:
pred_seq = outputs['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)
gt_seq = gts['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)
for t in range(self.cfg.seq_len):
draw_pic_gt_pred(gt_seq[:, :, t], pred_seq[:, :, t], self.cfg.I22_plot, self.cfg.J22_plot, self.cfg.LR22_plot, os.path.join(self.cfg.ckpt_dir, "images", f"{epoch}_{act}_{t}.png"))
total_loss[act_idx] /= count
for fidx, frame in enumerate(frame_ids):
self.summary.add_scalar(f"Test/{act}/{frame}", total_loss[act_idx][fidx], epoch)
self.summary.add_scalar("Test/average", np.mean(total_loss), epoch)
for fidx, frame in enumerate(frame_ids):
self.summary.add_scalar(f"Test/avg{frame}", np.mean(total_loss[:, fidx]), epoch)
return total_loss
def run(self):
for epoch in range(self.start_epoch, self.cfg.n_epoch + 1):
if epoch % 2 == 0:
self.lr = lr_decay(self.optimizer, self.lr, self.cfg.lr_decay)
self.summary.add_scalar("LR", self.lr, epoch)
average_train_loss = self.train(epoch)
if average_train_loss < self.best_accuracy:
self.best_accuracy = average_train_loss
self.save(
os.path.join(self.cfg.ckpt_dir, "models",
'{}_in{}out{}dctn{}_best_epoch{}_err{:.4f}.pth'.format(self.cfg.exp_name,
self.cfg.input_n,
self.cfg.output_n,
self.cfg.dct_n, epoch,
average_train_loss)), self.best_accuracy, average_train_loss)
self.save(os.path.join(self.cfg.ckpt_dir, "models",
'{}_in{}out{}dctn{}_last.pth'.format(self.cfg.exp_name, self.cfg.input_n,
self.cfg.output_n, self.cfg.dct_n)),
self.best_accuracy, average_train_loss)
if epoch % 1 == 0:
loss_l2_test = self.test(epoch)
print('Epoch: {}, LR: {}, Current err test avg: {}'.format(epoch, self.lr, np.mean(loss_l2_test)))
if __name__ == '__main__':
pass
| 45.378182
| 215
| 0.550845
|
from datas import CMUMotionDataset, get_dct_matrix, reverse_dct_torch, define_actions_cmu, draw_pic_gt_pred
from nets import MSRGCN, MSRGCNShortTerm
from configs.config import Config
from torch.utils.data import DataLoader
import torch.optim as optim
import torch
import os
from tensorboardX import SummaryWriter
import numpy as np
from tqdm import tqdm
from pprint import pprint
def L2NormLoss_test(gt, out, frame_ids):
t_3d = np.zeros(len(frame_ids))
batch_size, features, seq_len = gt.shape
gt = gt.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3)
out = out.permute(0, 2, 1).contiguous().view(batch_size, seq_len, -1, 3)
for k in np.arange(0, len(frame_ids)):
j = frame_ids[k]
t_3d[k] = torch.mean(torch.norm(gt[:, j, :, :].contiguous().view(-1, 3) - out[:, j, :, :].contiguous().view(-1, 3), 2, 1)).cpu().data.numpy() * batch_size
return t_3d
def L2NormLoss_train(gt, out):
batch_size, _, seq_len = gt.shape
gt = gt.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
out = out.view(batch_size, -1, 3, seq_len).permute(0, 3, 1, 2).contiguous()
loss = torch.mean(torch.norm(gt - out, 2, dim=-1))
return loss
def lr_decay(optimizer, lr_now, gamma):
lr = lr_now * gamma
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
class CMURunner():
def __init__(self, exp_name="cmu", input_n=10, output_n=10, dct_n=15, device="cuda:0", num_works=0, test_manner="all", debug_step=1):
super(CMURunner, self).__init__()
self.start_epoch = 1
self.best_accuracy = 1e15
self.cfg = Config(exp_name=exp_name, input_n=input_n, output_n=output_n, dct_n=dct_n, device=device, num_works=num_works, test_manner=test_manner)
print("\n================== Configs =================")
pprint(vars(self.cfg), indent=4)
print("==========================================\n")
with open(os.path.join(self.cfg.ckpt_dir, "config.txt"), 'w', encoding='utf-8') as f:
f.write(str(self.cfg.__dict__))
if self.cfg.output_n == 25:
self.model = MSRGCN(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)
elif self.cfg.output_n == 10:
self.model = MSRGCNShortTerm(self.cfg.p_dropout, self.cfg.leaky_c, self.cfg.final_out_noden, input_feature=self.cfg.dct_n)
if self.cfg.device != "cpu":
self.model.cuda(self.cfg.device)
print(">>> total params: {:.2f}M\n".format(
sum(p.numel() for p in self.model.parameters()) / 1000000.0))
self.lr = self.cfg.lr
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr)
dct_m, i_dct_m = get_dct_matrix(self.cfg.seq_len)
self.dct_m = torch.from_numpy(dct_m).float()
self.i_dct_m = torch.from_numpy(i_dct_m).float()
if self.cfg.device != "cpu":
self.dct_m = self.dct_m.cuda(self.cfg.device, non_blocking=True)
self.i_dct_m = self.i_dct_m.cuda(self.cfg.device, non_blocking=True)
train_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions="all", mode_name="train", input_n=self.cfg.input_n, output_n=self.cfg.output_n,
dct_used=self.cfg.dct_n, split=0, sample_rate=2,
down_key=[('p22', 'p12', self.cfg.Index2212),
('p12', 'p7', self.cfg.Index127),
('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=0, global_min=0, device=self.cfg.device, debug_step=debug_step)
print("train data shape {}".format(train_dataset.gt_all_scales['p32'].shape[0]))
self.train_loader = DataLoader(
dataset=train_dataset,
batch_size=self.cfg.train_batch_size,
shuffle=True,
num_workers=self.cfg.num_works,
pin_memory=True)
self.global_max = train_dataset.global_max
self.global_min = train_dataset.global_min
self.test_loader = dict()
for act in define_actions_cmu("all"):
test_dataset = CMUMotionDataset(self.cfg.base_data_dir, actions=act, mode_name="test", input_n=self.cfg.input_n, output_n=self.cfg.output_n,
dct_used=self.cfg.dct_n, split=1, sample_rate=2,
down_key=[('p22', 'p12', self.cfg.Index2212),
('p12', 'p7', self.cfg.Index127),
('p7', 'p4', self.cfg.Index74)], test_manner=self.cfg.test_manner, global_max=self.global_max, global_min=self.global_min, device=self.cfg.device, debug_step=debug_step)
self.test_loader[act] = DataLoader(
dataset=test_dataset,
batch_size=self.cfg.test_batch_size,
shuffle=False,
num_workers=self.cfg.num_works,
pin_memory=True)
print(">>> test {} data {}".format(act, test_dataset.gt_all_scales['p32'].shape[0]))
self.summary = SummaryWriter(self.cfg.ckpt_dir)
def save(self, checkpoint_path, best_err, curr_err):
state = {
"lr": self.lr,
"best_err": best_err,
"curr_err": curr_err,
"model": self.model.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
torch.save(state, checkpoint_path)
def restore(self, checkpoint_path):
state = torch.load(checkpoint_path, map_location=self.cfg.device)
self.model.load_state_dict(state["model"])
self.optimizer.load_state_dict(state["optimizer"])
self.lr = state["lr"]
best_err = state['best_err']
curr_err = state["curr_err"]
print("load from lr {}, curr_avg {}, best_avg {}.".format(state["lr"], curr_err, best_err))
def train(self, epoch):
self.model.train()
average_loss = 0
for i, (inputs, gts) in tqdm(enumerate(self.train_loader), total=len(self.train_loader)):
b, cv, t_len = inputs[list(inputs.keys())[0]].shape
if b == 1:
continue
self.global_step = (epoch - 1) * len(self.train_loader) + i + 1
for k in inputs:
inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)
gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)
outputs = self.model(inputs)
losses = None
for k in outputs:
outputs[k] = (outputs[k] + 1) / 2
outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min
outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)
loss_curr = L2NormLoss_train(gts[k], outputs[k])
if losses is None:
losses = loss_curr
else:
losses = losses + loss_curr
self.summary.add_scalar(f"Loss/{k}", loss_curr, self.global_step)
self.optimizer.zero_grad()
losses.backward()
self.optimizer.step()
average_loss += losses.cpu().data.numpy()
average_loss /= (i + 1)
return average_loss
def test(self, epoch=0):
self.model.eval()
frame_ids = self.cfg.frame_ids
total_loss = np.zeros((len(define_actions_cmu("all")), len(frame_ids)))
for act_idx, act in enumerate(define_actions_cmu("all")):
count = 0
for i, (inputs, gts) in enumerate(self.test_loader[act]):
b, cv, t_len = inputs[list(inputs.keys())[0]].shape
for k in inputs:
inputs[k] = inputs[k].float().cuda(non_blocking=True, device=self.cfg.device)
gts[k] = gts[k].float().cuda(non_blocking=True, device=self.cfg.device)
with torch.no_grad():
outputs = self.model(inputs)
for k in outputs:
outputs[k] = (outputs[k] + 1) / 2
outputs[k] = outputs[k] * (self.global_max - self.global_min) + self.global_min
outputs[k] = reverse_dct_torch(outputs[k], self.i_dct_m, self.cfg.seq_len)
mygt = gts['p32'].view(-1, self.cfg.origin_noden, 3, self.cfg.seq_len).clone()
myout = outputs['p22'].view(-1, self.cfg.final_out_noden, 3, self.cfg.seq_len)
mygt[:, self.cfg.dim_used_3d, :, :] = myout
mygt[:, self.cfg.dim_repeat_32, :, :] = myout[:, self.cfg.dim_repeat_22, :, :]
mygt = mygt.view(-1, self.cfg.origin_noden*3, self.cfg.seq_len)
loss = L2NormLoss_test(gts['p32'][:, :, self.cfg.input_n:], mygt[:, :, self.cfg.input_n:], self.cfg.frame_ids)
total_loss[act_idx] += loss
count += mygt.shape[0]
if act_idx == 0 and i == 0:
pred_seq = outputs['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)
gt_seq = gts['p22'].cpu().data.numpy()[0].reshape(self.cfg.final_out_noden, 3, self.cfg.seq_len)
for t in range(self.cfg.seq_len):
draw_pic_gt_pred(gt_seq[:, :, t], pred_seq[:, :, t], self.cfg.I22_plot, self.cfg.J22_plot, self.cfg.LR22_plot, os.path.join(self.cfg.ckpt_dir, "images", f"{epoch}_{act}_{t}.png"))
total_loss[act_idx] /= count
for fidx, frame in enumerate(frame_ids):
self.summary.add_scalar(f"Test/{act}/{frame}", total_loss[act_idx][fidx], epoch)
self.summary.add_scalar("Test/average", np.mean(total_loss), epoch)
for fidx, frame in enumerate(frame_ids):
self.summary.add_scalar(f"Test/avg{frame}", np.mean(total_loss[:, fidx]), epoch)
return total_loss
def run(self):
for epoch in range(self.start_epoch, self.cfg.n_epoch + 1):
if epoch % 2 == 0:
self.lr = lr_decay(self.optimizer, self.lr, self.cfg.lr_decay)
self.summary.add_scalar("LR", self.lr, epoch)
average_train_loss = self.train(epoch)
if average_train_loss < self.best_accuracy:
self.best_accuracy = average_train_loss
self.save(
os.path.join(self.cfg.ckpt_dir, "models",
'{}_in{}out{}dctn{}_best_epoch{}_err{:.4f}.pth'.format(self.cfg.exp_name,
self.cfg.input_n,
self.cfg.output_n,
self.cfg.dct_n, epoch,
average_train_loss)), self.best_accuracy, average_train_loss)
self.save(os.path.join(self.cfg.ckpt_dir, "models",
'{}_in{}out{}dctn{}_last.pth'.format(self.cfg.exp_name, self.cfg.input_n,
self.cfg.output_n, self.cfg.dct_n)),
self.best_accuracy, average_train_loss)
if epoch % 1 == 0:
loss_l2_test = self.test(epoch)
print('Epoch: {}, LR: {}, Current err test avg: {}'.format(epoch, self.lr, np.mean(loss_l2_test)))
if __name__ == '__main__':
pass
| true
| true
|
790b956f7ead17c423bc56f87db6ed89fe0cd3ba
| 12,598
|
py
|
Python
|
dfirtrack_config/tests/status/test_status_views.py
|
cclauss/dfirtrack
|
2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a
|
[
"Apache-2.0"
] | null | null | null |
dfirtrack_config/tests/status/test_status_views.py
|
cclauss/dfirtrack
|
2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a
|
[
"Apache-2.0"
] | null | null | null |
dfirtrack_config/tests/status/test_status_views.py
|
cclauss/dfirtrack
|
2a307c5fe82e927b3c229a20a02bc0c7a5d66d9a
|
[
"Apache-2.0"
] | null | null | null |
import urllib.parse
from datetime import datetime
from unittest.mock import patch
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from dfirtrack_artifacts.models import (
Artifact,
Artifactpriority,
Artifactstatus,
Artifacttype,
)
from dfirtrack_config.models import Statushistory
from dfirtrack_main.models import (
Analysisstatus,
Case,
Casepriority,
Casestatus,
System,
Systemstatus,
Task,
Taskname,
Taskpriority,
Taskstatus,
)
class StatusViewTestCase(TestCase):
""" status view tests """
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# create object
artifactstatus_1 = Artifactstatus.objects.create(artifactstatus_name='artifactstatus_1')
# create object
artifacttype_1 = Artifacttype.objects.create(artifacttype_name='artifacttype_1')
# create object
casepriority_1 = Casepriority.objects.create(casepriority_name='casepriority_1')
# create object
casestatus_1 = Casestatus.objects.create(casestatus_name='casestatus_1')
# create object
systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
# create object
taskname_1 = Taskname.objects.create(taskname_name='taskname_1')
# create object
taskpriority_1 = Taskpriority.objects.create(taskpriority_name='prio_1')
# create object
taskstatus_1 = Taskstatus.objects.create(taskstatus_name='taskstatus_1')
# create object
system_1 = System.objects.create(
system_name = 'system_1',
systemstatus = systemstatus_1,
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
System.objects.create(
system_name = 'system_2',
systemstatus = systemstatus_1,
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
System.objects.create(
system_name = 'system_3',
systemstatus = systemstatus_1,
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
# create object
Task.objects.create(
taskname = taskname_1,
taskpriority = taskpriority_1,
taskstatus = taskstatus_1,
task_modify_time = timezone.now(),
task_created_by_user_id = test_user,
task_modified_by_user_id = test_user,
)
# create object
Artifact.objects.create(
artifact_name = 'artifact_1',
artifactstatus = artifactstatus_1,
artifacttype = artifacttype_1,
system = system_1,
artifact_created_by_user_id = test_user,
artifact_modified_by_user_id = test_user,
)
Artifact.objects.create(
artifact_name = 'artifact_2',
artifactstatus = artifactstatus_1,
artifacttype = artifacttype_1,
system = system_1,
artifact_created_by_user_id = test_user,
artifact_modified_by_user_id = test_user,
)
# create object
Case.objects.create(
case_name = 'case_1',
casepriority = casepriority_1,
casestatus = casestatus_1,
case_is_incident = True,
case_created_by_user_id = test_user,
)
Case.objects.create(
case_name = 'case_2',
casepriority = casepriority_1,
casestatus = casestatus_1,
case_is_incident = True,
case_created_by_user_id = test_user,
)
Case.objects.create(
case_name = 'case_3',
casepriority = casepriority_1,
casestatus = casestatus_1,
case_is_incident = True,
case_created_by_user_id = test_user,
)
Case.objects.create(
case_name = 'case_4',
casepriority = casepriority_1,
casestatus = casestatus_1,
case_is_incident = True,
case_created_by_user_id = test_user,
)
# mock timezone.now()
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_1):
# create empty object (sufficient for simply testing a get request on the empty detail view)
Statushistory.objects.create()
def test_status_view_not_logged_in(self):
""" test status view """
# create url
destination = '/login/?next=' + urllib.parse.quote('/config/status/', safe='')
# get response
response = self.client.get('/config/status/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_status_view_logged_in(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# compare
self.assertEqual(response.status_code, 200)
def test_status_view_template(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_config/status/status.html')
def test_status_view_get_user_context(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_status')
def test_status_view_redirect(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# create url
destination = urllib.parse.quote('/config/status/', safe='/')
# get response
response = self.client.get('/config/status', follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_status_view_get_object_context(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# get querysets
analysisstatus_all = Analysisstatus.objects.all().order_by('analysisstatus_name')
artifactpriority_all = Artifactpriority.objects.all().order_by('artifactpriority_name')
artifactstatus_all = Artifactstatus.objects.all().order_by('artifactstatus_name')
casepriority_all = Casepriority.objects.all().order_by('casepriority_name')
casestatus_all = Casestatus.objects.all().order_by('casestatus_name')
systemstatus_all = Systemstatus.objects.all().order_by('systemstatus_name')
taskstatus_all = Taskstatus.objects.all().order_by('taskstatus_name')
taskpriority_all = Taskpriority.objects.all().order_by('taskpriority_name')
# compare
self.assertEqual(response.context['artifacts_number'], 2)
self.assertEqual(response.context['cases_number'], 4)
self.assertEqual(response.context['systems_number'], 3)
self.assertEqual(response.context['tasks_number'], 1)
self.assertEqual(type(response.context['analysisstatus_all']), type(analysisstatus_all))
self.assertEqual(type(response.context['artifactpriority_all']), type(artifactpriority_all))
self.assertEqual(type(response.context['artifactstatus_all']), type(artifactstatus_all))
self.assertEqual(type(response.context['casepriority_all']), type(casepriority_all))
self.assertEqual(type(response.context['casestatus_all']), type(casestatus_all))
self.assertEqual(type(response.context['systemstatus_all']), type(systemstatus_all))
self.assertEqual(type(response.context['taskpriority_all']), type(taskpriority_all))
self.assertEqual(type(response.context['taskstatus_all']), type(taskstatus_all))
def test_status_view_get_statushistory_entry_numbers_context(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get response
response = self.client.get('/config/status/')
# compare
self.assertEqual(type(response.context['statushistory_all']), type(reversed(Statushistory.objects.all())))
# TODO: test number of queryset elements in context element 'statushistory_all' according to 'statushistory_last_entrys' in MainConfigModel
# TODO: number also depends on available statushistory elements
# TODO: find a way to count reversed queryset
#self.assertEqual(response.context['statushistory_all'].count(), 2)
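# One possible way to count the reversed queryset (sketch, not applied here):
# len(list(response.context['statushistory_all']))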
def test_status_detail_view_not_logged_in(self):
""" test status view """
# get time
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
# get object
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
# create url
destination = '/login/?next=' + urllib.parse.quote('/config/status/' + str(statushistory_id) + '/', safe='')
# get response
response = self.client.get('/config/status/' + str(statushistory_id) + '/', follow=True)
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_status_detail_view_logged_in(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get time
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
# get object
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
# get response
response = self.client.get('/config/status/' + str(statushistory_id) + '/')
# compare
self.assertEqual(response.status_code, 200)
def test_status_detail_view_template(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get time
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
# get object
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
# get response
response = self.client.get('/config/status/' + str(statushistory_id) + '/')
# compare
self.assertTemplateUsed(response, 'dfirtrack_config/status/status_detail.html')
def test_status_detail_view_get_user_context(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get time
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
# get object
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
# get response
response = self.client.get('/config/status/' + str(statushistory_id) + '/')
# compare
self.assertEqual(str(response.context['user']), 'testuser_status')
def test_status_detail_view_redirect(self):
""" test status view """
# login testuser
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
# get time
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
# get object
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
# create url
destination = urllib.parse.quote('/config/status/' + str(statushistory_id) + '/', safe='/')
# get response
response = self.client.get('/config/status/' + str(statushistory_id), follow=True)
# compare
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
| 39.993651
| 147
| 0.658358
|
import urllib.parse
from datetime import datetime
from unittest.mock import patch
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from dfirtrack_artifacts.models import (
Artifact,
Artifactpriority,
Artifactstatus,
Artifacttype,
)
from dfirtrack_config.models import Statushistory
from dfirtrack_main.models import (
Analysisstatus,
Case,
Casepriority,
Casestatus,
System,
Systemstatus,
Task,
Taskname,
Taskpriority,
Taskstatus,
)
class StatusViewTestCase(TestCase):
@classmethod
def setUpTestData(cls):
test_user = User.objects.create_user(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
artifactstatus_1 = Artifactstatus.objects.create(artifactstatus_name='artifactstatus_1')
artifacttype_1 = Artifacttype.objects.create(artifacttype_name='artifacttype_1')
casepriority_1 = Casepriority.objects.create(casepriority_name='casepriority_1')
casestatus_1 = Casestatus.objects.create(casestatus_name='casestatus_1')
systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
taskname_1 = Taskname.objects.create(taskname_name='taskname_1')
taskpriority_1 = Taskpriority.objects.create(taskpriority_name='prio_1')
taskstatus_1 = Taskstatus.objects.create(taskstatus_name='taskstatus_1')
system_1 = System.objects.create(
system_name = 'system_1',
systemstatus = systemstatus_1,
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
System.objects.create(
system_name = 'system_2',
systemstatus = systemstatus_1,
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
System.objects.create(
system_name = 'system_3',
systemstatus = systemstatus_1,
system_created_by_user_id = test_user,
system_modified_by_user_id = test_user,
)
Task.objects.create(
taskname = taskname_1,
taskpriority = taskpriority_1,
taskstatus = taskstatus_1,
task_modify_time = timezone.now(),
task_created_by_user_id = test_user,
task_modified_by_user_id = test_user,
)
Artifact.objects.create(
artifact_name = 'artifact_1',
artifactstatus = artifactstatus_1,
artifacttype = artifacttype_1,
system = system_1,
artifact_created_by_user_id = test_user,
artifact_modified_by_user_id = test_user,
)
Artifact.objects.create(
artifact_name = 'artifact_2',
artifactstatus = artifactstatus_1,
artifacttype = artifacttype_1,
system = system_1,
artifact_created_by_user_id = test_user,
artifact_modified_by_user_id = test_user,
)
Case.objects.create(
case_name = 'case_1',
casepriority = casepriority_1,
casestatus = casestatus_1,
case_is_incident = True,
case_created_by_user_id = test_user,
)
Case.objects.create(
case_name = 'case_2',
casepriority = casepriority_1,
casestatus = casestatus_1,
case_is_incident = True,
case_created_by_user_id = test_user,
)
Case.objects.create(
case_name = 'case_3',
casepriority = casepriority_1,
casestatus = casestatus_1,
case_is_incident = True,
case_created_by_user_id = test_user,
)
Case.objects.create(
case_name = 'case_4',
casepriority = casepriority_1,
casestatus = casestatus_1,
case_is_incident = True,
case_created_by_user_id = test_user,
)
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
with patch.object(timezone, 'now', return_value=t_1):
Statushistory.objects.create()
def test_status_view_not_logged_in(self):
destination = '/login/?next=' + urllib.parse.quote('/config/status/', safe='')
response = self.client.get('/config/status/', follow=True)
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_status_view_logged_in(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
response = self.client.get('/config/status/')
self.assertEqual(response.status_code, 200)
def test_status_view_template(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
response = self.client.get('/config/status/')
self.assertTemplateUsed(response, 'dfirtrack_config/status/status.html')
def test_status_view_get_user_context(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
response = self.client.get('/config/status/')
self.assertEqual(str(response.context['user']), 'testuser_status')
def test_status_view_redirect(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
destination = urllib.parse.quote('/config/status/', safe='/')
response = self.client.get('/config/status', follow=True)
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
def test_status_view_get_object_context(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
response = self.client.get('/config/status/')
analysisstatus_all = Analysisstatus.objects.all().order_by('analysisstatus_name')
artifactpriority_all = Artifactpriority.objects.all().order_by('artifactpriority_name')
artifactstatus_all = Artifactstatus.objects.all().order_by('artifactstatus_name')
casepriority_all = Casepriority.objects.all().order_by('casepriority_name')
casestatus_all = Casestatus.objects.all().order_by('casestatus_name')
systemstatus_all = Systemstatus.objects.all().order_by('systemstatus_name')
taskstatus_all = Taskstatus.objects.all().order_by('taskstatus_name')
taskpriority_all = Taskpriority.objects.all().order_by('taskpriority_name')
self.assertEqual(response.context['artifacts_number'], 2)
self.assertEqual(response.context['cases_number'], 4)
self.assertEqual(response.context['systems_number'], 3)
self.assertEqual(response.context['tasks_number'], 1)
self.assertEqual(type(response.context['analysisstatus_all']), type(analysisstatus_all))
self.assertEqual(type(response.context['artifactpriority_all']), type(artifactpriority_all))
self.assertEqual(type(response.context['artifactstatus_all']), type(artifactstatus_all))
self.assertEqual(type(response.context['casepriority_all']), type(casepriority_all))
self.assertEqual(type(response.context['casestatus_all']), type(casestatus_all))
self.assertEqual(type(response.context['systemstatus_all']), type(systemstatus_all))
self.assertEqual(type(response.context['taskpriority_all']), type(taskpriority_all))
self.assertEqual(type(response.context['taskstatus_all']), type(taskstatus_all))
def test_status_view_get_statushistory_entry_numbers_context(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
response = self.client.get('/config/status/')
self.assertEqual(type(response.context['statushistory_all']), type(reversed(Statushistory.objects.all())))
def test_status_detail_view_not_logged_in(self):
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
destination = '/login/?next=' + urllib.parse.quote('/config/status/' + str(statushistory_id) + '/', safe='')
response = self.client.get('/config/status/' + str(statushistory_id) + '/', follow=True)
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
def test_status_detail_view_logged_in(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
response = self.client.get('/config/status/' + str(statushistory_id) + '/')
self.assertEqual(response.status_code, 200)
def test_status_detail_view_template(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
response = self.client.get('/config/status/' + str(statushistory_id) + '/')
self.assertTemplateUsed(response, 'dfirtrack_config/status/status_detail.html')
def test_status_detail_view_get_user_context(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
response = self.client.get('/config/status/' + str(statushistory_id) + '/')
self.assertEqual(str(response.context['user']), 'testuser_status')
def test_status_detail_view_redirect(self):
self.client.login(username='testuser_status', password='D9lPsoHFXeCNKEzM3IgE')
t_1 = datetime(2020, 11, 22, 11, 22, 33, tzinfo=timezone.utc)
statushistory_id = Statushistory.objects.get(statushistory_time=t_1).statushistory_id
destination = urllib.parse.quote('/config/status/' + str(statushistory_id) + '/', safe='/')
response = self.client.get('/config/status/' + str(statushistory_id), follow=True)
self.assertRedirects(response, destination, status_code=301, target_status_code=200)
| true
| true
|
790b95b3e36688282051a17cf920d4427b26a9d5
| 12,225
|
py
|
Python
|
salt/states/reg.py
|
shaktigupta200/salt
|
a5f43a5e247ee9c23852db2d21d40df8712ceb43
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/states/reg.py
|
shaktigupta200/salt
|
a5f43a5e247ee9c23852db2d21d40df8712ceb43
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/states/reg.py
|
shaktigupta200/salt
|
a5f43a5e247ee9c23852db2d21d40df8712ceb43
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
r'''
Manage the Windows registry
===========================
Many Python developers think of registry keys as if they were keys in a
Python dictionary, which is not the case. The Windows registry is broken
down into the following components:
-----
Hives
-----
This is the top level of the registry. They all begin with HKEY.
- HKEY_CLASSES_ROOT (HKCR)
- HKEY_CURRENT_USER (HKCU)
- HKEY_LOCAL_MACHINE (HKLM)
- HKEY_USERS (HKU)
- HKEY_CURRENT_CONFIG
----
Keys
----
Hives contain keys. These are basically the folders beneath the hives. They can
contain any number of subkeys.
-----------------
Values or Entries
-----------------
Values or Entries are the name/data pairs beneath the keys and subkeys. All keys
have a default name/data pair. It is usually "(Default)"="(value not set)". The
actual value for the name and the data is Null. The registry editor will display
"(Default)" and "(value not set)".
-------
Example
-------
The following example is taken from the windows startup portion of the registry:
```
[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\Run]
"RTHDVCPL"="\"C:\\Program Files\\Realtek\\Audio\\HDA\\RtkNGUI64.exe\" -s"
"NvBackend"="\"C:\\Program Files (x86)\\NVIDIA Corporation\\Update Core\\NvBackend.exe\""
"BTMTrayAgent"="rundll32.exe \"C:\\Program Files (x86)\\Intel\\Bluetooth\\btmshellex.dll\",TrayApp"
```
In this example these are the values for each:
Hive: `HKEY_LOCAL_MACHINE`
Key and subkeys: `SOFTWARE\Microsoft\Windows\CurrentVersion\Run`
Value:
- There are 3 value names: `RTHDVCPL`, `NvBackend`, and `BTMTrayAgent`
- Each value name has a corresponding value
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import logging
import salt.utils.stringutils
log = logging.getLogger(__name__)
def __virtual__():
'''
Load this state if the reg module exists
'''
if 'reg.read_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.read_value')
if 'reg.set_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.set_value')
if 'reg.delete_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.delete_value')
if 'reg.delete_key_recursive' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.delete_key_recursive')
return 'reg'
def _parse_key(key):
'''
split the hive from the key
'''
splt = key.split("\\")
hive = splt.pop(0)
key = '\\'.join(splt)
return hive, key
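# e.g. _parse_key(r'HKEY_LOCAL_MACHINE\SOFTWARE\Salt')
# returns ('HKEY_LOCAL_MACHINE', 'SOFTWARE\\Salt')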
def present(name,
vname=None,
vdata=None,
vtype='REG_SZ',
use_32bit_registry=False):
'''
Ensure a registry key or value is present.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
:param str vname: The name of the value you'd like to create beneath the
Key. If this parameter is not passed it will assume you want to set the
(Default) value
:param str vdata: The value you'd like to set. If a value name (vname) is
passed, this will be the data for that value name. If not, this will be the
(Default) value for the key.
The type for the (Default) value is always REG_SZ and cannot be changed.
This parameter is optional. If not passed, the Key will be created with no
associated item/value pairs.
:param str vtype: The value type for the data you wish to store in the
registry. Valid values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_SZ (Default)
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
The following example will set the ``(Default)`` value for the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``:
Example:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vdata: 2016.3.1
The following example will set the value for the ``version`` entry under the
``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The
value will be reflected in ``Wow6432Node``:
Example:
.. code-block:: yaml
HKEY_CURRENT_USER\\SOFTWARE\\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
'''
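# Flow (summary of the code below): read the current value, return early if
# it already matches, honor test mode, otherwise write via reg.set_value and
# report the change.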
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
reg_current = __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if vdata == reg_current['vdata'] and reg_current['success']:
ret['comment'] = '{0} in {1} is already configured' \
''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)',
salt.utils.stringutils.to_unicode(name, 'utf-8'))
return ret
vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype)
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'),
'Value': vdata_decoded}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret
# Configure the value
ret['result'] = __utils__['reg.set_value'](hive=hive,
key=key,
vname=vname,
vdata=vdata,
vtype=vtype,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to add {0} to {1}\{2}'.format(name, hive, key)
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key)
return ret
def absent(name, vname=None, use_32bit_registry=False):
'''
Ensure a registry value is removed. To remove a key use key_absent.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
:param str vname: The name of the value you'd like to remove beneath the
Key. If this parameter is not passed it will assume you want to remove the
(Default) value
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
CLI Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\\SOFTWARE\\Salt':
reg.absent:
- vname: version
In the above example the value named ``version`` will be removed from
the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not
passed, the (Default) value would be deleted.
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
reg_check = __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not reg_check['success'] or reg_check['vdata'] == '(value not set)':
ret['comment'] = '{0} is already absent'.format(name)
return ret
remove_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(vname if vname else '(Default)')}
# Check for test option
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will remove': remove_change}}
return ret
# Delete the value
ret['result'] = __utils__['reg.delete_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive)
else:
ret['changes'] = {'reg': {'Removed': remove_change}}
ret['comment'] = r'Removed {0} from {1}'.format(key, hive)
return ret
def key_absent(name, use_32bit_registry=False):
r'''
.. versionadded:: 2015.5.4
Ensure a registry key is removed. This will remove the key, all value
entries it contains, and all of its subkeys.
:param str name: A string representing the full path to the key to be
removed to include the hive and the keypath. The hive can be any of the
following:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USERS or HKU
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
The following example will delete the ``SOFTWARE\Salt`` key and all subkeys
under the ``HKEY_CURRENT_USER`` hive.
Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\SOFTWARE\Salt':
reg.key_absent:
- force: True
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\Salt`` is the key
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
# Determine what to do
if not __utils__['reg.read_value'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)['success']:
ret['comment'] = '{0} is already absent'.format(name)
return ret
ret['changes'] = {'reg': {
'Removed': {
'Key': r'{0}\{1}'.format(hive, key)
}}}
# Check for test option
if __opts__['test']:
ret['result'] = None
return ret
# Delete the value
__utils__['reg.delete_key_recursive'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)
if __utils__['reg.read_value'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)['success']:
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Failed to remove registry key {0}'.format(name)
return ret
| 32.427056
| 117
| 0.594438
|
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.stringutils
log = logging.getLogger(__name__)
def __virtual__():
if 'reg.read_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.read_value')
if 'reg.set_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.set_value')
if 'reg.delete_value' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.delete_value')
if 'reg.delete_key_recursive' not in __utils__:
return (False, 'reg state module failed to load: '
'missing module function: reg.delete_key_recursive')
return 'reg'
def _parse_key(key):
splt = key.split("\\")
hive = splt.pop(0)
key = '\\'.join(splt)
return hive, key
def present(name,
vname=None,
vdata=None,
vtype='REG_SZ',
use_32bit_registry=False):
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
reg_current = __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if vdata == reg_current['vdata'] and reg_current['success']:
ret['comment'] = '{0} in {1} is already configured' \
''.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)',
salt.utils.stringutils.to_unicode(name, 'utf-8'))
return ret
vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype)
add_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'),
'Value': vdata_decoded}
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret
ret['result'] = __utils__['reg.set_value'](hive=hive,
key=key,
vname=vname,
vdata=vdata,
vtype=vtype,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to add {0} to {1}\{2}'.format(name, hive, key)
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = r'Added {0} to {1}\{2}'.format(name, hive, key)
return ret
def absent(name, vname=None, use_32bit_registry=False):
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
reg_check = __utils__['reg.read_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not reg_check['success'] or reg_check['vdata'] == '(value not set)':
ret['comment'] = '{0} is already absent'.format(name)
return ret
remove_change = {'Key': r'{0}\{1}'.format(hive, key),
'Entry': '{0}'.format(vname if vname else '(Default)')}
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will remove': remove_change}}
return ret
ret['result'] = __utils__['reg.delete_value'](hive=hive,
key=key,
vname=vname,
use_32bit_registry=use_32bit_registry)
if not ret['result']:
ret['changes'] = {}
ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive)
else:
ret['changes'] = {'reg': {'Removed': remove_change}}
ret['comment'] = r'Removed {0} from {1}'.format(key, hive)
return ret
def key_absent(name, use_32bit_registry=False):
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
hive, key = _parse_key(name)
if not __utils__['reg.read_value'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)['success']:
ret['comment'] = '{0} is already absent'.format(name)
return ret
ret['changes'] = {'reg': {
'Removed': {
'Key': r'{0}\{1}'.format(hive, key)
}}}
if __opts__['test']:
ret['result'] = None
return ret
__utils__['reg.delete_key_recursive'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)
if __utils__['reg.read_value'](hive=hive,
key=key,
use_32bit_registry=use_32bit_registry)['success']:
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Failed to remove registry key {0}'.format(name)
return ret
| true
| true
|
790b968633cd30fabb38efed4a47431142e77aa1
| 3,185
|
py
|
Python
|
temboo/core/Library/Stripe/Coupons/RetrieveCoupon.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Stripe/Coupons/RetrieveCoupon.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Stripe/Coupons/RetrieveCoupon.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
# -*- coding: utf-8 -*-
###############################################################################
#
# RetrieveCoupon
# Retrieves a coupon with specified coupon id.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RetrieveCoupon(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RetrieveCoupon Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RetrieveCoupon, self).__init__(temboo_session, '/Library/Stripe/Coupons/RetrieveCoupon')
def new_input_set(self):
return RetrieveCouponInputSet()
def _make_result_set(self, result, path):
return RetrieveCouponResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RetrieveCouponChoreographyExecution(session, exec_id, path)
class RetrieveCouponInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RetrieveCoupon
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key provided by Stripe)
"""
super(RetrieveCouponInputSet, self)._set_input('APIKey', value)
def set_CouponID(self, value):
"""
Set the value of the CouponID input for this Choreo. ((required, string) The unique identifier of the coupon you want to retrieve)
"""
super(RetrieveCouponInputSet, self)._set_input('CouponID', value)
class RetrieveCouponResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RetrieveCoupon Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Stripe)
"""
return self._output.get('Response', None)
class RetrieveCouponChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RetrieveCouponResultSet(response, path)
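# Hedged usage sketch (not part of the generated SDK file): running the Choreo
# requires a TembooSession built from real credentials; the account name, app
# key and Stripe key below are placeholders, so the sketch is left commented.
#
# from temboo.core.session import TembooSession
# session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
# choreo = RetrieveCoupon(session)
# inputs = choreo.new_input_set()
# inputs.set_APIKey('sk_test_...')
# inputs.set_CouponID('25OFF')
# results = choreo.execute_with_results(inputs)
# print(results.get_Response())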
| 36.193182
| 138
| 0.686656
| true
| true
|
|
790b96fdcab1c24d8db48f0feb9a15c6ae07321a
| 1,006
|
py
|
Python
|
marvin/command_router.py
|
bennyandresen/marvin-mk2
|
71463df161489dcef62b7abd54018c9eca66216f
|
[
"MIT"
] | 16
|
2020-06-16T20:49:33.000Z
|
2022-02-09T03:38:54.000Z
|
marvin/command_router.py
|
bennyandresen/marvin-mk2
|
71463df161489dcef62b7abd54018c9eca66216f
|
[
"MIT"
] | 76
|
2020-06-06T22:45:02.000Z
|
2022-03-24T21:28:56.000Z
|
marvin/command_router.py
|
bennyandresen/marvin-mk2
|
71463df161489dcef62b7abd54018c9eca66216f
|
[
"MIT"
] | 11
|
2020-06-07T12:50:44.000Z
|
2022-02-09T03:38:15.000Z
|
import re
from typing import Any
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
class CommandRouter:
def __init__(self, subrouters: List["CommandRouter"] = []) -> None:
self.command_handlers: Dict[str, Callable[..., Awaitable[Any]]] = dict()
for subrouter in subrouters:
self.command_handlers.update(subrouter.command_handlers)
def register_command(self, regex: str) -> Callable[[Callable], Callable]:
def decorator(
function: Callable[..., Awaitable[Any]]
) -> Callable[..., Awaitable[Any]]:
self.command_handlers[regex] = function
return function
return decorator
def find_commands(self, body: str) -> List[str]:
"""Find all commands in a comment."""
commands = []
for regex in self.command_handlers.keys():
for _ in re.findall(regex, body):
commands.append(regex)
return commands
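# Hedged usage sketch (not part of the original module): register a handler and
# match it against a comment body. The regex and handler below are illustrative.
if __name__ == "__main__":
    router = CommandRouter()

    @router.register_command(r"/status (\S+)")
    async def set_status(status: str) -> None:
        ...

    print(router.find_commands("/status needs_work please"))  # -> ['/status (\\S+)']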
| 32.451613
| 80
| 0.637177
|
import re
from typing import Any
from typing import Awaitable
from typing import Callable
from typing import Dict
from typing import List
class CommandRouter:
def __init__(self, subrouters: List["CommandRouter"] = []) -> None:
self.command_handlers: Dict[str, Callable[..., Awaitable[Any]]] = dict()
for subrouter in subrouters:
self.command_handlers.update(subrouter.command_handlers)
def register_command(self, regex: str) -> Callable[[Callable], Callable]:
def decorator(
function: Callable[..., Awaitable[Any]]
) -> Callable[..., Awaitable[Any]]:
self.command_handlers[regex] = function
return function
return decorator
def find_commands(self, body: str) -> List[str]:
commands = []
for regex in self.command_handlers.keys():
for _ in re.findall(regex, body):
commands.append(regex)
return commands
| true
| true
|
790b97ed689e2dc900e9189fe8b09bbac3d3f114
| 12,772
|
py
|
Python
|
drone_2.py
|
SVJayanthi/DroneSimulation
|
8fe52609cb367360729f16f4f6402faeadaf6b06
|
[
"MIT"
] | 1
|
2019-06-19T02:22:58.000Z
|
2019-06-19T02:22:58.000Z
|
drone_2.py
|
SVJayanthi/DroneTrafficSimulation
|
8fe52609cb367360729f16f4f6402faeadaf6b06
|
[
"MIT"
] | null | null | null |
drone_2.py
|
SVJayanthi/DroneTrafficSimulation
|
8fe52609cb367360729f16f4f6402faeadaf6b06
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 10 22:59:51 2019
@author: Sravan
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 14 22:36:21 2019
@author: Sravan
"""
import csv
import numpy as np
from scipy.spatial.distance import pdist, squareform, euclidean, cdist
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import scipy.integrate as integrate
import matplotlib.animation as animation
"""
Variables: Wind speed, Air traffic (# of drones), Obstacles (Trees, Buildings)
Fixed: Distance, Air Resistance, Gravity, Battery level
Rules: Drone Speed (Air traffic, Wind speed, Battery level), Collisions (Drone position)
Study: Time, Speed
Movement: v_air = sqrt(mg/(nAρ)), ρ = 1.22 kg m^-3, A = 1 m^2
          ½cρAv² = mg·tanθ, c = drag coefficient
          P = ½ρnAv_air(v_air² – v²sin²θ)
Collisions: Drone - 1) Increase/Decrease speed, 2) Change path by increasing elevation
https://www.research-drone.com/en/extreme_climb_rate.html
https://en.wikipedia.org/wiki/Amazon_Prime_Air
https://homepages.abdn.ac.uk/nph120/meteo/DroneFlight.pdf
"""
class ParticleBox:
"""Orbits class
init_state is an [N x 6] array, where N is the number of particles:
[[xi1, yi1, zi1, xf1, yf1, zf1, vx1, vy1, vz1, t1],
[xi2, yi2, zi2, xf2, yf2, zf2, vx2, vy2, vz2, t2],
... ]
bounds is the size of the box: [xmin, xmax, ymin, ymax, zmin, zmax]
"""
def __init__(self,
drones = 1,
wind = [0, 0, 0],
obstacles = 0,
bounds = [-32000, 32000, -32000, 32000, 0, 150],
size = 1.5,
max_height = 122,
max_speed = 22.34,
acc = 7,
M = 25.0,
G = 9.81):
self.drones = drones
self.wind = wind
self.size = size
self.G = G
self.max_height = max_height
self.max_speed = max_speed
self.acc_vert = acc
self.acc_vert_eff = acc + G
self.acc_hor = acc
self.obstacles = 0
self.obstacles_size = 40
self.time_elapsed = 0
self.bounds = bounds
np.random.seed(0)
init_state = np.random.random((drones, 10))
init_state[:, :2] -= 0.5
init_state[:, :2] *= bounds[1]*2
init_state[:, 2:] = 0.0
for i in range(len(init_state)):
vecs = [64000.0, 64000.0]
while vecs[0] > bounds[1] or vecs[0] < bounds[0] or vecs[1] > bounds[3] or vecs[1] < bounds[2]:
vecs = np.random.standard_normal(2)
mags = np.linalg.norm(vecs)
vecs /= mags
vecs *= 16000
vecs += init_state[i, :2]
            init_state[i, 3:5] = vecs
if obstacles > 0:
np.random.seed(1)
obs_state = np.random.random((obstacles, 3))
obs_state[:, :3] -= 0.5
obs_state[:, :2] *= bounds[1]*2
obs_state[:, 2] *= bounds[5]*2
self.init_state = np.asarray(init_state, dtype=float)
#self.obs_state = np.asarray(obs_state, dtype=float)
self.M = M * np.ones(self.init_state.shape[0])
self.state = self.init_state.copy()
#update velocity
self.state[:, 6] = self.wind[0]
self.state[:, 7] = self.wind[1]
self.state[:, 8] = self.wind[2]
def step(self, dt):
"""step once by dt seconds"""
self.time_elapsed += dt
# find distance to goal
D = cdist(self.state[:, :3], self.state[:, 3:6], 'euclidean')
ind, din = np.where(D > 122)
uniqua = (ind == din)
ind = ind[uniqua]
# update velocities of individual drones
for i in zip(ind):
#velocity vector
v = self.state[i, 8]
v_avg = v
a_ver = self.acc_vert
a_ver_eff = self.acc_vert_eff
height = self.max_height - self.state[i, 2]
print(height)
if height > 0:
n = 1
if v > 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver)
t_end = abs(v / a_ver)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and stop > (height - area)):
v_avg = 0
self.state[i, 8] = 0
self.state[i, 2] = self.max_height
elif (stop > (height - area)):
t_max = 0
if stop < height:
a = 2 * (a_ver)**2
b = 4 * (a_ver) * v
c = v**2 - 2 * a_ver * height
t_max = (-b + (b**2 - 4 * a * c)**(0.5)) / (2 * a)
v_max = v + a_ver * (t_max / dt)
v_end = 2 * v_max - v - a_ver * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v + a_ver * dt / 2
self.state[i, 8] += a_ver * dt
elif height < 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver_eff)
t_end = abs(v / a_ver_eff)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and abs(stop) <= abs(height)):
v_avg = (v / 2) * (t_end / dt)
self.state[i, 8] = v + a_ver_eff * t_end
elif (stop < (height - area)):
v_max = (height * (2 * a_ver_eff))**(0.5)
t_max = (v_max - v)/a_ver_eff
v_end = 2 * v_max - v - a_ver_eff * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v - a_ver_eff * dt / 2
self.state[i, 8] = v - a_ver_eff * dt
else:
self.state[i, 8] += 0 * dt
self.state[i, 2] += v_avg * dt
# unit vector
r = self.state[i, 3:5] - self.state[i, :2]
m = np.linalg.norm(r)
u = r / m
            #accelerating horizontal
a_hor = self.acc_hor
v_hor = self.state[i, 6:8]
h = np.linalg.norm(v_hor)
stop = h**2/(2 * a_hor)
t_end = h / a_hor
b1 = (h**2 + t_end**2)**(0.5)
b2 = ((h + a_hor * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_hor * dt)**2 + dt**2)**(0.5)
s2 = dt*2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
s = 2 * t / (b2 - b1)
area = (t + (b2 - b1) * s)
if (t_end <= dt and stop < area):
v_hor = (h / 2) * (t_end / dt)
self.state[i, 6:8] = (h - (a_hor * t_end)) * u
elif (stop > (m - area)):
v_max = (m * (2 * a_hor))**(0.5)
t_max = (v_max - h)/a_hor
v_end = 2 * v_max - h - a_hor * dt
v_hor = ((v_max + h) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 6:8] = v_end * u
else:
v_hor = h + a_hor * dt / 2
self.state[i, 6:8] = (h + a_hor * dt) * u
self.state[i, :2] += (v_hor * dt) * u
#find drones hovering
done, fund = np.where(D <= 122)
uniquo = (done == fund)
done = done[uniquo]
        for d in zip(done):
            print("here")
            #velocity vector
            v = self.state[d, 8]
            v_avg = v
            a_ver_eff = self.acc_vert_eff
            #accelerating negative z
            n = -1
            if v < 0:
                n = v / abs(v)
            stop = n * v**2/(2 * a_ver_eff)
            t_end = abs(v / a_ver_eff)
            b1 = (v**2 + t_end**2)**(0.5)
            b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)
            s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)
            s2 = dt * 2
            P = (b2 - b1) + s1 + s2
            t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
            h = 2 * t / (b2 - b1)
            area = n * (t + (b2 - b1) * h)
            if (t_end <= dt and stop > area):
                v_avg = (v / 2) * (t_end / dt)
                self.state[d, 8] = v + a_ver_eff * t_end
                self.state[d, 9] = self.time_elapsed
            elif (stop < (-self.state[d, 2] - area)):
                v_max = ((-self.state[d, 2]) * (2 * a_ver_eff))**(0.5)
                t_max = (v_max - v)/a_ver_eff
                v_end = 2 * v_max - v - a_ver_eff * dt
                v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
                self.state[d, 8] = v_end
            else:
                v_avg = v - a_ver_eff * dt / 2
                self.state[d, 8] = v - a_ver_eff * dt
            self.state[d, 2] += v_avg * dt
E = squareform(pdist(self.state[:, :3], 'euclidean'))
ind1, ind2 = np.where(E < (2 * self.size))
unique = (ind1 < ind2)
ind1 = ind1[unique]
ind2 = ind2[unique]
for i1, i2 in zip(ind1, ind2):
if (self.state[i1, 2] > self.state[i2, 2]):
self.state[i1, 8] += (self.acc_vert) * dt
self.state[i2, 8] -= (self.acc_vert_eff) * dt
else:
self.state[i1, 8] -= (self.acc_vert) * dt
self.state[i2, 8] += (self.acc_vert_eff) * dt
if self.obstacles > 0:
DO = np.vstack([self.state[:, :3].copy(), self.obs_state.copy()])
F = squareform(pdist(DO, 'euclidean'))
d_rone, obs = np.where(F < (2 * self.obstacles_size))
            unique = (d_rone < obs) & (obs >= self.drones)
d_rone = d_rone[unique]
obs = obs[unique]
for d, o in zip(d_rone, obs):
if (self.obs_state[o-self.drones, 2] < 110 and self.state[d, 2] < self.obs_state[o-self.drones, 2]):
self.state[d, 8] += self.acc_vert * dt
else:
r = self.state[d, 3:5] - self.state[d, :2]
ro = self.obs_state[o-self.drones, :2] - self.state[d, :2]
r_rel = np.cross(r, ro)
if (r_rel[2] > 0):
self.state[d, 6] += self.acc_hor * dt
self.state[d, 7] += self.acc_hor * dt
else:
self.state[d, 6] -= self.acc_hor * dt
self.state[d, 7] -= self.acc_hor * dt
#restrict velocity
        self.state[:, 6] = np.clip(self.state[:, 6], -self.max_speed + self.wind[0], self.max_speed + self.wind[0])
        self.state[:, 7] = np.clip(self.state[:, 7], -self.max_speed + self.wind[1], self.max_speed + self.wind[1])
#------------------------------------------------------------
# set up initial state
box = ParticleBox()
dt = 1. # 1 fps
#ani = animation.FuncAnimation(fig, animate, frames=600, interval=10, init_func=init)
for i in range(10):
box.step(dt)
#final = np.hstack([box.init_state[:, :3], box.state[:, 3:]])
#with open('people.csv', 'w') as writeFile:
# writer = csv.writer(writeFile)
# writer.writerows(final) #2d list
"""with open('initial.csv', 'w') as writeInit:
writer = csv.writer(writeInit)
writer.writerows(box.init_state)
writeInit.close()
"""
with open('final_2.csv', 'w') as writeFin:
writer = csv.writer(writeFin)
writer.writerows(box.init_state)
writer.writerows(box.state)
writeFin.close()
print(box.state)
| 37.127907
| 116
| 0.432509
|
import csv
import numpy as np
from scipy.spatial.distance import pdist, squareform, euclidean, cdist
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import scipy.integrate as integrate
import matplotlib.animation as animation
class ParticleBox:
def __init__(self,
drones = 1,
wind = [0, 0, 0],
obstacles = 0,
bounds = [-32000, 32000, -32000, 32000, 0, 150],
size = 1.5,
max_height = 122,
max_speed = 22.34,
acc = 7,
M = 25.0,
G = 9.81):
self.drones = drones
self.wind = wind
self.size = size
self.G = G
self.max_height = max_height
self.max_speed = max_speed
self.acc_vert = acc
self.acc_vert_eff = acc + G
self.acc_hor = acc
self.obstacles = 0
self.obstacles_size = 40
self.time_elapsed = 0
self.bounds = bounds
np.random.seed(0)
init_state = np.random.random((drones, 10))
init_state[:, :2] -= 0.5
init_state[:, :2] *= bounds[1]*2
init_state[:, 2:] = 0.0
for i in range(len(init_state)):
vecs = [64000.0, 64000.0]
while vecs[0] > bounds[1] or vecs[0] < bounds[0] or vecs[1] > bounds[3] or vecs[1] < bounds[2]:
vecs = np.random.standard_normal(2)
mags = np.linalg.norm(vecs)
vecs /= mags
vecs *= 16000
vecs += init_state[i, :2]
            init_state[i, 3:5] = vecs
if obstacles > 0:
np.random.seed(1)
obs_state = np.random.random((obstacles, 3))
obs_state[:, :3] -= 0.5
obs_state[:, :2] *= bounds[1]*2
obs_state[:, 2] *= bounds[5]*2
self.init_state = np.asarray(init_state, dtype=float)
self.M = M * np.ones(self.init_state.shape[0])
self.state = self.init_state.copy()
self.state[:, 6] = self.wind[0]
self.state[:, 7] = self.wind[1]
self.state[:, 8] = self.wind[2]
def step(self, dt):
self.time_elapsed += dt
D = cdist(self.state[:, :3], self.state[:, 3:6], 'euclidean')
ind, din = np.where(D > 122)
uniqua = (ind == din)
ind = ind[uniqua]
for i in zip(ind):
v = self.state[i, 8]
v_avg = v
a_ver = self.acc_vert
a_ver_eff = self.acc_vert_eff
height = self.max_height - self.state[i, 2]
print(height)
if height > 0:
n = 1
if v > 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver)
t_end = abs(v / a_ver)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and stop > (height - area)):
v_avg = 0
self.state[i, 8] = 0
self.state[i, 2] = self.max_height
elif (stop > (height - area)):
t_max = 0
if stop < height:
a = 2 * (a_ver)**2
b = 4 * (a_ver) * v
c = v**2 - 2 * a_ver * height
t_max = (-b + (b**2 - 4 * a * c)**(0.5)) / (2 * a)
v_max = v + a_ver * (t_max / dt)
v_end = 2 * v_max - v - a_ver * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v + a_ver * dt / 2
self.state[i, 8] += a_ver * dt
elif height < 0:
n = v / abs(v)
stop = n * v**2/(2 * a_ver_eff)
t_end = abs(v / a_ver_eff)
b1 = (v**2 + t_end**2)**(0.5)
b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)
s2 = dt * 2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
h = 2 * t / (b2 - b1)
area = n * (t + (b2 - b1) * h)
if (t_end <= dt and abs(stop) <= abs(height)):
v_avg = (v / 2) * (t_end / dt)
self.state[i, 8] = v + a_ver_eff * t_end
elif (stop < (height - area)):
v_max = (height * (2 * a_ver_eff))**(0.5)
t_max = (v_max - v)/a_ver_eff
v_end = 2 * v_max - v - a_ver_eff * dt
v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 8] = v_end
else:
v_avg = v - a_ver_eff * dt / 2
self.state[i, 8] = v - a_ver_eff * dt
else:
self.state[i, 8] += 0 * dt
self.state[i, 2] += v_avg * dt
r = self.state[i, 3:5] - self.state[i, :2]
m = np.linalg.norm(r)
u = r / m
a_hor = self.acc_hor
v_hor = self.state[i, 6:8]
h = np.linalg.norm(v_hor)
stop = h**2/(2 * a_hor)
t_end = h / a_hor
b1 = (h**2 + t_end**2)**(0.5)
b2 = ((h + a_hor * dt)**2 + (t_end + dt)**2)**(0.5)
s1 = ((a_hor * dt)**2 + dt**2)**(0.5)
s2 = dt*2
P = (b2 - b1) + s1 + s2
t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
s = 2 * t / (b2 - b1)
area = (t + (b2 - b1) * s)
if (t_end <= dt and stop < area):
v_hor = (h / 2) * (t_end / dt)
self.state[i, 6:8] = (h - (a_hor * t_end)) * u
elif (stop > (m - area)):
v_max = (m * (2 * a_hor))**(0.5)
t_max = (v_max - h)/a_hor
v_end = 2 * v_max - h - a_hor * dt
v_hor = ((v_max + h) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
self.state[i, 6:8] = v_end * u
else:
v_hor = h + a_hor * dt / 2
self.state[i, 6:8] = (h + a_hor * dt) * u
self.state[i, :2] += (v_hor * dt) * u
done, fund = np.where(D <= 122)
uniquo = (done == fund)
done = done[uniquo]
for d in zip(done):
print("here")
            v = self.state[d, 8]
            v_avg = v
            a_ver_eff = self.acc_vert_eff
            n = -1
            if v < 0:
                n = v / abs(v)
            stop = n * v**2/(2 * a_ver_eff)
            t_end = abs(v / a_ver_eff)
            b1 = (v**2 + t_end**2)**(0.5)
            b2 = ((v + n * a_ver_eff * dt)**2 + (t_end + dt)**2)**(0.5)
            s1 = ((a_ver_eff * dt)**2 + dt**2)**(0.5)
            s2 = dt * 2
            P = (b2 - b1) + s1 + s2
            t = ((P/2) * (P/2 - s1) * (P/2 - s2) * (P/2 - b2 + b1))**(0.5)
            h = 2 * t / (b2 - b1)
            area = n * (t + (b2 - b1) * h)
            if (t_end <= dt and stop > area):
                v_avg = (v / 2) * (t_end / dt)
                self.state[d, 8] = v + a_ver_eff * t_end
                self.state[d, 9] = self.time_elapsed
            elif (stop < (-self.state[d, 2] - area)):
                v_max = ((-self.state[d, 2]) * (2 * a_ver_eff))**(0.5)
                t_max = (v_max - v)/a_ver_eff
                v_end = 2 * v_max - v - a_ver_eff * dt
                v_avg = ((v_max + v) / 2) * (t_max / dt) + ((v_max + v_end) / 2) * ((dt - t_max) / dt)
                self.state[d, 8] = v_end
            else:
                v_avg = v - a_ver_eff * dt / 2
                self.state[d, 8] = v - a_ver_eff * dt
            self.state[d, 2] += v_avg * dt
E = squareform(pdist(self.state[:, :3], 'euclidean'))
ind1, ind2 = np.where(E < (2 * self.size))
unique = (ind1 < ind2)
ind1 = ind1[unique]
ind2 = ind2[unique]
for i1, i2 in zip(ind1, ind2):
if (self.state[i1, 2] > self.state[i2, 2]):
self.state[i1, 8] += (self.acc_vert) * dt
self.state[i2, 8] -= (self.acc_vert_eff) * dt
else:
self.state[i1, 8] -= (self.acc_vert) * dt
self.state[i2, 8] += (self.acc_vert_eff) * dt
if self.obstacles > 0:
DO = np.vstack([self.state[:, :3].copy(), self.obs_state.copy()])
F = squareform(pdist(DO, 'euclidean'))
d_rone, obs = np.where(F < (2 * self.obstacles_size))
            unique = (d_rone < obs) & (obs >= self.drones)
d_rone = d_rone[unique]
obs = obs[unique]
for d, o in zip(d_rone, obs):
if (self.obs_state[o-self.drones, 2] < 110 and self.state[d, 2] < self.obs_state[o-self.drones, 2]):
self.state[d, 8] += self.acc_vert * dt
else:
r = self.state[d, 3:5] - self.state[d, :2]
ro = self.obs_state[o-self.drones, :2] - self.state[d, :2]
r_rel = np.cross(r, ro)
if (r_rel[2] > 0):
self.state[d, 6] += self.acc_hor * dt
self.state[d, 7] += self.acc_hor * dt
else:
self.state[d, 6] -= self.acc_hor * dt
self.state[d, 7] -= self.acc_hor * dt
        self.state[:, 6] = np.clip(self.state[:, 6], -self.max_speed + self.wind[0], self.max_speed + self.wind[0])
        self.state[:, 7] = np.clip(self.state[:, 7], -self.max_speed + self.wind[1], self.max_speed + self.wind[1])
box = ParticleBox()
dt = 1.
for i in range(10):
box.step(dt)
with open('final_2.csv', 'w') as writeFin:
writer = csv.writer(writeFin)
writer.writerows(box.init_state)
writer.writerows(box.state)
writeFin.close()
print(box.state)
| true
| true
|
790b97f05b63919ea246766fc8126cb5b6fdd78c
| 959
|
py
|
Python
|
tests/sig_return_type/test.py
|
ujway/serverless-go
|
11cf5c422ef93b5fbe1acfbc7a8d6e25e33072e2
|
[
"Apache-2.0"
] | 864
|
2017-01-03T15:30:14.000Z
|
2020-01-01T17:34:25.000Z
|
tests/sig_return_type/test.py
|
ujway/serverless-go
|
11cf5c422ef93b5fbe1acfbc7a8d6e25e33072e2
|
[
"Apache-2.0"
] | 52
|
2017-01-09T21:11:30.000Z
|
2018-07-25T10:41:56.000Z
|
tests/sig_return_type/test.py
|
ujway/serverless-go
|
11cf5c422ef93b5fbe1acfbc7a8d6e25e33072e2
|
[
"Apache-2.0"
] | 76
|
2017-01-04T12:19:37.000Z
|
2019-12-28T17:40:35.000Z
|
#
# Copyright 2017 Alsanium, SAS. or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import handler
class Context:
def get_remaining_time_in_millis(self):
pass
def log(self):
pass
class TestCase(unittest.TestCase):
def test_case(self):
with self.assertRaisesRegexp(AttributeError, "runtime: symbol Handle is not valid"):
handler.Handle({}, Context())
| 28.205882
| 92
| 0.727842
|
import unittest
import handler
class Context:
def get_remaining_time_in_millis(self):
pass
def log(self):
pass
class TestCase(unittest.TestCase):
def test_case(self):
with self.assertRaisesRegexp(AttributeError, "runtime: symbol Handle is not valid"):
handler.Handle({}, Context())
| true
| true
|
790b98bfc338ce0fcac8e7ea7e6c6d58c8e25f87
| 14,795
|
py
|
Python
|
dacbench/envs/sgd.py
|
goktug97/DACBench
|
953bc8efacdb993889b223110e25f7e453c86b2d
|
[
"Apache-2.0"
] | 1
|
2021-02-05T16:18:56.000Z
|
2021-02-05T16:18:56.000Z
|
dacbench/envs/sgd.py
|
goktug97/DACBench
|
953bc8efacdb993889b223110e25f7e453c86b2d
|
[
"Apache-2.0"
] | null | null | null |
dacbench/envs/sgd.py
|
goktug97/DACBench
|
953bc8efacdb993889b223110e25f7e453c86b2d
|
[
"Apache-2.0"
] | null | null | null |
import math
import warnings
from functools import reduce
import numpy as np
import torch
from backpack import backpack, extend
from backpack.extensions import BatchGrad
from gym.utils import seeding
from torchvision import datasets, transforms
from dacbench import AbstractEnv
warnings.filterwarnings("ignore")
class SGDEnv(AbstractEnv):
"""
    Environment to control the learning rate of Adam.
"""
def __init__(self, config):
"""
Initialize SGD Env
Parameters
        ----------
config : objdict
Environment configuration
"""
super(SGDEnv, self).__init__(config)
self.batch_size = config.training_batch_size
self.validation_batch_size = config.validation_batch_size
self.no_cuda = config.no_cuda
self.current_batch_size = config.training_batch_size
self.env_seed = config.seed
self.seed(self.env_seed)
self.use_cuda = not self.no_cuda and torch.cuda.is_available()
self.device = torch.device("cuda" if self.use_cuda else "cpu")
self.training_validation_ratio = 0.8
# self.test_dataset = None
self.train_dataset = None
self.validation_dataset = None
self.train_loader = None
# self.test_loader = None
self.validation_loader = None
self.train_loader_it = None
self.validation_loader_it = None
self.train_batch_index = 0
self.epoch_index = 0
self.current_training_loss = None
self.loss_batch = None
self.model = None
self.parameter_count = 0
self.layer_sizes = []
self.loss_function = torch.nn.NLLLoss(reduction="none")
self.loss_function = extend(self.loss_function)
self.initial_lr = config.lr * torch.ones(
1, device=self.device, requires_grad=False
)
self.current_lr = config.lr * torch.ones(
1, device=self.device, requires_grad=False
)
# Adam parameters
self.beta1 = config.beta1
self.beta2 = config.beta2
self.m = 0
self.v = 0
self.epsilon = 1.0e-08
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.prev_descent = None
self.learning_rate = 0.001
self.predictiveChangeVarDiscountedAverage = torch.zeros(
1, device=self.device, requires_grad=False
)
self.predictiveChangeVarUncertainty = torch.zeros(
1, device=self.device, requires_grad=False
)
self.lossVarDiscountedAverage = torch.zeros(
1, device=self.device, requires_grad=False
)
self.lossVarUncertainty = torch.zeros(
1, device=self.device, requires_grad=False
)
self.discount_factor = 0.9
self.firstOrderMomentum = torch.zeros(
1, device=self.device, requires_grad=False
)
self.secondOrderMomentum = torch.zeros(
1, device=self.device, requires_grad=False
)
self.writer = None
if "reward_function" in config.keys():
self.get_reward = config["reward_function"]
else:
self.get_reward = self.get_default_reward
if "state_method" in config.keys():
self.get_state = config["state_method"]
else:
self.get_state = self.get_default_state
def seed(self, seed=None):
"""
Set rng seed
Parameters
----------
seed:
seed for rng
"""
_, seed = seeding.np_random(seed)
if seed is not None:
torch.manual_seed(seed)
np.random.seed(seed)
return [seed]
def step(self, action):
"""
Execute environment step
Parameters
----------
action : list
action to execute
Returns
-------
np.array, float, bool, dict
state, reward, done, info
"""
done = super(SGDEnv, self).step_()
self.step_count += 1
index = 0
if not isinstance(action, float):
action = action[0]
action = torch.Tensor([action]).to(self.device)
new_lr = 10 ** (-action)
self.current_lr = new_lr
delta_w = torch.mul(
new_lr,
self.firstOrderMomentum
/ (torch.sqrt(self.secondOrderMomentum) + self.epsilon),
)
for i, p in enumerate(self.model.parameters()):
layer_size = self.layer_sizes[i]
p.data = p.data - delta_w[index: index + layer_size].reshape(
shape=p.data.shape
)
index += layer_size
self._set_zero_grad()
reward = self.get_reward(self)
return self.get_state(self), reward, done, {}
def reset(self):
"""
Reset environment
Returns
-------
np.array
Environment state
"""
super(SGDEnv, self).reset_()
dataset = self.instance[0]
instance_seed = self.instance[1]
construct_model = self.instance[2]
self.seed(instance_seed)
self.model = construct_model().to(self.device)
self.training_validation_ratio = 0.8
train_dataloader_args = {"batch_size": self.batch_size}
validation_dataloader_args = {"batch_size": self.validation_batch_size}
if self.use_cuda:
param = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_dataloader_args.update(param)
validation_dataloader_args.update(param)
if dataset == "MNIST":
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
train_dataset = datasets.MNIST(
"../data", train=True, download=True, transform=transform
)
# self.test_dataset = datasets.MNIST('../data', train=False, transform=transform)
else:
raise NotImplementedError
training_dataset_limit = math.floor(
len(train_dataset) * self.training_validation_ratio
)
validation_dataset_limit = len(train_dataset)
self.train_dataset = torch.utils.data.Subset(
train_dataset, range(0, training_dataset_limit - 1)
)
self.validation_dataset = torch.utils.data.Subset(
train_dataset, range(training_dataset_limit, validation_dataset_limit)
)
self.train_loader = torch.utils.data.DataLoader(
self.train_dataset, **train_dataloader_args
)
# self.test_loader = torch.utils.data.DataLoader(self.test_dataset, **train_dataloader_args)
self.validation_loader = torch.utils.data.DataLoader(
self.validation_dataset, **validation_dataloader_args
)
self.train_batch_index = 0
self.epoch_index = 0
self.train_loader_it = iter(self.train_loader)
self.validation_loader_it = iter(self.validation_loader)
self.parameter_count = 0
self.layer_sizes = []
for p in self.model.parameters():
layer_size = reduce(lambda x, y: x * y, p.shape)
self.layer_sizes.append(layer_size)
self.parameter_count += layer_size
self.model = extend(self.model)
self._set_zero_grad()
self.model.train()
self.current_training_loss = None
self.loss_batch = None
# Adam parameters
self.m = 0
self.v = 0
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.current_lr = self.initial_lr
self.prev_descent = torch.zeros(
(self.parameter_count,), device=self.device, requires_grad=False
)
self.get_default_reward(self)
return self.get_state(self)
def set_writer(self, writer):
self.writer = writer
def close(self):
"""
No additional cleanup necessary
Returns
-------
bool
Cleanup flag
"""
return True
def render(self, mode: str = "human"):
"""
Render env in human mode
Parameters
----------
mode : str
Execution mode
"""
if mode != "human":
raise NotImplementedError
pass
def get_default_state(self, _):
"""
Gather state description
Returns
-------
dict
Environment state
"""
gradients = self._get_gradients()
self.firstOrderMomentum, self.secondOrderMomentum = self._get_momentum(
gradients
)
(
predictiveChangeVarDiscountedAverage,
predictiveChangeVarUncertainty,
) = self._get_predictive_change_features(
self.current_lr, self.firstOrderMomentum, self.secondOrderMomentum
)
lossVarDiscountedAverage, lossVarUncertainty = self._get_loss_features()
state = {
"predictiveChangeVarDiscountedAverage": predictiveChangeVarDiscountedAverage,
"predictiveChangeVarUncertainty": predictiveChangeVarUncertainty,
"lossVarDiscountedAverage": lossVarDiscountedAverage,
"lossVarUncertainty": lossVarUncertainty,
"currentLR": self.current_lr,
"trainingLoss": self.current_training_loss,
"validationLoss": self.current_validation_loss,
}
return state
def _set_zero_grad(self):
index = 0
for i, p in enumerate(self.model.parameters()):
if p.grad is None:
continue
layer_size = self.layer_sizes[i]
p.grad.zero_()
index += layer_size
def _train_batch_(self):
        (data, target) = next(self.train_loader_it)
data, target = data.to(self.device), target.to(self.device)
self.current_batch_size = data.size()[0]
output = self.model(data)
loss = self.loss_function(output, target)
with backpack(BatchGrad()):
loss.mean().backward()
loss_value = loss.mean()
reward = self._get_validation_loss()
self.loss_batch = loss
self.current_training_loss = torch.unsqueeze(loss_value.detach(), dim=0)
self.train_batch_index += 1
return reward
def get_default_reward(self, _):
try:
reward = self._train_batch_()
except StopIteration:
self.train_batch_index = 0
self.epoch_index += 1
self.train_loader_it = iter(self.train_loader)
reward = self._train_batch_()
return reward
def _get_val_loss(self):
self.model.eval()
validation_loss = torch.zeros(1, device=self.device, requires_grad=False)
with torch.no_grad():
for data, target in self.validation_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
validation_loss += self.loss_function(output, target).mean()
validation_loss /= len(self.validation_loader.dataset)
self.model.train()
return validation_loss
def _get_validation_loss_(self):
self.model.eval()
        (data, target) = next(self.validation_loader_it)
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
validation_loss = self.loss_function(output, target).mean()
validation_loss = torch.unsqueeze(validation_loss.detach(), dim=0)
self.current_validation_loss = validation_loss
self.model.train()
return -validation_loss.item() # negative because it is the reward
def _get_validation_loss(self):
try:
validation_loss = self._get_validation_loss_()
except StopIteration:
self.validation_loader_it = iter(self.validation_loader)
validation_loss = self._get_validation_loss_()
return validation_loss
def _get_gradients(self):
gradients = []
for p in self.model.parameters():
if p.grad is None:
continue
gradients.append(p.grad.flatten())
gradients = torch.cat(gradients, dim=0)
return gradients
def _get_momentum(self, gradients):
self.t += 1
self.m = self.beta1 * self.m + (1 - self.beta1) * gradients
self.v = self.beta2 * self.v + (1 - self.beta2) * torch.square(gradients)
bias_corrected_m = self.m / (1 - self.beta1 ** self.t)
bias_corrected_v = self.v / (1 - self.beta2 ** self.t)
return bias_corrected_m, bias_corrected_v
def _get_adam_feature(self, learning_rate, m, v):
epsilon = 1.0e-8
return torch.mul(learning_rate, m / (torch.sqrt(v) + epsilon))
def _get_loss_features(self):
with torch.no_grad():
loss_var = torch.log(torch.var(self.loss_batch))
self.lossVarDiscountedAverage = (
self.discount_factor * self.lossVarDiscountedAverage
+ (1 - self.discount_factor) * loss_var
)
self.lossVarUncertainty = (
self.discount_factor * self.lossVarUncertainty
+ (1 - self.discount_factor)
* (loss_var - self.lossVarDiscountedAverage) ** 2
)
return self.lossVarDiscountedAverage, self.lossVarUncertainty
def _get_predictive_change_features(self, lr, m, v):
batch_gradients = []
for i, (name, param) in enumerate(self.model.named_parameters()):
grad_batch = param.grad_batch.reshape(
self.current_batch_size, self.layer_sizes[i]
)
batch_gradients.append(grad_batch)
batch_gradients = torch.cat(batch_gradients, dim=1)
update_value = self._get_adam_feature(lr, m, v)
predictive_change = torch.log(
torch.var(-1 * torch.matmul(batch_gradients, update_value))
)
self.predictiveChangeVarDiscountedAverage = (
self.discount_factor * self.predictiveChangeVarDiscountedAverage
+ (1 - self.discount_factor) * predictive_change
)
self.predictiveChangeVarUncertainty = (
self.discount_factor * self.predictiveChangeVarUncertainty
+ (1 - self.discount_factor)
* (predictive_change - self.predictiveChangeVarDiscountedAverage) ** 2
)
return (
self.predictiveChangeVarDiscountedAverage,
self.predictiveChangeVarUncertainty,
)
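# Hedged numerical sketch (not part of the original environment): replay the
# bias-corrected Adam moments that SGDEnv._get_momentum computes, on a toy
# gradient stream. The gradient values and betas below are illustrative.
_m, _v = 0.0, 0.0
_beta1, _beta2 = 0.9, 0.999
for _t, _g in enumerate([0.5, -0.25, 0.1], start=1):
    _m = _beta1 * _m + (1 - _beta1) * _g
    _v = _beta2 * _v + (1 - _beta2) * _g ** 2
    _m_hat = _m / (1 - _beta1 ** _t)  # bias-corrected first moment
    _v_hat = _v / (1 - _beta2 ** _t)  # bias-corrected second moment
# After the first iteration _m_hat ~= 0.5 and _v_hat ~= 0.25, i.e. the
# correction recovers g and g**2 despite the zero initialisation.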
| 31.41189
| 100
| 0.59973
|
import math
import warnings
from functools import reduce
import numpy as np
import torch
from backpack import backpack, extend
from backpack.extensions import BatchGrad
from gym.utils import seeding
from torchvision import datasets, transforms
from dacbench import AbstractEnv
warnings.filterwarnings("ignore")
class SGDEnv(AbstractEnv):
def __init__(self, config):
super(SGDEnv, self).__init__(config)
self.batch_size = config.training_batch_size
self.validation_batch_size = config.validation_batch_size
self.no_cuda = config.no_cuda
self.current_batch_size = config.training_batch_size
self.env_seed = config.seed
self.seed(self.env_seed)
self.use_cuda = not self.no_cuda and torch.cuda.is_available()
self.device = torch.device("cuda" if self.use_cuda else "cpu")
self.training_validation_ratio = 0.8
self.train_dataset = None
self.validation_dataset = None
self.train_loader = None
self.validation_loader = None
self.train_loader_it = None
self.validation_loader_it = None
self.train_batch_index = 0
self.epoch_index = 0
self.current_training_loss = None
self.loss_batch = None
self.model = None
self.parameter_count = 0
self.layer_sizes = []
self.loss_function = torch.nn.NLLLoss(reduction="none")
self.loss_function = extend(self.loss_function)
self.initial_lr = config.lr * torch.ones(
1, device=self.device, requires_grad=False
)
self.current_lr = config.lr * torch.ones(
1, device=self.device, requires_grad=False
)
self.beta1 = config.beta1
self.beta2 = config.beta2
self.m = 0
self.v = 0
self.epsilon = 1.0e-08
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.prev_descent = None
self.learning_rate = 0.001
self.predictiveChangeVarDiscountedAverage = torch.zeros(
1, device=self.device, requires_grad=False
)
self.predictiveChangeVarUncertainty = torch.zeros(
1, device=self.device, requires_grad=False
)
self.lossVarDiscountedAverage = torch.zeros(
1, device=self.device, requires_grad=False
)
self.lossVarUncertainty = torch.zeros(
1, device=self.device, requires_grad=False
)
self.discount_factor = 0.9
self.firstOrderMomentum = torch.zeros(
1, device=self.device, requires_grad=False
)
self.secondOrderMomentum = torch.zeros(
1, device=self.device, requires_grad=False
)
self.writer = None
if "reward_function" in config.keys():
self.get_reward = config["reward_function"]
else:
self.get_reward = self.get_default_reward
if "state_method" in config.keys():
self.get_state = config["state_method"]
else:
self.get_state = self.get_default_state
def seed(self, seed=None):
_, seed = seeding.np_random(seed)
if seed is not None:
torch.manual_seed(seed)
np.random.seed(seed)
return [seed]
def step(self, action):
done = super(SGDEnv, self).step_()
self.step_count += 1
index = 0
if not isinstance(action, float):
action = action[0]
action = torch.Tensor([action]).to(self.device)
new_lr = 10 ** (-action)
self.current_lr = new_lr
delta_w = torch.mul(
new_lr,
self.firstOrderMomentum
/ (torch.sqrt(self.secondOrderMomentum) + self.epsilon),
)
for i, p in enumerate(self.model.parameters()):
layer_size = self.layer_sizes[i]
p.data = p.data - delta_w[index: index + layer_size].reshape(
shape=p.data.shape
)
index += layer_size
self._set_zero_grad()
reward = self.get_reward(self)
return self.get_state(self), reward, done, {}
def reset(self):
super(SGDEnv, self).reset_()
dataset = self.instance[0]
instance_seed = self.instance[1]
construct_model = self.instance[2]
self.seed(instance_seed)
self.model = construct_model().to(self.device)
self.training_validation_ratio = 0.8
train_dataloader_args = {"batch_size": self.batch_size}
validation_dataloader_args = {"batch_size": self.validation_batch_size}
if self.use_cuda:
param = {"num_workers": 1, "pin_memory": True, "shuffle": True}
train_dataloader_args.update(param)
validation_dataloader_args.update(param)
if dataset == "MNIST":
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
)
train_dataset = datasets.MNIST(
"../data", train=True, download=True, transform=transform
)
else:
raise NotImplementedError
training_dataset_limit = math.floor(
len(train_dataset) * self.training_validation_ratio
)
validation_dataset_limit = len(train_dataset)
self.train_dataset = torch.utils.data.Subset(
train_dataset, range(0, training_dataset_limit - 1)
)
self.validation_dataset = torch.utils.data.Subset(
train_dataset, range(training_dataset_limit, validation_dataset_limit)
)
self.train_loader = torch.utils.data.DataLoader(
self.train_dataset, **train_dataloader_args
)
self.validation_loader = torch.utils.data.DataLoader(
self.validation_dataset, **validation_dataloader_args
)
self.train_batch_index = 0
self.epoch_index = 0
self.train_loader_it = iter(self.train_loader)
self.validation_loader_it = iter(self.validation_loader)
self.parameter_count = 0
self.layer_sizes = []
for p in self.model.parameters():
layer_size = reduce(lambda x, y: x * y, p.shape)
self.layer_sizes.append(layer_size)
self.parameter_count += layer_size
self.model = extend(self.model)
self._set_zero_grad()
self.model.train()
self.current_training_loss = None
self.loss_batch = None
self.m = 0
self.v = 0
self.t = 0
self.step_count = torch.zeros(1, device=self.device, requires_grad=False)
self.current_lr = self.initial_lr
self.prev_descent = torch.zeros(
(self.parameter_count,), device=self.device, requires_grad=False
)
self.get_default_reward(self)
return self.get_state(self)
def set_writer(self, writer):
self.writer = writer
def close(self):
return True
def render(self, mode: str = "human"):
if mode != "human":
raise NotImplementedError
pass
def get_default_state(self, _):
gradients = self._get_gradients()
self.firstOrderMomentum, self.secondOrderMomentum = self._get_momentum(
gradients
)
(
predictiveChangeVarDiscountedAverage,
predictiveChangeVarUncertainty,
) = self._get_predictive_change_features(
self.current_lr, self.firstOrderMomentum, self.secondOrderMomentum
)
lossVarDiscountedAverage, lossVarUncertainty = self._get_loss_features()
state = {
"predictiveChangeVarDiscountedAverage": predictiveChangeVarDiscountedAverage,
"predictiveChangeVarUncertainty": predictiveChangeVarUncertainty,
"lossVarDiscountedAverage": lossVarDiscountedAverage,
"lossVarUncertainty": lossVarUncertainty,
"currentLR": self.current_lr,
"trainingLoss": self.current_training_loss,
"validationLoss": self.current_validation_loss,
}
return state
def _set_zero_grad(self):
index = 0
for i, p in enumerate(self.model.parameters()):
if p.grad is None:
continue
layer_size = self.layer_sizes[i]
p.grad.zero_()
index += layer_size
def _train_batch_(self):
        (data, target) = next(self.train_loader_it)
data, target = data.to(self.device), target.to(self.device)
self.current_batch_size = data.size()[0]
output = self.model(data)
loss = self.loss_function(output, target)
with backpack(BatchGrad()):
loss.mean().backward()
loss_value = loss.mean()
reward = self._get_validation_loss()
self.loss_batch = loss
self.current_training_loss = torch.unsqueeze(loss_value.detach(), dim=0)
self.train_batch_index += 1
return reward
def get_default_reward(self, _):
try:
reward = self._train_batch_()
except StopIteration:
self.train_batch_index = 0
self.epoch_index += 1
self.train_loader_it = iter(self.train_loader)
reward = self._train_batch_()
return reward
def _get_val_loss(self):
self.model.eval()
validation_loss = torch.zeros(1, device=self.device, requires_grad=False)
with torch.no_grad():
for data, target in self.validation_loader:
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
validation_loss += self.loss_function(output, target).mean()
validation_loss /= len(self.validation_loader.dataset)
self.model.train()
return validation_loss
def _get_validation_loss_(self):
self.model.eval()
        (data, target) = next(self.validation_loader_it)
data, target = data.to(self.device), target.to(self.device)
output = self.model(data)
validation_loss = self.loss_function(output, target).mean()
validation_loss = torch.unsqueeze(validation_loss.detach(), dim=0)
self.current_validation_loss = validation_loss
self.model.train()
return -validation_loss.item()
def _get_validation_loss(self):
try:
validation_loss = self._get_validation_loss_()
except StopIteration:
self.validation_loader_it = iter(self.validation_loader)
validation_loss = self._get_validation_loss_()
return validation_loss
def _get_gradients(self):
gradients = []
for p in self.model.parameters():
if p.grad is None:
continue
gradients.append(p.grad.flatten())
gradients = torch.cat(gradients, dim=0)
return gradients
def _get_momentum(self, gradients):
self.t += 1
self.m = self.beta1 * self.m + (1 - self.beta1) * gradients
self.v = self.beta2 * self.v + (1 - self.beta2) * torch.square(gradients)
bias_corrected_m = self.m / (1 - self.beta1 ** self.t)
bias_corrected_v = self.v / (1 - self.beta2 ** self.t)
return bias_corrected_m, bias_corrected_v
def _get_adam_feature(self, learning_rate, m, v):
epsilon = 1.0e-8
return torch.mul(learning_rate, m / (torch.sqrt(v) + epsilon))
def _get_loss_features(self):
with torch.no_grad():
loss_var = torch.log(torch.var(self.loss_batch))
self.lossVarDiscountedAverage = (
self.discount_factor * self.lossVarDiscountedAverage
+ (1 - self.discount_factor) * loss_var
)
self.lossVarUncertainty = (
self.discount_factor * self.lossVarUncertainty
+ (1 - self.discount_factor)
* (loss_var - self.lossVarDiscountedAverage) ** 2
)
return self.lossVarDiscountedAverage, self.lossVarUncertainty
def _get_predictive_change_features(self, lr, m, v):
batch_gradients = []
for i, (name, param) in enumerate(self.model.named_parameters()):
grad_batch = param.grad_batch.reshape(
self.current_batch_size, self.layer_sizes[i]
)
batch_gradients.append(grad_batch)
batch_gradients = torch.cat(batch_gradients, dim=1)
update_value = self._get_adam_feature(lr, m, v)
predictive_change = torch.log(
torch.var(-1 * torch.matmul(batch_gradients, update_value))
)
self.predictiveChangeVarDiscountedAverage = (
self.discount_factor * self.predictiveChangeVarDiscountedAverage
+ (1 - self.discount_factor) * predictive_change
)
self.predictiveChangeVarUncertainty = (
self.discount_factor * self.predictiveChangeVarUncertainty
+ (1 - self.discount_factor)
* (predictive_change - self.predictiveChangeVarDiscountedAverage) ** 2
)
return (
self.predictiveChangeVarDiscountedAverage,
self.predictiveChangeVarUncertainty,
)
| true
| true
|
790b98c472cbe416f8e5c877f7e9519df3d4f93a
| 4,364
|
py
|
Python
|
contrib/seeds/generate-seeds.py
|
26rahulsingh/wuazi
|
ca3f34333ac63f6270692820bf11ca1a360472be
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
26rahulsingh/wuazi
|
ca3f34333ac63f6270692820bf11ca1a360472be
|
[
"MIT"
] | null | null | null |
contrib/seeds/generate-seeds.py
|
26rahulsingh/wuazi
|
ca3f34333ac63f6270692820bf11ca1a360472be
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
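# Hedged self-check (not part of the original script): parse_spec falls back
# to the default port and maps bare IPv4 into the IPv4-in-IPv6 form described
# in the module docstring.
_host, _port = parse_spec('1.2.3.4', 9999)
assert _port == 9999 and _host[-4:] == bytearray([1, 2, 3, 4])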
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef WAZ_CHAINPARAMSSEEDS_H\n')
g.write('#define WAZ_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the wuazi network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // WAZ_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.395683
| 98
| 0.581118
|
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr:
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr:
sub = [[], []]
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1):
continue
x += 1
assert(x < 2)
else:
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'):
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match:
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1:
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef WAZ_CHAINPARAMSSEEDS_H\n')
g.write('#define WAZ_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the wuazi network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // WAZ_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| true
| true
|
790b98d76451003e9355d48a63c424cb1e65400f
| 6,415
|
py
|
Python
|
google/ads/google_ads/v3/proto/services/campaign_criterion_simulation_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2019-11-30T23:42:39.000Z
|
2019-11-30T23:42:39.000Z
|
google/ads/google_ads/v3/proto/services/campaign_criterion_simulation_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v3/proto/services/campaign_criterion_simulation_service_pb2.py
|
jphanwebstaurant/google-ads-python
|
600812b2afcc4d57f00b47dfe436620ce50bfe9b
|
[
"Apache-2.0"
] | 1
|
2020-09-30T17:04:06.000Z
|
2020-09-30T17:04:06.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/services/campaign_criterion_simulation_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.resources import campaign_criterion_simulation_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_campaign__criterion__simulation__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/services/campaign_criterion_simulation_service.proto',
package='google.ads.googleads.v3.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v3.servicesB\'CampaignCriterionSimulationServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V3.Services\312\002 Google\\Ads\\GoogleAds\\V3\\Services\352\002$Google::Ads::GoogleAds::V3::Services'),
serialized_pb=_b('\nRgoogle/ads/googleads_v3/proto/services/campaign_criterion_simulation_service.proto\x12 google.ads.googleads.v3.services\x1aKgoogle/ads/googleads_v3/proto/resources/campaign_criterion_simulation.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"|\n%GetCampaignCriterionSimulationRequest\x12S\n\rresource_name\x18\x01 \x01(\tB<\xe0\x41\x02\xfa\x41\x36\n4googleads.googleapis.com/CampaignCriterionSimulation2\xc5\x02\n\"CampaignCriterionSimulationService\x12\x81\x02\n\x1eGetCampaignCriterionSimulation\x12G.google.ads.googleads.v3.services.GetCampaignCriterionSimulationRequest\x1a>.google.ads.googleads.v3.resources.CampaignCriterionSimulation\"V\x82\xd3\xe4\x93\x02@\x12>/v3/{resource_name=customers/*/campaignCriterionSimulations/*}\xda\x41\rresource_name\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x8e\x02\n$com.google.ads.googleads.v3.servicesB\'CampaignCriterionSimulationServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V3.Services\xca\x02 Google\\Ads\\GoogleAds\\V3\\Services\xea\x02$Google::Ads::GoogleAds::V3::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_campaign__criterion__simulation__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETCAMPAIGNCRITERIONSIMULATIONREQUEST = _descriptor.Descriptor(
name='GetCampaignCriterionSimulationRequest',
full_name='google.ads.googleads.v3.services.GetCampaignCriterionSimulationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.GetCampaignCriterionSimulationRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002\372A6\n4googleads.googleapis.com/CampaignCriterionSimulation'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=312,
serialized_end=436,
)
DESCRIPTOR.message_types_by_name['GetCampaignCriterionSimulationRequest'] = _GETCAMPAIGNCRITERIONSIMULATIONREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetCampaignCriterionSimulationRequest = _reflection.GeneratedProtocolMessageType('GetCampaignCriterionSimulationRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCAMPAIGNCRITERIONSIMULATIONREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.campaign_criterion_simulation_service_pb2'
,
__doc__ = """Request message for
[CampaignCriterionSimulationService.GetCampaignCriterionSimulation][google.ads.googleads.v3.services.CampaignCriterionSimulationService.GetCampaignCriterionSimulation].
Attributes:
resource_name:
Required. The resource name of the campaign criterion
simulation to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.GetCampaignCriterionSimulationRequest)
))
_sym_db.RegisterMessage(GetCampaignCriterionSimulationRequest)
DESCRIPTOR._options = None
_GETCAMPAIGNCRITERIONSIMULATIONREQUEST.fields_by_name['resource_name']._options = None
_CAMPAIGNCRITERIONSIMULATIONSERVICE = _descriptor.ServiceDescriptor(
name='CampaignCriterionSimulationService',
full_name='google.ads.googleads.v3.services.CampaignCriterionSimulationService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=439,
serialized_end=764,
methods=[
_descriptor.MethodDescriptor(
name='GetCampaignCriterionSimulation',
full_name='google.ads.googleads.v3.services.CampaignCriterionSimulationService.GetCampaignCriterionSimulation',
index=0,
containing_service=None,
input_type=_GETCAMPAIGNCRITERIONSIMULATIONREQUEST,
output_type=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_campaign__criterion__simulation__pb2._CAMPAIGNCRITERIONSIMULATION,
serialized_options=_b('\202\323\344\223\002@\022>/v3/{resource_name=customers/*/campaignCriterionSimulations/*}\332A\rresource_name'),
),
])
_sym_db.RegisterServiceDescriptor(_CAMPAIGNCRITERIONSIMULATIONSERVICE)
DESCRIPTOR.services_by_name['CampaignCriterionSimulationService'] = _CAMPAIGNCRITERIONSIMULATIONSERVICE
# @@protoc_insertion_point(module_scope)
| 56.769912
| 1,247
| 0.833983
|
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.resources import campaign_criterion_simulation_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_campaign__criterion__simulation__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/services/campaign_criterion_simulation_service.proto',
package='google.ads.googleads.v3.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v3.servicesB\'CampaignCriterionSimulationServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V3.Services\312\002 Google\\Ads\\GoogleAds\\V3\\Services\352\002$Google::Ads::GoogleAds::V3::Services'),
serialized_pb=_b('\nRgoogle/ads/googleads_v3/proto/services/campaign_criterion_simulation_service.proto\x12 google.ads.googleads.v3.services\x1aKgoogle/ads/googleads_v3/proto/resources/campaign_criterion_simulation.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"|\n%GetCampaignCriterionSimulationRequest\x12S\n\rresource_name\x18\x01 \x01(\tB<\xe0\x41\x02\xfa\x41\x36\n4googleads.googleapis.com/CampaignCriterionSimulation2\xc5\x02\n\"CampaignCriterionSimulationService\x12\x81\x02\n\x1eGetCampaignCriterionSimulation\x12G.google.ads.googleads.v3.services.GetCampaignCriterionSimulationRequest\x1a>.google.ads.googleads.v3.resources.CampaignCriterionSimulation\"V\x82\xd3\xe4\x93\x02@\x12>/v3/{resource_name=customers/*/campaignCriterionSimulations/*}\xda\x41\rresource_name\x1a\x1b\xca\x41\x18googleads.googleapis.comB\x8e\x02\n$com.google.ads.googleads.v3.servicesB\'CampaignCriterionSimulationServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v3/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V3.Services\xca\x02 Google\\Ads\\GoogleAds\\V3\\Services\xea\x02$Google::Ads::GoogleAds::V3::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_campaign__criterion__simulation__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETCAMPAIGNCRITERIONSIMULATIONREQUEST = _descriptor.Descriptor(
name='GetCampaignCriterionSimulationRequest',
full_name='google.ads.googleads.v3.services.GetCampaignCriterionSimulationRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.services.GetCampaignCriterionSimulationRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002\372A6\n4googleads.googleapis.com/CampaignCriterionSimulation'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=312,
serialized_end=436,
)
DESCRIPTOR.message_types_by_name['GetCampaignCriterionSimulationRequest'] = _GETCAMPAIGNCRITERIONSIMULATIONREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetCampaignCriterionSimulationRequest = _reflection.GeneratedProtocolMessageType('GetCampaignCriterionSimulationRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCAMPAIGNCRITERIONSIMULATIONREQUEST,
__module__ = 'google.ads.googleads_v3.proto.services.campaign_criterion_simulation_service_pb2'
,
__doc__ = """Request message for
[CampaignCriterionSimulationService.GetCampaignCriterionSimulation][google.ads.googleads.v3.services.CampaignCriterionSimulationService.GetCampaignCriterionSimulation].
Attributes:
resource_name:
Required. The resource name of the campaign criterion
simulation to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.services.GetCampaignCriterionSimulationRequest)
))
_sym_db.RegisterMessage(GetCampaignCriterionSimulationRequest)
DESCRIPTOR._options = None
_GETCAMPAIGNCRITERIONSIMULATIONREQUEST.fields_by_name['resource_name']._options = None
_CAMPAIGNCRITERIONSIMULATIONSERVICE = _descriptor.ServiceDescriptor(
name='CampaignCriterionSimulationService',
full_name='google.ads.googleads.v3.services.CampaignCriterionSimulationService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=439,
serialized_end=764,
methods=[
_descriptor.MethodDescriptor(
name='GetCampaignCriterionSimulation',
full_name='google.ads.googleads.v3.services.CampaignCriterionSimulationService.GetCampaignCriterionSimulation',
index=0,
containing_service=None,
input_type=_GETCAMPAIGNCRITERIONSIMULATIONREQUEST,
output_type=google_dot_ads_dot_googleads__v3_dot_proto_dot_resources_dot_campaign__criterion__simulation__pb2._CAMPAIGNCRITERIONSIMULATION,
serialized_options=_b('\202\323\344\223\002@\022>/v3/{resource_name=customers/*/campaignCriterionSimulations/*}\332A\rresource_name'),
),
])
_sym_db.RegisterServiceDescriptor(_CAMPAIGNCRITERIONSIMULATIONSERVICE)
DESCRIPTOR.services_by_name['CampaignCriterionSimulationService'] = _CAMPAIGNCRITERIONSIMULATIONSERVICE
# @@protoc_insertion_point(module_scope)
| true
| true
|
790b99b056afe3a626533ebe8e9f5d288f652b02
| 3,533
|
py
|
Python
|
infer.py
|
jomavera/DRL_HFV
|
043e32805ec79fd35281b864659c194d7b89f5bc
|
[
"MIT"
] | 114
|
2020-02-12T08:55:11.000Z
|
2022-02-28T02:05:30.000Z
|
infer.py
|
jomavera/DRL_HFV
|
043e32805ec79fd35281b864659c194d7b89f5bc
|
[
"MIT"
] | null | null | null |
infer.py
|
jomavera/DRL_HFV
|
043e32805ec79fd35281b864659c194d7b89f5bc
|
[
"MIT"
] | 6
|
2020-02-14T19:25:30.000Z
|
2021-10-04T14:54:00.000Z
|
import numpy as np
from env import Env
from models import PolicyNet, Critic
from utils import one_hot
import torch
from torch.optim import Adam
import time
import os
from datetime import datetime
import math
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
#------------------------SET PARAMETERS----------------------------
SEED = 17
BATCH_SIZE = 128
N_NODES = 11
N_DEPOT = 1
NUM_LAYERS = 1
CAPACITY = [20,15,10]
MAX_DEMAND = 10
N_VEHICLES = len(CAPACITY)
DIM_STATIC = 2
DIM_DYNAMIC = 1 + N_VEHICLES
DIM_LOAD = N_VEHICLES
DIM_EMBED = 128
MAX_EP_LEN = 16
GAMMA = 0.99
ENTROPY_REG = 0.01
MAX_GRAD_NORM = 2
DROPOUT = 0.1
EMBED_TYPE = 'conv1d'
LOG_INTERVAL = 200
#----------------INITIALIZE ENVIROMENT AND POLICIES----------------
env_test = Env(seed = SEED, batch_size = BATCH_SIZE, capacity = CAPACITY,
n_nodes = N_NODES, n_depot = N_DEPOT, max_demand = MAX_DEMAND, n_agents = N_VEHICLES)
policy = [PolicyNet(batch_size = BATCH_SIZE, n_nodes = N_NODES, n_agents=N_VEHICLES, num_layers = NUM_LAYERS,
dim_s = DIM_STATIC, dim_d = DIM_DYNAMIC,
dim_embed = DIM_EMBED, n_glimpses = 0, embeding_type=EMBED_TYPE,
dropout = DROPOUT).to(device) for i in range(N_VEHICLES)]
#------------------LOAD TRAINED MODEL---------------------------
model_dir = 'weights/model_exp_1.pt'
policy_name = "policy_agent_X"
if os.path.isfile(model_dir):
checkpoint = torch.load(model_dir,map_location=device)
else:
raise ValueError('No model file!')
for agent_id in range(N_VEHICLES):
p_name = policy_name.replace("X",str(agent_id))
policy[agent_id].load_state_dict(checkpoint[p_name])
#-----------------RUN TRAINED POLICY----------------
num_epochs = math.ceil(1000/BATCH_SIZE)
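# math.ceil(1000/BATCH_SIZE) batches guarantee at least 1000 test instances are evaluated.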
total_tests = []
total_times = []
for i in range(num_epochs):
start = time.time()
o_t, d_t, r_t = env_test.reset(), False, 0
actions_ep = []
log_probs_ep = []
rewards_ep = []
values_ep = []
last_hh_t = [None]*N_VEHICLES
    for t in range(int(MAX_EP_LEN)):
actions = []
actions_one_hot = []
log_probs = []
values = []
for agent_id in range(N_VEHICLES) :
model = policy[agent_id].eval()
logits, prob , log_p, last_hh_t[agent_id] = model(o_t, last_hh_t[agent_id], agent_id)
#--------- GREEDY POLICY ------------
act = torch.argmax(prob, dim =1) # [ batch size ]
actions.append(act.detach())
ot_2, d_t, r_t = env_test.step(act.detach().unsqueeze(1), agent_id)
o_t = ot_2
values.append( r_t )
r_step = torch.stack(values, dim = 1) #[batch_size, n_agents]
a = torch.stack(actions, dim = 1) #[batch_size, n_agents]
actions_ep.append(a)
rewards_ep.append(r_step)
end = time.time()
rewards = torch.stack(rewards_ep, dim = 2 ).sum(dim=2).sum(dim=1) #[batch_size, n_agents, ep_len]
total_tests.append(rewards)
total_times.append((end-start)/BATCH_SIZE)
#------------------- SAVE RESULTS -----------------------
rewards_total = torch.stack(total_tests, dim=1).reshape(-1,)
np_results = rewards_total.numpy()
np.save('vrp_results_RL',np_results)
np_runtimes = np.array(total_times).reshape(-1,)
np.save('vrp_runtimes_RL',np_runtimes)
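# The saved arrays can be inspected afterwards, e.g. (illustrative):
#   np.load('vrp_results_RL.npy').mean()   # average total reward over the test instances
#   np.load('vrp_runtimes_RL.npy').mean()  # average per-instance inference time in seconds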
| 33.018692
| 109
| 0.589301
|
import numpy as np
from env import Env
from models import PolicyNet, Critic
from utils import one_hot
import torch
from torch.optim import Adam
import time
import os
from datetime import datetime
import math
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
SEED = 17
BATCH_SIZE = 128
N_NODES = 11
N_DEPOT = 1
NUM_LAYERS = 1
CAPACITY = [20,15,10]
MAX_DEMAND = 10
N_VEHICLES = len(CAPACITY)
DIM_STATIC = 2
DIM_DYNAMIC = 1 + N_VEHICLES
DIM_LOAD = N_VEHICLES
DIM_EMBED = 128
MAX_EP_LEN = 16
GAMMA = 0.99
ENTROPY_REG = 0.01
MAX_GRAD_NORM = 2
DROPOUT = 0.1
EMBED_TYPE = 'conv1d'
LOG_INTERVAL = 200
env_test = Env(seed = SEED, batch_size = BATCH_SIZE, capacity = CAPACITY,
n_nodes = N_NODES, n_depot = N_DEPOT, max_demand = MAX_DEMAND, n_agents = N_VEHICLES)
policy = [PolicyNet(batch_size = BATCH_SIZE, n_nodes = N_NODES, n_agents=N_VEHICLES, num_layers = NUM_LAYERS,
dim_s = DIM_STATIC, dim_d = DIM_DYNAMIC,
dim_embed = DIM_EMBED, n_glimpses = 0, embeding_type=EMBED_TYPE,
dropout = DROPOUT).to(device) for i in range(N_VEHICLES)]
model_dir = 'weights/model_exp_1.pt'
policy_name = "policy_agent_X"
if os.path.isfile(model_dir):
checkpoint = torch.load(model_dir,map_location=device)
else:
raise ValueError('No model file!')
for agent_id in range(N_VEHICLES):
p_name = policy_name.replace("X",str(agent_id))
policy[agent_id].load_state_dict(checkpoint[p_name])
num_epochs = math.ceil(1000/BATCH_SIZE)
total_tests = []
total_times = []
for i in range(num_epochs):
start = time.time()
o_t, d_t, r_t = env_test.reset(), False, 0
actions_ep = []
log_probs_ep = []
rewards_ep = []
values_ep = []
last_hh_t = [None]*N_VEHICLES
    for t in range(int(MAX_EP_LEN)):
actions = []
actions_one_hot = []
log_probs = []
values = []
for agent_id in range(N_VEHICLES) :
model = policy[agent_id].eval()
logits, prob , log_p, last_hh_t[agent_id] = model(o_t, last_hh_t[agent_id], agent_id)
act = torch.argmax(prob, dim =1)
actions.append(act.detach())
ot_2, d_t, r_t = env_test.step(act.detach().unsqueeze(1), agent_id)
o_t = ot_2
values.append( r_t )
r_step = torch.stack(values, dim = 1)
a = torch.stack(actions, dim = 1)
actions_ep.append(a)
rewards_ep.append(r_step)
end = time.time()
rewards = torch.stack(rewards_ep, dim = 2 ).sum(dim=2).sum(dim=1)
total_tests.append(rewards)
total_times.append((end-start)/BATCH_SIZE)
rewards_total = torch.stack(total_tests, dim=1).reshape(-1,)
np_results = rewards_total.numpy()
np.save('vrp_results_RL',np_results)
np_runtimes = np.array(total_times).reshape(-1,)
np.save('vrp_runtimes_RL',np_runtimes)
| true
| true
|
790b99b7c8510d3b99bd51ef86e99adaa01fb768
| 183
|
py
|
Python
|
modules/isrunning.py
|
ShaderLight/autochampselect
|
b7d346cc99011b5f84867f3a01dc2e8d815c05d7
|
[
"MIT"
] | null | null | null |
modules/isrunning.py
|
ShaderLight/autochampselect
|
b7d346cc99011b5f84867f3a01dc2e8d815c05d7
|
[
"MIT"
] | null | null | null |
modules/isrunning.py
|
ShaderLight/autochampselect
|
b7d346cc99011b5f84867f3a01dc2e8d815c05d7
|
[
"MIT"
] | null | null | null |
from subprocess import check_output
def isrunning(processName):
    # Decode the command output so we compare against plain text rather than
    # the repr of a bytes object.
    tasklist = check_output('tasklist', shell=False).decode(errors='ignore')
    return processName in tasklist
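# Usage sketch (Windows-only, since it shells out to tasklist; the process name
# below is illustrative):
#   isrunning('notepad.exe')  # -> True while Notepad is open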
| 26.142857
| 52
| 0.759563
|
from subprocess import check_output
def isrunning(processName):
    tasklist = check_output('tasklist', shell=False).decode(errors='ignore')
    return processName in tasklist
| true
| true
|
790b9a86278c650b398b522e03eac34f482469c7
| 9,608
|
py
|
Python
|
python/tests/kat/t_redirect.py
|
imoisharma/emissary
|
5346ccb06673827a6a2e51ddaf92925f60bd9de9
|
[
"Apache-2.0"
] | 3,438
|
2017-04-23T23:10:18.000Z
|
2021-06-02T10:11:45.000Z
|
python/tests/kat/t_redirect.py
|
imoisharma/emissary
|
5346ccb06673827a6a2e51ddaf92925f60bd9de9
|
[
"Apache-2.0"
] | 1,906
|
2017-04-11T17:47:54.000Z
|
2021-06-02T14:20:11.000Z
|
python/tests/kat/t_redirect.py
|
imoisharma/emissary
|
5346ccb06673827a6a2e51ddaf92925f60bd9de9
|
[
"Apache-2.0"
] | 591
|
2017-04-17T17:50:08.000Z
|
2021-06-01T08:20:34.000Z
|
from kat.harness import Query, EDGE_STACK
from abstract_tests import AmbassadorTest, HTTP
from abstract_tests import ServiceType
from selfsigned import TLSCerts
from kat.utils import namespace_manifest
#####
# XXX This file is annoying.
#
# RedirectTestsWithProxyProto and RedirectTestsInvalidSecret used to be subclasses of RedirectTests,
# which makes a certain amount of sense. Problem is that when I wanted to modify just RedirectTests
# to have secrets defined, that ended up affecting the two subclasses in bad ways. There's basically
# no way to subclass an AmbassadorTest without having your base class be run separately, which isn't
# what I wanted here. Sigh.
class RedirectTests(AmbassadorTest):
target: ServiceType
edge_stack_cleartext_host = False
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
# only check https urls since test readiness will only end up barfing on redirect
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def manifests(self):
return namespace_manifest("redirect-namespace") + f"""
---
apiVersion: v1
kind: Secret
metadata:
name: redirect-cert
namespace: redirect-namespace
type: kubernetes.io/tls
data:
tls.crt: {TLSCerts["localhost"].k8s_crt}
tls.key: {TLSCerts["localhost"].k8s_key}
---
apiVersion: v1
kind: Secret
metadata:
name: redirect-cert
type: kubernetes.io/tls
data:
tls.crt: {TLSCerts["localhost"].k8s_crt}
tls.key: {TLSCerts["localhost"].k8s_key}
""" + super().manifests()
def config(self):
# Use self here, not self.target, because we want the TLS module to
# be annotated on the Ambassador itself.
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: tls
ambassador_id: [{self.ambassador_id}]
config:
server:
enabled: True
secret: redirect-cert
redirect_cleartext_from: 8080
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
# [0]
yield Query(self.url("tls-target/", scheme="http"), expected=301)
# [1] -- PHASE 2
yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors",
scheme="https"),
insecure=True,
phase=2)
def check(self):
# For query 0, check the redirection target.
assert len(self.results[0].headers['Location']) > 0
assert self.results[0].headers['Location'][0].find('/tls-target/') > 0
# For query 1, we require no errors.
# XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
errors = self.results[1].json
assert(len(errors) == 0)
class RedirectTestsWithProxyProto(AmbassadorTest):
target: ServiceType
def init(self):
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
# only check https urls since test readiness will only end up barfing on redirect
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: ambassador
config:
use_proxy_proto: true
enable_ipv6: true
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
# TODO (concaf): FWIW, this query only covers one side of the story. This tests that this is the correct
# deviation from the normal behavior (301 response), but does not test a 301 when proxy proto is actually sent.
# This is because net/http does not yet support adding proxy proto to HTTP requests, and hence it's difficult
# to test with kat. We will need to open a raw TCP connection (e.g. telnet/nc) and send the entire HTTP Request
# in plaintext to test this behavior (or use curl with --haproxy-protocol).
yield Query(self.url("tls-target/"), error=[ "EOF", "connection reset by peer" ])
# We can't do the error check until we have the PROXY client mentioned above.
# # [1] -- PHASE 2
# yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), phase=2)
#
# def check(self):
# # We don't have to check anything about query 0, the "expected" clause is enough.
#
# # For query 1, we require no errors.
# # XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
# errors = self.results[1].json
# assert(len(errors) == 0)
class RedirectTestsInvalidSecret(AmbassadorTest):
"""
This test tests that even if the specified secret is invalid, the rest of TLS Context should
go through. In this case, even though the secret does not exist, redirect_cleartext_from
should still take effect.
"""
target: ServiceType
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
# only check https urls since test readiness will only end up barfing on redirect
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: tls
ambassador_id: [{self.ambassador_id}]
config:
server:
enabled: True
secret: does-not-exist-secret
redirect_cleartext_from: 8080
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
# [0]
yield Query(self.url("tls-target/"), expected=301)
# There's kind of no way to do this. Looks like we need to speak HTTP to the port on which we
# think the server is listening for HTTPS? This is a bad config all the way around, really.
# # [1] -- PHASE 2
# yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors", scheme="https"), phase=2)
#
# def check(self):
# # We don't have to check anything about query 0, the "expected" clause is enough.
#
# # For query 1, we require no errors.
# # XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
# errors = self.results[1].json
# assert(len(errors) == 0)
class XFPRedirect(AmbassadorTest):
parent: AmbassadorTest
target: ServiceType
edge_stack_cleartext_host = False
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.target = HTTP()
self.add_default_http_listener = False
self.add_default_https_listener = False
def manifests(self):
return self.format('''
---
apiVersion: getambassador.io/v3alpha1
kind: Listener
metadata:
name: ambassador-listener-8080
spec:
ambassador_id: [{self.ambassador_id}]
port: 8080
protocol: HTTP
securityModel: XFP
l7Depth: 1
hostBinding:
namespace:
from: ALL
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
name: weird-xfp-test-host
spec:
ambassador_id: [{self.ambassador_id}]
requestPolicy:
insecure:
action: Redirect
''') + super().manifests()
def config(self):
yield self.target, self.format("""
kind: Module
name: ambassador
config:
use_remote_address: false
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: {self.name}
hostname: "*"
prefix: /{self.name}/
service: {self.target.path.fqdn}
""")
def queries(self):
# [0]
yield Query(self.url(self.name + "/target/"), headers={ "X-Forwarded-Proto": "http" }, expected=301)
# [1]
yield Query(self.url(self.name + "/target/"), headers={ "X-Forwarded-Proto": "https" }, expected=200)
# [2] -- PHASE 2
yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), headers={ "X-Forwarded-Proto": "https" }, phase=2)
def check(self):
# For query 0, check the redirection target.
expected_location = ["https://" + self.path.fqdn + "/" + self.name + "/target/"]
actual_location = self.results[0].headers['Location']
assert actual_location == expected_location, "Expected redirect location to be {}, got {} instead".format(
expected_location,
actual_location
)
# For query 1, we don't have to check anything, the "expected" clause is enough.
# For query 2, we require no errors.
# XXX Ew. If self.results[2].json is empty, the harness won't convert it to a response.
errors = self.results[2].json
assert(len(errors) == 0)
def requirements(self):
# We're replacing super()'s requirements deliberately here: we need the XFP header or they can't work.
yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"X-Forwarded-Proto": "https"}))
yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"X-Forwarded-Proto": "https"}))
| 31.398693
| 127
| 0.658097
|
from kat.harness import Query, EDGE_STACK
from abstract_tests import AmbassadorTest, HTTP
from abstract_tests import ServiceType
from selfsigned import TLSCerts
from kat.utils import namespace_manifest
class RedirectTests(AmbassadorTest):
target: ServiceType
edge_stack_cleartext_host = False
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def manifests(self):
return namespace_manifest("redirect-namespace") + f"""
---
apiVersion: v1
kind: Secret
metadata:
name: redirect-cert
namespace: redirect-namespace
type: kubernetes.io/tls
data:
tls.crt: {TLSCerts["localhost"].k8s_crt}
tls.key: {TLSCerts["localhost"].k8s_key}
---
apiVersion: v1
kind: Secret
metadata:
name: redirect-cert
type: kubernetes.io/tls
data:
tls.crt: {TLSCerts["localhost"].k8s_crt}
tls.key: {TLSCerts["localhost"].k8s_key}
""" + super().manifests()
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: tls
ambassador_id: [{self.ambassador_id}]
config:
server:
enabled: True
secret: redirect-cert
redirect_cleartext_from: 8080
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
yield Query(self.url("tls-target/", scheme="http"), expected=301)
yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors",
scheme="https"),
insecure=True,
phase=2)
def check(self):
assert len(self.results[0].headers['Location']) > 0
assert self.results[0].headers['Location'][0].find('/tls-target/') > 0
errors = self.results[1].json
assert(len(errors) == 0)
class RedirectTestsWithProxyProto(AmbassadorTest):
target: ServiceType
def init(self):
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
# only check https urls since test readiness will only end up barfing on redirect
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: ambassador
config:
use_proxy_proto: true
enable_ipv6: true
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
yield Query(self.url("tls-target/"), error=[ "EOF", "connection reset by peer" ])
class RedirectTestsInvalidSecret(AmbassadorTest):
target: ServiceType
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
# only check https urls since test readiness will only end up barfing on redirect
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: tls
ambassador_id: [{self.ambassador_id}]
config:
server:
enabled: True
secret: does-not-exist-secret
redirect_cleartext_from: 8080
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
# [0]
yield Query(self.url("tls-target/"), expected=301)
class XFPRedirect(AmbassadorTest):
parent: AmbassadorTest
target: ServiceType
edge_stack_cleartext_host = False
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.target = HTTP()
self.add_default_http_listener = False
self.add_default_https_listener = False
def manifests(self):
return self.format('''
---
apiVersion: getambassador.io/v3alpha1
kind: Listener
metadata:
name: ambassador-listener-8080
spec:
ambassador_id: [{self.ambassador_id}]
port: 8080
protocol: HTTP
securityModel: XFP
l7Depth: 1
hostBinding:
namespace:
from: ALL
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
name: weird-xfp-test-host
spec:
ambassador_id: [{self.ambassador_id}]
requestPolicy:
insecure:
action: Redirect
''') + super().manifests()
def config(self):
yield self.target, self.format("""
kind: Module
name: ambassador
config:
use_remote_address: false
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: {self.name}
hostname: "*"
prefix: /{self.name}/
service: {self.target.path.fqdn}
""")
def queries(self):
yield Query(self.url(self.name + "/target/"), headers={ "X-Forwarded-Proto": "http" }, expected=301)
yield Query(self.url(self.name + "/target/"), headers={ "X-Forwarded-Proto": "https" }, expected=200)
yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), headers={ "X-Forwarded-Proto": "https" }, phase=2)
def check(self):
expected_location = ["https://" + self.path.fqdn + "/" + self.name + "/target/"]
actual_location = self.results[0].headers['Location']
assert actual_location == expected_location, "Expected redirect location to be {}, got {} instead".format(
expected_location,
actual_location
)
# For query 2, we require no errors.
# XXX Ew. If self.results[2].json is empty, the harness won't convert it to a response.
errors = self.results[2].json
assert(len(errors) == 0)
def requirements(self):
yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"X-Forwarded-Proto": "https"}))
yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"X-Forwarded-Proto": "https"}))
| true
| true
|
790b9aabc2f6f1044efc0d8b453c3efab8b0f977
| 15,595
|
py
|
Python
|
scope/timecourse/base_handler.py
|
drew-sinha/rpc-scope
|
268864097b5b7d123a842f216adc446ec6b32d01
|
[
"MIT"
] | null | null | null |
scope/timecourse/base_handler.py
|
drew-sinha/rpc-scope
|
268864097b5b7d123a842f216adc446ec6b32d01
|
[
"MIT"
] | null | null | null |
scope/timecourse/base_handler.py
|
drew-sinha/rpc-scope
|
268864097b5b7d123a842f216adc446ec6b32d01
|
[
"MIT"
] | null | null | null |
# This code is licensed under the MIT License (see LICENSE file for details)
import concurrent.futures as futures
import contextlib
import inspect
import json
import logging
import pathlib
import platform
import sys
import time
from zplib import datafile
from zplib.image import threaded_io
from elegant import load_data
from ..util import log_util
from ..util import timer
class DummyIO:
def __init__(self, logger):
self.logger = logger
def write(self, *args, **kws):
self.logger.warning('Trying to write files, but file writing was disabled!')
def wait(self):
return
class TimepointHandler:
IMAGE_COMPRESSION = threaded_io.COMPRESSION.DEFAULT
LOG_LEVEL = logging.INFO
IO_THREADS = 4
MAX_IO_JOBS = 256 # max pending image writes before the threaded IO will block.
def __init__(self, data_dir, log_level=None, scope_host='127.0.0.1', dry_run=False):
"""Setup the basic code to take a single timepoint from a timecourse experiment.
Parameters:
data_dir: directory where the data and metadata-files should be read/written.
            log_level: level from the logging library at which to log information to the
logfile in data_dir. (Subclasses can log information with self.logger)
If not specified, fall back to the class attribute LOG_LEVEL. This
allows a subclass to set a default log level, which still can be
over-ridden from the command line.
scope_host: IP address to connect to the scope server. If None, run without
a scope server.
dry_run: if True, do not write any files (including log files; log entries
will be printed to the console).
"""
self.data_dir = pathlib.Path(data_dir).resolve() # get an absolute path
self.experiment_metadata_path = self.data_dir / 'experiment_metadata.json'
with self.experiment_metadata_path.open('r') as f:
self.experiment_metadata = json.load(f)
self.experiment_metadata['node'] = platform.node()
self.positions = self.experiment_metadata['positions'] # dict mapping names to (x,y,z) stage positions
self.skip_positions = set()
annotations = load_data.read_annotations(self.data_dir)
for position in self.positions.keys():
if position in annotations:
position_annotations, timepoint_annotations = annotations[position]
if position_annotations.get('exclude'):
self.skip_positions.add(position)
else:
for annotation in timepoint_annotations.values():
if annotation.get('stage') == 'dead':
self.skip_positions.add(position)
break
if scope_host is not None:
from .. import scope_client
self.scope = scope_client.ScopeClient(scope_host)
if hasattr(self.scope, 'camera'):
self.scope.camera.return_to_default_state()
else:
self.scope = None
self.write_files = not dry_run
self.logger = log_util.get_logger(str(data_dir))
if log_level is None:
log_level = self.LOG_LEVEL
elif isinstance(log_level, str):
log_level = getattr(logging, log_level)
self.logger.setLevel(log_level)
if self.write_files:
self.image_io = threaded_io.ThreadedIO(self.IO_THREADS, self.MAX_IO_JOBS)
handler = logging.FileHandler(str(self.data_dir/'acquisitions.log'))
else:
self.image_io = DummyIO(self.logger)
handler = logging.StreamHandler()
handler.setFormatter(log_util.get_formatter())
self.logger.addHandler(handler)
self._job_thread = None
def heartbeat(self):
print('heartbeat') # write a line to stdout to serve as a heartbeat
@contextlib.contextmanager
def heartbeat_timer(self):
heartbeat_timer = timer.Timer(self.heartbeat, interval=60)
yield
heartbeat_timer.stop()
@contextlib.contextmanager
def debug_timing(self, task):
t0 = time.time()
yield
self.logger.debug(f'{task} complete ({{:.1f}} seconds)', time.time() - t0)
def run_all_positions(self):
for position_name, position_coords in sorted(self.positions.items()):
if position_name not in self.skip_positions:
self.logger.info(f'Acquiring Position {position_name}')
with self.debug_timing(f'Position {position_name}'):
self.run_position(position_name, position_coords)
self.heartbeat()
def run_timepoint(self, scheduled_start):
try:
self.heartbeat()
self.timepoint_prefix = time.strftime('%Y-%m-%dt%H%M')
self.scheduled_start = scheduled_start
self.start_time = time.time()
self._job_futures = []
self.logger.info('Starting timepoint {} ({:.0f} minutes after scheduled)', self.timepoint_prefix,
(self.start_time-self.scheduled_start)/60)
# record the timepoint prefix and timestamp for this timepoint into the
# experiment metadata
self.experiment_metadata.setdefault('timepoints', []).append(self.timepoint_prefix)
self.experiment_metadata.setdefault('timestamps', []).append(self.start_time)
self.logger.info('Configuring timepoint')
with self.debug_timing('Configuration'):
self.configure_timepoint()
self.heartbeat()
self.run_all_positions()
self.finalize_timepoint()
self.heartbeat()
self.end_time = time.time()
self.experiment_metadata.setdefault('durations', []).append(self.end_time - self.start_time)
if self.write_files:
self._write_atomic_json(self.experiment_metadata_path, self.experiment_metadata)
run_again = self.skip_positions != self.positions.keys() # don't run again if we're skipping all the positions
# wait for all queued background jobs to complete.
with self.debug_timing('Image IO'), self.heartbeat_timer():
self.image_io.wait()
if self._job_futures:
# wait for all queued background jobs to complete.
with self.debug_timing('Background jobs'), self.heartbeat_timer():
futures.wait(self._job_futures)
# now get the result() from each future, which will raise any errors encountered
# during the execution.
[f.result() for f in self._job_futures]
self.cleanup()
self.logger.info('Timepoint {} ended ({:.0f} minutes after starting)', self.timepoint_prefix,
(time.time()-self.start_time)/60)
if run_again:
return self.get_next_run_time()
except:
self.logger.error('Exception in timepoint:', exc_info=True)
raise
def add_background_job(self, function, *args, **kws):
"""Add a function with parameters *args and **kws to a queue to be completed
asynchronously with the rest of the timepoint acquisition. This will be
run in a background thread, so make sure that the function acts in a
threadsafe manner. (NB: self.logger *is* thread-safe.)
        All queued functions will be waited on before the timepoint
ends. Any exceptions will be propagated to the foreground after all
functions queued either finish or raise an exception.
"""
if self._job_thread is None:
self._job_thread = futures.ThreadPoolExecutor(max_workers=1)
self._job_futures.append(self._job_thread.submit(function, *args, **kws))
def _position_metadata(self, position_name):
position_dir = self.data_dir / position_name
metadata_path = position_dir / 'position_metadata.json'
if metadata_path.exists():
with metadata_path.open('r') as f:
position_metadata = json.load(f)
else:
position_metadata = []
return position_dir, metadata_path, position_metadata
def run_position(self, position_name, position_coords):
"""Do everything required for taking a timepoint at a single position
EXCEPT focusing / image acquisition. This includes moving the stage to
the right x,y position, loading and saving metadata, and saving image
data, as generated by acquire_images()"""
timestamp = time.time()
position_dir, metadata_path, position_metadata = self._position_metadata(position_name)
position_dir.mkdir(exist_ok=True)
if self.scope is not None:
with self.debug_timing('Stage positioning'):
self.scope.stage.position = position_coords
        images, image_names, new_metadata = self.acquire_images(position_name, position_dir, position_metadata)
        # Guard before annotating: acquire_images() may legitimately return None metadata.
        if new_metadata is None:
            new_metadata = {}
        new_metadata['timestamp'] = timestamp
        new_metadata['timepoint'] = self.timepoint_prefix
        position_metadata.append(new_metadata)
        self.finalize_acquisition(position_name, position_dir, position_metadata)
        image_paths = [position_dir / (self.timepoint_prefix + ' ' + name) for name in image_names]
        if self.write_files:
            self.image_io.write(images, image_paths, self.IMAGE_COMPRESSION)
            self._write_atomic_json(metadata_path, position_metadata)
def _write_atomic_json(self, out_path, data):
datafile.json_encode_atomic_legible_to_file(data, out_path)
def configure_timepoint(self):
"""Override this method with global configuration for the image acquisitions
(e.g. camera configuration). Member variables 'scope', 'experiment_metadata',
'timepoint_prefix', and 'positions' may be specifically useful."""
pass
def finalize_timepoint(self):
"""Override this method with global finalization after the images have been
acquired for each position. Useful for altering the self.experiment_metadata
dictionary before it is saved out.
"""
pass
def finalize_acquisition(self, position_name, position_dir, position_metadata):
"""Called after acquiring images for a single postiion.
Parameters:
position_name: name of the position in the experiment metadata file.
position_dir: pathlib.Path object representing the directory where
position-specific data files and outputs are written. Useful for
reading previous image data.
position_metadata: list of all the stored position metadata from the
previous timepoints, in chronological order. This includes data
from the latest timepoint, accessible as: position_metadata[-1].
"""
pass
def cleanup(self):
"""Override this method with any global cleanup/finalization tasks
that may be necessary."""
pass
def get_next_run_time(self):
"""Override this method to return when the next timepoint run should be
scheduled. Returning None means no future runs will be scheduled."""
return None
def acquire_images(self, position_name, position_dir, position_metadata):
"""Override this method in a subclass to define the image-acquisition sequence.
All most subclasses will need to do is return the following as a tuple:
(images, image_names, new_metadata), where:
images is a list of the acquired images
image_names is a list of the generic names for each of these images
(not timepoint- or position-specific; e.g. 'GFP.png' or some such)
new_metadata is a dictionary of timepoint-specific information, such
as the latest focal plane z-position or similar. This will be
made available to future acquisition runs via the 'position_metadata'
argument described below.
The images and metadata will be written out by the superclass, and
must not be written by the overriding subclass.
Optionally, subclasses may choose to enter 'position_name' into the
self.skip_positions set to indicate that in the future this position
should not be acquired. (E.g. the worm is dead.)
Parameters:
position_name: identifier for this image-acquisition position. Useful
for adding this position to the skip_positions set.
position_dir: pathlib.Path object representing the directory where
position-specific data files and outputs should be written. Useful
only if additional data needs to be read in or out during
acquisition. (E.g. a background model or similar.)
position_metadata: list of all the stored position metadata from the
previous timepoints, in chronological order. In particular, this
dictionary is guaranteed to contain 'timestamp' which is the
time.time() at which that acquisition was started. Other values
(such as the latest focal plane) stored by previous acquisition
runs will also be available. The most recent metadata will be in
position_metadata[-1].
"""
raise NotImplementedError()
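        # A minimal sketch of an override (names and return values here are
        # illustrative, not part of this module's API):
        #
        #   def acquire_images(self, position_name, position_dir, position_metadata):
        #       image = self.scope.camera.acquire_image()
        #       return [image], ['bf.png'], {'notes': 'single brightfield image'}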
@classmethod
def main(cls, timepoint_dir=None, **cls_init_args):
"""Main method to run a timepoint.
Parse sys.argv to find an (optional) scheduled_start time as a positional
argument. Any arguments that contain an '=' will be assumed to be
python variable definitions to pass to the class init method. (Leading
'-' or '--' will be stripped, and internal '-'s will be converted to '_'.)
e.g. this allows the following usage: ./acquire.py --dry-run=True --log-level=logging.DEBUG
Parameters:
timepoint_dir: location of timepoint directory. If not specified, default
to the parent dir of the file that defines the class that this
method is called on.
**cls_init_args: dict of arguments to pass to the class init method.
"""
if timepoint_dir is None:
timepoint_dir = pathlib.Path(inspect.getfile(cls)).parent
scheduled_start = None
for arg in sys.argv[1:]:
if arg.count('='):
while arg.startswith('-'):
arg = arg[1:]
arg = arg.replace('-', '_')
# execute the argument in a restricted namespace containing only 'logging', and store the
# result in the args to pass to the class.
exec(arg, dict(logging=logging), cls_init_args)
elif scheduled_start is None:
scheduled_start = float(arg)
else:
raise ValueError('More than one schedule start time provided')
if scheduled_start is None:
scheduled_start = time.time()
handler = cls(timepoint_dir, **cls_init_args)
next_run_time = handler.run_timepoint(scheduled_start)
if next_run_time:
print('next run:{}'.format(next_run_time))
| 47.837423
| 122
| 0.648092
|
import concurrent.futures as futures
import contextlib
import inspect
import json
import logging
import pathlib
import platform
import sys
import time
from zplib import datafile
from zplib.image import threaded_io
from elegant import load_data
from ..util import log_util
from ..util import timer
class DummyIO:
def __init__(self, logger):
self.logger = logger
def write(self, *args, **kws):
self.logger.warning('Trying to write files, but file writing was disabled!')
def wait(self):
return
class TimepointHandler:
IMAGE_COMPRESSION = threaded_io.COMPRESSION.DEFAULT
LOG_LEVEL = logging.INFO
IO_THREADS = 4
MAX_IO_JOBS = 256
def __init__(self, data_dir, log_level=None, scope_host='127.0.0.1', dry_run=False):
self.data_dir = pathlib.Path(data_dir).resolve()
self.experiment_metadata_path = self.data_dir / 'experiment_metadata.json'
with self.experiment_metadata_path.open('r') as f:
self.experiment_metadata = json.load(f)
self.experiment_metadata['node'] = platform.node()
self.positions = self.experiment_metadata['positions']
self.skip_positions = set()
annotations = load_data.read_annotations(self.data_dir)
for position in self.positions.keys():
if position in annotations:
position_annotations, timepoint_annotations = annotations[position]
if position_annotations.get('exclude'):
self.skip_positions.add(position)
else:
for annotation in timepoint_annotations.values():
if annotation.get('stage') == 'dead':
self.skip_positions.add(position)
break
if scope_host is not None:
from .. import scope_client
self.scope = scope_client.ScopeClient(scope_host)
if hasattr(self.scope, 'camera'):
self.scope.camera.return_to_default_state()
else:
self.scope = None
self.write_files = not dry_run
self.logger = log_util.get_logger(str(data_dir))
if log_level is None:
log_level = self.LOG_LEVEL
elif isinstance(log_level, str):
log_level = getattr(logging, log_level)
self.logger.setLevel(log_level)
if self.write_files:
self.image_io = threaded_io.ThreadedIO(self.IO_THREADS, self.MAX_IO_JOBS)
handler = logging.FileHandler(str(self.data_dir/'acquisitions.log'))
else:
self.image_io = DummyIO(self.logger)
handler = logging.StreamHandler()
handler.setFormatter(log_util.get_formatter())
self.logger.addHandler(handler)
self._job_thread = None
def heartbeat(self):
print('heartbeat')
@contextlib.contextmanager
def heartbeat_timer(self):
heartbeat_timer = timer.Timer(self.heartbeat, interval=60)
yield
heartbeat_timer.stop()
@contextlib.contextmanager
def debug_timing(self, task):
t0 = time.time()
yield
self.logger.debug(f'{task} complete ({{:.1f}} seconds)', time.time() - t0)
def run_all_positions(self):
for position_name, position_coords in sorted(self.positions.items()):
if position_name not in self.skip_positions:
self.logger.info(f'Acquiring Position {position_name}')
with self.debug_timing(f'Position {position_name}'):
self.run_position(position_name, position_coords)
self.heartbeat()
def run_timepoint(self, scheduled_start):
try:
self.heartbeat()
self.timepoint_prefix = time.strftime('%Y-%m-%dt%H%M')
self.scheduled_start = scheduled_start
self.start_time = time.time()
self._job_futures = []
self.logger.info('Starting timepoint {} ({:.0f} minutes after scheduled)', self.timepoint_prefix,
(self.start_time-self.scheduled_start)/60)
self.experiment_metadata.setdefault('timepoints', []).append(self.timepoint_prefix)
self.experiment_metadata.setdefault('timestamps', []).append(self.start_time)
self.logger.info('Configuring timepoint')
with self.debug_timing('Configuration'):
self.configure_timepoint()
self.heartbeat()
self.run_all_positions()
self.finalize_timepoint()
self.heartbeat()
self.end_time = time.time()
self.experiment_metadata.setdefault('durations', []).append(self.end_time - self.start_time)
if self.write_files:
self._write_atomic_json(self.experiment_metadata_path, self.experiment_metadata)
run_again = self.skip_positions != self.positions.keys()
with self.debug_timing('Image IO'), self.heartbeat_timer():
self.image_io.wait()
if self._job_futures:
with self.debug_timing('Background jobs'), self.heartbeat_timer():
futures.wait(self._job_futures)
[f.result() for f in self._job_futures]
self.cleanup()
self.logger.info('Timepoint {} ended ({:.0f} minutes after starting)', self.timepoint_prefix,
(time.time()-self.start_time)/60)
if run_again:
return self.get_next_run_time()
except:
self.logger.error('Exception in timepoint:', exc_info=True)
raise
def add_background_job(self, function, *args, **kws):
if self._job_thread is None:
self._job_thread = futures.ThreadPoolExecutor(max_workers=1)
self._job_futures.append(self._job_thread.submit(function, *args, **kws))
def _position_metadata(self, position_name):
position_dir = self.data_dir / position_name
metadata_path = position_dir / 'position_metadata.json'
if metadata_path.exists():
with metadata_path.open('r') as f:
position_metadata = json.load(f)
else:
position_metadata = []
return position_dir, metadata_path, position_metadata
def run_position(self, position_name, position_coords):
timestamp = time.time()
position_dir, metadata_path, position_metadata = self._position_metadata(position_name)
position_dir.mkdir(exist_ok=True)
if self.scope is not None:
with self.debug_timing('Stage positioning'):
self.scope.stage.position = position_coords
        images, image_names, new_metadata = self.acquire_images(position_name, position_dir, position_metadata)
        if new_metadata is None:
            new_metadata = {}
        new_metadata['timestamp'] = timestamp
        new_metadata['timepoint'] = self.timepoint_prefix
        position_metadata.append(new_metadata)
        self.finalize_acquisition(position_name, position_dir, position_metadata)
        image_paths = [position_dir / (self.timepoint_prefix + ' ' + name) for name in image_names]
        if self.write_files:
            self.image_io.write(images, image_paths, self.IMAGE_COMPRESSION)
            self._write_atomic_json(metadata_path, position_metadata)
def _write_atomic_json(self, out_path, data):
datafile.json_encode_atomic_legible_to_file(data, out_path)
def configure_timepoint(self):
pass
def finalize_timepoint(self):
pass
def finalize_acquisition(self, position_name, position_dir, position_metadata):
pass
def cleanup(self):
pass
def get_next_run_time(self):
return None
def acquire_images(self, position_name, position_dir, position_metadata):
raise NotImplementedError()
@classmethod
def main(cls, timepoint_dir=None, **cls_init_args):
if timepoint_dir is None:
timepoint_dir = pathlib.Path(inspect.getfile(cls)).parent
scheduled_start = None
for arg in sys.argv[1:]:
if arg.count('='):
while arg.startswith('-'):
arg = arg[1:]
arg = arg.replace('-', '_')
exec(arg, dict(logging=logging), cls_init_args)
elif scheduled_start is None:
scheduled_start = float(arg)
else:
raise ValueError('More than one schedule start time provided')
if scheduled_start is None:
scheduled_start = time.time()
handler = cls(timepoint_dir, **cls_init_args)
next_run_time = handler.run_timepoint(scheduled_start)
if next_run_time:
print('next run:{}'.format(next_run_time))
| true
| true
|
790b9cc97da09add8657988561a1ac0078875952
| 1,006
|
py
|
Python
|
kubernetes/test/test_v1_scale_io_volume_source.py
|
iguazio/python
|
c2684bb479d44a49a2010ec4ede5ffa7b17349dd
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_scale_io_volume_source.py
|
iguazio/python
|
c2684bb479d44a49a2010ec4ede5ffa7b17349dd
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_scale_io_volume_source.py
|
iguazio/python
|
c2684bb479d44a49a2010ec4ede5ffa7b17349dd
|
[
"Apache-2.0"
] | 1
|
2019-01-10T11:13:52.000Z
|
2019-01-10T11:13:52.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
class TestV1ScaleIOVolumeSource(unittest.TestCase):
""" V1ScaleIOVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleIOVolumeSource(self):
"""
Test V1ScaleIOVolumeSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_scale_io_volume_source.V1ScaleIOVolumeSource()
pass
if __name__ == '__main__':
unittest.main()
| 22.355556
| 105
| 0.72167
|
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_scale_io_volume_source import V1ScaleIOVolumeSource
class TestV1ScaleIOVolumeSource(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleIOVolumeSource(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|
790b9e3ac9ec40ba33c3ccf65d54b2efa218272e
| 3,459
|
py
|
Python
|
fibers/_pyfibers.py
|
timgates42/python-fibers
|
e96200d1bd058fb3e5931f37af68b2e18f3043d5
|
[
"MIT"
] | null | null | null |
fibers/_pyfibers.py
|
timgates42/python-fibers
|
e96200d1bd058fb3e5931f37af68b2e18f3043d5
|
[
"MIT"
] | null | null | null |
fibers/_pyfibers.py
|
timgates42/python-fibers
|
e96200d1bd058fb3e5931f37af68b2e18f3043d5
|
[
"MIT"
] | null | null | null |
import _continuation
import threading
__all__ = ['Fiber', 'error', 'current']
_tls = threading.local()
def current():
try:
return _tls.current_fiber
except AttributeError:
fiber = _tls.current_fiber = _tls.main_fiber = _create_main_fiber()
return fiber
class error(Exception):
pass
class Fiber(object):
_cont = None
_thread_id = None
_ended = False
def __init__(self, target=None, args=[], kwargs={}, parent=None):
def _run(c):
_tls.current_fiber = self
try:
return target(*args, **kwargs)
finally:
cont = self._cont
self._cont = None
self._ended = True
_continuation.permute(cont, self._get_active_parent()._cont)
self._func = _run
if parent is None:
parent = current()
self._thread_id = threading.current_thread().ident
if self._thread_id != parent._thread_id:
raise error('parent cannot be on a different thread')
self.parent = parent
def _get_active_parent(self):
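        # Walk up the parent chain to the nearest fiber that has started and not yet ended.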
parent = self.parent
while True:
if parent is not None and parent._cont is not None and not parent._ended:
break
parent = parent.parent
return parent
@classmethod
def current(cls):
return current()
@property
def parent(self):
return self.__dict__.get('parent', None)
@parent.setter
def parent(self, value):
if not isinstance(value, Fiber):
raise TypeError('parent must be a Fiber')
if value._ended:
raise ValueError('parent must not have ended')
if self._thread_id != value._thread_id:
raise ValueError('parent cannot be on a different thread')
self.__dict__['parent'] = value
def switch(self, value=None):
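        # Transfer control to this fiber, creating its continulet lazily on the first switch.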
if self._ended:
raise error('Fiber has ended')
curr = current()
if curr._thread_id != self._thread_id:
raise error('Cannot switch to a fiber on a different thread')
if self._cont is None:
self._cont = _continuation.continulet(self._func)
try:
return curr._cont.switch(value=value, to=self._cont)
finally:
_tls.current_fiber = curr
def throw(self, *args):
if self._ended:
raise error('Fiber has ended')
curr = current()
if curr._thread_id != self._thread_id:
raise error('Cannot switch to a fiber on a different thread')
if self._cont is None:
# Fiber was not started yet, propagate to parent directly
self._ended = True
return self._get_active_parent().throw(*args)
try:
return curr._cont.throw(*args, to=self._cont)
finally:
_tls.current_fiber = curr
def is_alive(self):
return (self._cont is not None and self._cont.is_pending()) or \
(self._cont is None and not self._ended)
def __getstate__(self):
raise TypeError('cannot serialize Fiber object')
def _create_main_fiber():
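    # The main fiber wraps an uninitialized continulet shell that stands in for
    # the thread's original execution context.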
main_fiber = Fiber.__new__(Fiber)
main_fiber._cont = _continuation.continulet.__new__(_continuation.continulet)
main_fiber._ended = False
main_fiber._thread_id = threading.current_thread().ident
main_fiber.__dict__['parent'] = None
return main_fiber
| 27.895161
| 85
| 0.603643
|
import _continuation
import threading
__all__ = ['Fiber', 'error', 'current']
_tls = threading.local()
def current():
try:
return _tls.current_fiber
except AttributeError:
fiber = _tls.current_fiber = _tls.main_fiber = _create_main_fiber()
return fiber
class error(Exception):
pass
class Fiber(object):
_cont = None
_thread_id = None
_ended = False
def __init__(self, target=None, args=[], kwargs={}, parent=None):
def _run(c):
_tls.current_fiber = self
try:
return target(*args, **kwargs)
finally:
cont = self._cont
self._cont = None
self._ended = True
_continuation.permute(cont, self._get_active_parent()._cont)
self._func = _run
if parent is None:
parent = current()
self._thread_id = threading.current_thread().ident
if self._thread_id != parent._thread_id:
raise error('parent cannot be on a different thread')
self.parent = parent
def _get_active_parent(self):
parent = self.parent
while True:
if parent is not None and parent._cont is not None and not parent._ended:
break
parent = parent.parent
return parent
@classmethod
def current(cls):
return current()
@property
def parent(self):
return self.__dict__.get('parent', None)
@parent.setter
def parent(self, value):
if not isinstance(value, Fiber):
raise TypeError('parent must be a Fiber')
if value._ended:
raise ValueError('parent must not have ended')
if self._thread_id != value._thread_id:
raise ValueError('parent cannot be on a different thread')
self.__dict__['parent'] = value
def switch(self, value=None):
if self._ended:
raise error('Fiber has ended')
curr = current()
if curr._thread_id != self._thread_id:
raise error('Cannot switch to a fiber on a different thread')
if self._cont is None:
self._cont = _continuation.continulet(self._func)
try:
return curr._cont.switch(value=value, to=self._cont)
finally:
_tls.current_fiber = curr
def throw(self, *args):
if self._ended:
raise error('Fiber has ended')
curr = current()
if curr._thread_id != self._thread_id:
raise error('Cannot switch to a fiber on a different thread')
if self._cont is None:
self._ended = True
return self._get_active_parent().throw(*args)
try:
return curr._cont.throw(*args, to=self._cont)
finally:
_tls.current_fiber = curr
def is_alive(self):
return (self._cont is not None and self._cont.is_pending()) or \
(self._cont is None and not self._ended)
def __getstate__(self):
raise TypeError('cannot serialize Fiber object')
def _create_main_fiber():
main_fiber = Fiber.__new__(Fiber)
main_fiber._cont = _continuation.continulet.__new__(_continuation.continulet)
main_fiber._ended = False
main_fiber._thread_id = threading.current_thread().ident
main_fiber.__dict__['parent'] = None
return main_fiber
| true
| true
|
790b9f2574998fa91c56aa8a6f4266c340e79a8d
| 2,076
|
py
|
Python
|
01/Task13.py
|
omartrinidad/pattern-recognition-bit
|
ba3eb4e541fff2b1aedbaa4420d7a8cea8100dc7
|
[
"MIT"
] | null | null | null |
01/Task13.py
|
omartrinidad/pattern-recognition-bit
|
ba3eb4e541fff2b1aedbaa4420d7a8cea8100dc7
|
[
"MIT"
] | null | null | null |
01/Task13.py
|
omartrinidad/pattern-recognition-bit
|
ba3eb4e541fff2b1aedbaa4420d7a8cea8100dc7
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
def updateParams(k, alpha, N,sum_log_di, x, h):
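    # One Newton-Raphson step for the Weibull maximum-likelihood fit: form the
    # gradient vector and the 2x2 Hessian of the log-likelihood in (k, alpha)
    # and solve the linear system for the parameter update.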
div_xByAlpha = np.divide(x,alpha)
powK_div_xByAlpha = np.power(div_xByAlpha, k)
log_div_xByAlpha = np.log(div_xByAlpha)
sum_powK_div_diByAlpha = np.sum(np.multiply(powK_div_xByAlpha, h))
sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha = np.sum(np.multiply(np.multiply(powK_div_xByAlpha,log_div_xByAlpha),h))
sum_prod_OF_powK_div_diByAlpha_AND_logP2_div_diByAlpha = np.sum(np.multiply(np.multiply(powK_div_xByAlpha,np.power(log_div_xByAlpha,2)),h))
#N = d.shape[0]
hessian = np.zeros((2,2))
hessian[0,0] = -1.0 * ((N/(k*k)) + sum_prod_OF_powK_div_diByAlpha_AND_logP2_div_diByAlpha)
hessian[1,1] = (k/(alpha*alpha)) * (N-(k+1)*sum_powK_div_diByAlpha)
hessian[0,1] = hessian[1,0] = (1.0/alpha)*sum_powK_div_diByAlpha + (k/alpha)*sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha - N/alpha
vec = np.zeros((2,1))
vec[0] = -1.0 *( N/k - N*np.log(alpha) + sum_log_di - sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha)
vec[1] = -1.0 *(k/alpha * (sum_powK_div_diByAlpha - N))
param = np.linalg.inv(hessian).dot(vec)
return k+param[0], alpha+param[1]
if __name__ == "__main__":
#loading histograms
data = np.loadtxt('myspace.csv',dtype=np.object,comments='#',delimiter=',')
h = data[:,1].astype(np.int)
h = np.array([x for x in h if x>0])
x = np.array([num for num in range(1, h.shape[0]+1)])
k = 1.0
alpha = 1.0
N = np.sum(h)
sum_log_di = np.sum(np.multiply(np.log(x), h))
for i in range(0,20):
k,alpha = updateParams(k, alpha, N, sum_log_di, x, h)
        print(i)
        print(k)
        print(alpha)
        print("________")
x_1 = np.linspace(1,500,2500)
fig = plt.figure()
axs = fig.add_subplot(111)
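    # plot the fitted Weibull density, scaled by the total count N, on top of the histogram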
y = N * (k/alpha) * np.multiply(np.power(np.divide(x_1,alpha), k-1), np.exp(-1.0* np.power(np.divide(x_1,alpha), k)))
axs.plot(x_1,y, 'b')
axs.plot(x, h, 'g')
plt.show()
| 37.071429
| 143
| 0.663776
|
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
def updateParams(k, alpha, N,sum_log_di, x, h):
div_xByAlpha = np.divide(x,alpha)
powK_div_xByAlpha = np.power(div_xByAlpha, k)
log_div_xByAlpha = np.log(div_xByAlpha)
sum_powK_div_diByAlpha = np.sum(np.multiply(powK_div_xByAlpha, h))
sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha = np.sum(np.multiply(np.multiply(powK_div_xByAlpha,log_div_xByAlpha),h))
sum_prod_OF_powK_div_diByAlpha_AND_logP2_div_diByAlpha = np.sum(np.multiply(np.multiply(powK_div_xByAlpha,np.power(log_div_xByAlpha,2)),h))
hessian = np.zeros((2,2))
hessian[0,0] = -1.0 * ((N/(k*k)) + sum_prod_OF_powK_div_diByAlpha_AND_logP2_div_diByAlpha)
hessian[1,1] = (k/(alpha*alpha)) * (N-(k+1)*sum_powK_div_diByAlpha)
hessian[0,1] = hessian[1,0] = (1.0/alpha)*sum_powK_div_diByAlpha + (k/alpha)*sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha - N/alpha
vec = np.zeros((2,1))
vec[0] = -1.0 *( N/k - N*np.log(alpha) + sum_log_di - sum_prod_OF_powK_div_diByAlpha_AND_log_div_diByAlpha)
vec[1] = -1.0 *(k/alpha * (sum_powK_div_diByAlpha - N))
param = np.linalg.inv(hessian).dot(vec)
return k+param[0], alpha+param[1]
if __name__ == "__main__":
data = np.loadtxt('myspace.csv',dtype=np.object,comments='#',delimiter=',')
h = data[:,1].astype(np.int)
h = np.array([x for x in h if x>0])
x = np.array([num for num in range(1, h.shape[0]+1)])
k = 1.0
alpha = 1.0
N = np.sum(h)
sum_log_di = np.sum(np.multiply(np.log(x), h))
for i in range(0,20):
k,alpha = updateParams(k, alpha, N, sum_log_di, x, h)
        print(i)
        print(k)
        print(alpha)
        print("________")
x_1 = np.linspace(1,500,2500)
fig = plt.figure()
axs = fig.add_subplot(111)
y = N * (k/alpha) * np.multiply(np.power(np.divide(x_1,alpha), k-1), np.exp(-1.0* np.power(np.divide(x_1,alpha), k)))
axs.plot(x_1,y, 'b')
axs.plot(x, h, 'g')
plt.show()
| false
| true
|
790b9f9c2a8e69ff060dcb45c97227164ad46f3d
| 12,145
|
py
|
Python
|
evaluate.py
|
TUM-LMF/fieldRNN
|
5e9e17b170fe000ae15a73a276742aea84e6410b
|
[
"MIT"
] | 42
|
2017-09-02T12:49:26.000Z
|
2021-06-23T09:31:04.000Z
|
evaluate.py
|
TUM-LMF/fieldRNN
|
5e9e17b170fe000ae15a73a276742aea84e6410b
|
[
"MIT"
] | 4
|
2019-03-20T08:19:45.000Z
|
2022-02-09T23:53:03.000Z
|
evaluate.py
|
TUM-LMF/fieldRNN
|
5e9e17b170fe000ae15a73a276742aea84e6410b
|
[
"MIT"
] | 17
|
2018-03-09T03:38:44.000Z
|
2021-08-21T17:37:21.000Z
|
import tensorflow as tf
import cPickle as pickle
import rnn_model
import cnn_model
from dataloader import Dataloader
import os
import datetime
import numpy as np
import argparse
from cnn_model import unroll
def main():
    parser = argparse.ArgumentParser(description='Evaluate a trained model.')
parser.add_argument('rundir', type=str, help='directory of tf checkpoint file')
parser.add_argument('--model', type=str, help="Neural network architecture. 'lstm', 'rnn' or 'cnn' (default lstm)", default='lstm')
parser.add_argument('--gpu', type=int, help="Select gpu (e.g. 0), via environment variable CUDA_VISIBLE_DEVICES (default None)", default=None)
args = parser.parse_args()
""" GPU management """
allow_gpu_mem_growth = True
gpu_memory_fraction = 1
gpu_id = args.gpu
if args.gpu is not None:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
dataloader = Dataloader(datafolder="data/eval", batchsize=500)
#dataloader = Dataloader(conn=conn, batch_size=args.batchsize, sql_where=args.sqlwhere,
# debug=False,
# do_shuffle=False, do_init_shuffle=True, tablename=args.tablename)
"""
Load
parameters
from init_from model
"""
with open(os.path.join(args.rundir, "args.pkl"), "rb") as f:
modelargs = pickle.load(f)
"""
Create
new
model
object
with same parameter """
print("building model graph")
if args.model in ["rnn","lstm"]:
model = rnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"], n_layers=modelargs["n_layers"], batch_size=dataloader.batchsize,
adam_lr=modelargs["adam_lr"],rnn_cell_type=args.model , dropout_keep_prob=modelargs["dropout_keep_prob"], n_cell_per_input=modelargs["n_cell_per_input"], gpu=0)
        evaluate = evaluate_rnn
    elif args.model == "cnn":
model = cnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"], n_layers=modelargs["n_layers"],
adam_lr=1e-3, dropout_keep_prob=modelargs["dropout_keep_prob"], n_cell_per_input=modelargs["n_cell_per_input"], gpu=gpu_id)
        evaluate = evaluate_cnn
    else:
        raise ValueError("unsupported --model: {}".format(args.model))
probabilities, targets, observations = evaluate(model,dataloader,
init_dir=args.rundir,
print_every=20,
gpu_memory_fraction=gpu_memory_fraction,
allow_gpu_mem_growth=allow_gpu_mem_growth)
#np.save(os.path.join(args.rundir, "eval_confusion_matrix.npy"), confusion_matrix)
np.save(os.path.join(args.rundir, "eval_probabilities.npy"), probabilities)
np.save(os.path.join(args.rundir, "eval_targets.npy"), targets)
np.save(os.path.join(args.rundir, "eval_observations.npy"), observations)
def evaluate_rnn(model,
dataloader,
print_every=5,
init_dir=None,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3):
"""
This function initialized a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
:returns observations <int> [all_observations]position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
"""
saver = tf.train.Saver()
# container for output data
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_dir is not None:
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(1, dataloader.num_batches):
# step as number of features -> invariant to changes in batch size
step += dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = dataloader.next_batch()
e_db = datetime.datetime.now()
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}
cm, scores, targets, obs = sess.run([model.confusion_matrix, model.scores, model.targets, model.obs],
feed_dict=feed)
all_obs = np.append(all_obs, obs)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
#total_cm += cm
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
# approximate calculation time
approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
i,
step,
dataloader.epoch,
dataloader.batch,
dataloader.num_batches,
cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
return all_scores.reshape(-1, model.n_classes), \
all_targets.reshape(-1, model.n_classes).astype(bool), \
all_obs
def evaluate_cnn(model,
dataloader,
print_every=5,
init_dir=None,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3):
"""
This function initialized a model from the <init_from> directory and calculates
probabilities, and confusion matrices based on all data stored in
one epoch of dataloader (usually test data)
:param model: rnn_model object containing tensorflow graph
:param dataloader: DataLoader object for loading batches
:param print_every: console log frequency
:param allow_gpu_mem_growth: dynamic growth of gpu vram
:param gpu_memory_fraction: hard upper limit for gpu vram
:returns confusion_matrix <float> [n_classes x n_classes] rows as targets cols as predicted
:returns probabilities <float> [all observations x n_classes] probabilities for each class per observation
:returns targets <bool> [all observations x n_classes] reference data for each class per observation
:returns observations <int> [all_observations]position of observation in the sequence
e.g. [1,2,3,4,1,2,3,4,5,6,1,2,3,4, ...]
"""
saver = tf.train.Saver()
# container for output data
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_dir is not None:
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
with open(init_dir + "/steps.txt", "r") as f:
line = f.read()
step_, epoch_ = line.split(" ")
step = int(step_)
dataloader.epoch = int(epoch_)
for i in range(1, dataloader.num_batches):
# step as number of features -> invariant to changes in batch size
step += dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = dataloader.next_batch()
e_db = datetime.datetime.now()
            # also unroll the index of each observation -> TODO: integrate into the unroll function, but dependencies need updating too
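            # mask marks valid timesteps in the padded [batch, max_len] grid;
            # obs keeps, for each valid timestep, its position index within the sequence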
batch_size, max_seqlengths, n_input = X.shape
ones = np.ones([batch_size, max_seqlengths])
mask_ = np.arange(0, max_seqlengths) * ones < (seq_lengths * ones.T).T
mask = mask_.reshape(-1)
obs_ = np.arange(0, max_seqlengths) * ones
obs = obs_.reshape(-1)[mask]
""" unroll data """
X, y = unroll(X, y, seq_lengths)
feed = {model.X: X, model.y: y, model.batch_size: X.shape[0]}
scores, targets = sess.run([model.scores, model.targets],
feed_dict=feed)
            all_scores = np.append(all_scores, scores)
            all_targets = np.append(all_targets, targets)
            all_obs = np.append(all_obs, obs)  # accumulate observation indices across batches
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
# approximate calculation time
approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
i,
step,
dataloader.epoch,
dataloader.batch,
dataloader.num_batches,
cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
return all_scores.reshape(-1, model.n_classes), \
all_targets.reshape(-1, model.n_classes).astype(bool), \
               all_obs
if __name__ == '__main__':
main()
| 41.309524
| 192
| 0.594566
|
import tensorflow as tf
import cPickle as pickle
import rnn_model
import cnn_model
from dataloader import Dataloader
import os
import datetime
import numpy as np
import argparse
from cnn_model import unroll
def main():
    parser = argparse.ArgumentParser(description='Evaluate a trained model.')
parser.add_argument('rundir', type=str, help='directory of tf checkpoint file')
parser.add_argument('--model', type=str, help="Neural network architecture. 'lstm', 'rnn' or 'cnn' (default lstm)", default='lstm')
parser.add_argument('--gpu', type=int, help="Select gpu (e.g. 0), via environment variable CUDA_VISIBLE_DEVICES (default None)", default=None)
args = parser.parse_args()
allow_gpu_mem_growth = True
gpu_memory_fraction = 1
gpu_id = args.gpu
if args.gpu is not None:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = gpu_id
dataloader = Dataloader(datafolder="data/eval", batchsize=500)
with open(os.path.join(args.rundir, "args.pkl"), "rb") as f:
modelargs = pickle.load(f)
print("building model graph")
if args.model in ["rnn","lstm"]:
model = rnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"], n_layers=modelargs["n_layers"], batch_size=dataloader.batchsize,
adam_lr=modelargs["adam_lr"],rnn_cell_type=args.model , dropout_keep_prob=modelargs["dropout_keep_prob"], n_cell_per_input=modelargs["n_cell_per_input"], gpu=0)
        evaluate = evaluate_rnn
    elif args.model == "cnn":
model = cnn_model.Model(n_input=modelargs["n_input"], n_classes=modelargs["n_classes"], n_layers=modelargs["n_layers"],
adam_lr=1e-3, dropout_keep_prob=modelargs["dropout_keep_prob"], n_cell_per_input=modelargs["n_cell_per_input"], gpu=gpu_id)
        evaluate = evaluate_cnn
    else:
        raise ValueError("unsupported --model: {}".format(args.model))
probabilities, targets, observations = evaluate(model,dataloader,
init_dir=args.rundir,
print_every=20,
gpu_memory_fraction=gpu_memory_fraction,
allow_gpu_mem_growth=allow_gpu_mem_growth)
np.save(os.path.join(args.rundir, "eval_probabilities.npy"), probabilities)
np.save(os.path.join(args.rundir, "eval_targets.npy"), targets)
np.save(os.path.join(args.rundir, "eval_observations.npy"), observations)
def evaluate_rnn(model,
dataloader,
print_every=5,
init_dir=None,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3):
saver = tf.train.Saver()
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_dir is not None:
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
for i in range(1, dataloader.num_batches):
step += dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = dataloader.next_batch()
e_db = datetime.datetime.now()
feed = {model.X: X, model.y_: y, model.seq_lengths: seq_lengths}
cm, scores, targets, obs = sess.run([model.confusion_matrix, model.scores, model.targets, model.obs],
feed_dict=feed)
all_obs = np.append(all_obs, obs)
all_scores = np.append(all_scores, scores)
all_targets = np.append(all_targets, targets)
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
i,
step,
dataloader.epoch,
dataloader.batch,
dataloader.num_batches,
cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
return all_scores.reshape(-1, model.n_classes), \
all_targets.reshape(-1, model.n_classes).astype(bool), \
all_obs
def evaluate_cnn(model,
dataloader,
print_every=5,
init_dir=None,
allow_gpu_mem_growth=True,
gpu_memory_fraction=0.3):
saver = tf.train.Saver()
total_cm = np.zeros((model.n_classes, model.n_classes))
all_scores = np.array([])
all_targets = np.array([])
all_obs = np.array([])
step = 0
t_last = datetime.datetime.now()
config = tf.ConfigProto()
config.gpu_options.allow_growth = allow_gpu_mem_growth
config.gpu_options.per_process_gpu_memory_fraction = gpu_memory_fraction
config.allow_soft_placement = True
print("start")
with tf.Session(config=config) as sess:
sess.run([model.init_op])
if init_dir is not None:
if os.path.exists(init_dir):
ckpt = tf.train.get_checkpoint_state(init_dir)
print("restoring model from %s" % ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
with open(init_dir + "/steps.txt", "r") as f:
line = f.read()
step_, epoch_ = line.split(" ")
step = int(step_)
dataloader.epoch = int(epoch_)
for i in range(1, dataloader.num_batches):
step += dataloader.batch_size
s_db = datetime.datetime.now()
X, y, seq_lengths = dataloader.next_batch()
e_db = datetime.datetime.now()
batch_size, max_seqlengths, n_input = X.shape
ones = np.ones([batch_size, max_seqlengths])
mask_ = np.arange(0, max_seqlengths) * ones < (seq_lengths * ones.T).T
mask = mask_.reshape(-1)
obs_ = np.arange(0, max_seqlengths) * ones
obs = obs_.reshape(-1)[mask]
X, y = unroll(X, y, seq_lengths)
feed = {model.X: X, model.y: y, model.batch_size: X.shape[0]}
scores, targets = sess.run([model.scores, model.targets],
feed_dict=feed)
            all_scores = np.append(all_scores, scores)
            all_targets = np.append(all_targets, targets)
            all_obs = np.append(all_obs, obs)
e_tr = datetime.datetime.now()
dt_db = e_db - s_db
dt_tr = e_tr - e_db
field_per_s = dataloader.batch_size / (datetime.datetime.now() - t_last).total_seconds()
approx_calc_time = (((dataloader.num_feat) - step) / field_per_s)
eta = datetime.datetime.now() + datetime.timedelta(seconds=approx_calc_time)
t_last = datetime.datetime.now()
if i % print_every == 0:
cross_entropy = sess.run(model.cross_entropy, feed_dict=feed)
msg = "Gathering: Iteration {}, feature {}, epoch {}, batch {}/{}: xentr {:.2f} " \
"(time: db {}ms; eval {}ms, {} feat/s, eta: {})".format(
i,
step,
dataloader.epoch,
dataloader.batch,
dataloader.num_batches,
cross_entropy,
int(dt_db.total_seconds() * 1000),
int(dt_tr.total_seconds() * 1000),
int(field_per_s),
eta.strftime("%d.%b %H:%M")
)
print(msg)
return all_scores.reshape(-1, model.n_classes), \
all_targets.reshape(-1, model.n_classes).astype(bool), \
               all_obs
if __name__ == '__main__':
main()
| true
| true
|
790b9fa31288bce41760b4441d82b587c8002969
| 12,478
|
py
|
Python
|
transformers/__init__.py
|
seongwookchun/transformers
|
9b3817259020ae8fc3e310f7eea896413826a526
|
[
"Apache-2.0"
] | null | null | null |
transformers/__init__.py
|
seongwookchun/transformers
|
9b3817259020ae8fc3e310f7eea896413826a526
|
[
"Apache-2.0"
] | null | null | null |
transformers/__init__.py
|
seongwookchun/transformers
|
9b3817259020ae8fc3e310f7eea896413826a526
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "2.2.3"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
import absl.logging
absl.logging.set_verbosity('info')
absl.logging.set_stderrthreshold('info')
absl.logging._warn_preinit_stderr = False
except:
pass
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Files and general utilities
from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH_PRETRAINED_BERT_CACHE,
cached_path, add_start_docstrings, add_end_docstrings,
WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, CONFIG_NAME,
is_tf_available, is_torch_available)
from .data import (is_sklearn_available,
InputExample, InputFeatures, DataProcessor,
glue_output_modes, glue_convert_examples_to_features,
glue_processors, glue_tasks_num_labels,
xnli_output_modes, xnli_processors, xnli_tasks_num_labels,
squad_convert_examples_to_features, SquadFeatures,
SquadExample, SquadV1Processor, SquadV2Processor)
if is_sklearn_available():
from .data import glue_compute_metrics, xnli_compute_metrics
# ETRI modified ver
from .etri_tf_tokenization import FullTokenizer
# Tokenizers
from .tokenization_utils import (PreTrainedTokenizer)
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer, MecabTokenizer, CharacterTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
from .tokenization_albert import AlbertTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_t5 import T5Tokenizer
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_auto import AutoConfig
from .configuration_bert import BertConfig, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_openai import OpenAIGPTConfig, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_transfo_xl import TransfoXLConfig, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_gpt2 import GPT2Config, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlnet import XLNetConfig, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_albert import AlbertConfig, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_camembert import CamembertConfig, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_t5 import T5Config, T5_PRETRAINED_CONFIG_ARCHIVE_MAP
# Modeling
if is_torch_available():
from .modeling_utils import (PreTrainedModel, prune_layer, Conv1D)
from .modeling_auto import (AutoModel, AutoModelForSequenceClassification, AutoModelForQuestionAnswering,
AutoModelWithLMHead)
from .modeling_bert import (BertPreTrainedModel, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_openai import (OpenAIGPTPreTrainedModel, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel,
AdaptiveEmbedding,
load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_ctrl import (CTRLPreTrainedModel, CTRLModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel,
XLNetForSequenceClassification, XLNetForTokenClassification,
XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple,
XLNetForQuestionAnswering, load_tf_weights_in_xlnet,
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlm import (XLMPreTrainedModel , XLMModel,
XLMWithLMHeadModel, XLMForSequenceClassification,
XLMForQuestionAnswering, XLMForQuestionAnsweringSimple,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_roberta import (RobertaForMaskedLM, RobertaModel,
RobertaForSequenceClassification, RobertaForMultipleChoice,
RobertaForTokenClassification,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_distilbert import (DistilBertPreTrainedModel, DistilBertForMaskedLM, DistilBertModel,
DistilBertForSequenceClassification, DistilBertForQuestionAnswering,
DistilBertForTokenClassification,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_camembert import (CamembertForMaskedLM, CamembertModel,
CamembertForSequenceClassification, CamembertForMultipleChoice,
CamembertForTokenClassification,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_encoder_decoder import PreTrainedEncoderDecoder, Model2Model
from .modeling_t5 import (T5PreTrainedModel, T5Model, T5WithLMHeadModel,
load_tf_weights_in_t5,
T5_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_albert import (AlbertPreTrainedModel, AlbertModel, AlbertForMaskedLM, AlbertForSequenceClassification,
AlbertForQuestionAnswering,
load_tf_weights_in_albert, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
# Optimization
from .optimization import (AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup)
# TensorFlow
if is_tf_available():
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, TFSequenceSummary, shape_list
from .modeling_tf_auto import (TFAutoModel, TFAutoModelForSequenceClassification, TFAutoModelForQuestionAnswering,
TFAutoModelWithLMHead)
from .modeling_tf_bert import (TFBertPreTrainedModel, TFBertMainLayer, TFBertEmbeddings,
TFBertModel, TFBertForPreTraining,
TFBertForMaskedLM, TFBertForNextSentencePrediction,
TFBertForSequenceClassification, TFBertForMultipleChoice,
TFBertForTokenClassification, TFBertForQuestionAnswering,
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_gpt2 import (TFGPT2PreTrainedModel, TFGPT2MainLayer,
TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel,
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_openai import (TFOpenAIGPTPreTrainedModel, TFOpenAIGPTMainLayer,
TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel,
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_transfo_xl import (TFTransfoXLPreTrainedModel, TFTransfoXLMainLayer,
TFTransfoXLModel, TFTransfoXLLMHeadModel,
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_xlnet import (TFXLNetPreTrainedModel, TFXLNetMainLayer,
TFXLNetModel, TFXLNetLMHeadModel,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetForQuestionAnsweringSimple,
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_xlm import (TFXLMPreTrainedModel, TFXLMMainLayer,
TFXLMModel, TFXLMWithLMHeadModel,
TFXLMForSequenceClassification,
TFXLMForQuestionAnsweringSimple,
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_roberta import (TFRobertaPreTrainedModel, TFRobertaMainLayer,
TFRobertaModel, TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_distilbert import (TFDistilBertPreTrainedModel, TFDistilBertMainLayer,
TFDistilBertModel, TFDistilBertForMaskedLM,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForQuestionAnswering,
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_ctrl import (TFCTRLPreTrainedModel, TFCTRLModel,
TFCTRLLMHeadModel,
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_albert import (TFAlbertPreTrainedModel, TFAlbertModel, TFAlbertForMaskedLM,
TFAlbertForSequenceClassification,
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_t5 import (TFT5PreTrainedModel, TFT5Model, TFT5WithLMHeadModel,
TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP)
# Optimization
from .optimization_tf import (WarmUp, create_optimizer, AdamWeightDecay, GradientAccumulator)
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (convert_tf_weight_name_to_pt_weight_name,
load_pytorch_checkpoint_in_tf2_model,
load_pytorch_weights_in_tf2_model,
load_pytorch_model_in_tf2_model,
load_tf2_checkpoint_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
load_tf2_model_in_pytorch_model)
if not is_tf_available() and not is_torch_available():
logger.warning("Neither PyTorch nor TensorFlow >= 2.0 have been found."
"Models won't be available and only tokenizers, configuration"
"and file/data utilities can be used.")
| 59.990385
| 128
| 0.677432
|
__version__ = "2.2.3"
# Work around to update TensorFlow's absl.logging threshold which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
import absl.logging
absl.logging.set_verbosity('info')
absl.logging.set_stderrthreshold('info')
absl.logging._warn_preinit_stderr = False
except:
pass
import logging
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# Files and general utilities
from .file_utils import (TRANSFORMERS_CACHE, PYTORCH_TRANSFORMERS_CACHE, PYTORCH_PRETRAINED_BERT_CACHE,
cached_path, add_start_docstrings, add_end_docstrings,
WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, CONFIG_NAME,
is_tf_available, is_torch_available)
from .data import (is_sklearn_available,
InputExample, InputFeatures, DataProcessor,
glue_output_modes, glue_convert_examples_to_features,
glue_processors, glue_tasks_num_labels,
xnli_output_modes, xnli_processors, xnli_tasks_num_labels,
squad_convert_examples_to_features, SquadFeatures,
SquadExample, SquadV1Processor, SquadV2Processor)
if is_sklearn_available():
from .data import glue_compute_metrics, xnli_compute_metrics
# ETRI modified ver
from .etri_tf_tokenization import FullTokenizer
# Tokenizers
from .tokenization_utils import (PreTrainedTokenizer)
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer, MecabTokenizer, CharacterTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_xlnet import XLNetTokenizer, SPIECE_UNDERLINE
from .tokenization_xlm import XLMTokenizer
from .tokenization_roberta import RobertaTokenizer
from .tokenization_distilbert import DistilBertTokenizer
from .tokenization_albert import AlbertTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_t5 import T5Tokenizer
# Configurations
from .configuration_utils import PretrainedConfig
from .configuration_auto import AutoConfig
from .configuration_bert import BertConfig, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_openai import OpenAIGPTConfig, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_transfo_xl import TransfoXLConfig, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_gpt2 import GPT2Config, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_ctrl import CTRLConfig, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlnet import XLNetConfig, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_xlm import XLMConfig, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_roberta import RobertaConfig, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_distilbert import DistilBertConfig, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_albert import AlbertConfig, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_camembert import CamembertConfig, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
from .configuration_t5 import T5Config, T5_PRETRAINED_CONFIG_ARCHIVE_MAP
# Modeling
if is_torch_available():
from .modeling_utils import (PreTrainedModel, prune_layer, Conv1D)
from .modeling_auto import (AutoModel, AutoModelForSequenceClassification, AutoModelForQuestionAnswering,
AutoModelWithLMHead)
from .modeling_bert import (BertPreTrainedModel, BertModel, BertForPreTraining,
BertForMaskedLM, BertForNextSentencePrediction,
BertForSequenceClassification, BertForMultipleChoice,
BertForTokenClassification, BertForQuestionAnswering,
load_tf_weights_in_bert, BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_openai import (OpenAIGPTPreTrainedModel, OpenAIGPTModel,
OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
load_tf_weights_in_openai_gpt, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_transfo_xl import (TransfoXLPreTrainedModel, TransfoXLModel, TransfoXLLMHeadModel,
AdaptiveEmbedding,
load_tf_weights_in_transfo_xl, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_gpt2 import (GPT2PreTrainedModel, GPT2Model,
GPT2LMHeadModel, GPT2DoubleHeadsModel,
load_tf_weights_in_gpt2, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_ctrl import (CTRLPreTrainedModel, CTRLModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlnet import (XLNetPreTrainedModel, XLNetModel, XLNetLMHeadModel,
XLNetForSequenceClassification, XLNetForTokenClassification,
XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple,
XLNetForQuestionAnswering, load_tf_weights_in_xlnet,
XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_xlm import (XLMPreTrainedModel , XLMModel,
XLMWithLMHeadModel, XLMForSequenceClassification,
XLMForQuestionAnswering, XLMForQuestionAnsweringSimple,
XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_roberta import (RobertaForMaskedLM, RobertaModel,
RobertaForSequenceClassification, RobertaForMultipleChoice,
RobertaForTokenClassification,
ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_distilbert import (DistilBertPreTrainedModel, DistilBertForMaskedLM, DistilBertModel,
DistilBertForSequenceClassification, DistilBertForQuestionAnswering,
DistilBertForTokenClassification,
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_camembert import (CamembertForMaskedLM, CamembertModel,
CamembertForSequenceClassification, CamembertForMultipleChoice,
CamembertForTokenClassification,
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_encoder_decoder import PreTrainedEncoderDecoder, Model2Model
from .modeling_t5 import (T5PreTrainedModel, T5Model, T5WithLMHeadModel,
load_tf_weights_in_t5,
T5_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_albert import (AlbertPreTrainedModel, AlbertModel, AlbertForMaskedLM, AlbertForSequenceClassification,
AlbertForQuestionAnswering,
load_tf_weights_in_albert, ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
# Optimization
from .optimization import (AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup)
# TensorFlow
if is_tf_available():
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, TFSequenceSummary, shape_list
from .modeling_tf_auto import (TFAutoModel, TFAutoModelForSequenceClassification, TFAutoModelForQuestionAnswering,
TFAutoModelWithLMHead)
from .modeling_tf_bert import (TFBertPreTrainedModel, TFBertMainLayer, TFBertEmbeddings,
TFBertModel, TFBertForPreTraining,
TFBertForMaskedLM, TFBertForNextSentencePrediction,
TFBertForSequenceClassification, TFBertForMultipleChoice,
TFBertForTokenClassification, TFBertForQuestionAnswering,
TF_BERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_gpt2 import (TFGPT2PreTrainedModel, TFGPT2MainLayer,
TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel,
TF_GPT2_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_openai import (TFOpenAIGPTPreTrainedModel, TFOpenAIGPTMainLayer,
TFOpenAIGPTModel, TFOpenAIGPTLMHeadModel, TFOpenAIGPTDoubleHeadsModel,
TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_transfo_xl import (TFTransfoXLPreTrainedModel, TFTransfoXLMainLayer,
TFTransfoXLModel, TFTransfoXLLMHeadModel,
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_xlnet import (TFXLNetPreTrainedModel, TFXLNetMainLayer,
TFXLNetModel, TFXLNetLMHeadModel,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetForQuestionAnsweringSimple,
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_xlm import (TFXLMPreTrainedModel, TFXLMMainLayer,
TFXLMModel, TFXLMWithLMHeadModel,
TFXLMForSequenceClassification,
TFXLMForQuestionAnsweringSimple,
TF_XLM_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_roberta import (TFRobertaPreTrainedModel, TFRobertaMainLayer,
TFRobertaModel, TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_distilbert import (TFDistilBertPreTrainedModel, TFDistilBertMainLayer,
TFDistilBertModel, TFDistilBertForMaskedLM,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForQuestionAnswering,
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_ctrl import (TFCTRLPreTrainedModel, TFCTRLModel,
TFCTRLLMHeadModel,
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_albert import (TFAlbertPreTrainedModel, TFAlbertModel, TFAlbertForMaskedLM,
TFAlbertForSequenceClassification,
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_MAP)
from .modeling_tf_t5 import (TFT5PreTrainedModel, TFT5Model, TFT5WithLMHeadModel,
TF_T5_PRETRAINED_MODEL_ARCHIVE_MAP)
# Optimization
from .optimization_tf import (WarmUp, create_optimizer, AdamWeightDecay, GradientAccumulator)
# TF 2.0 <=> PyTorch conversion utilities
from .modeling_tf_pytorch_utils import (convert_tf_weight_name_to_pt_weight_name,
load_pytorch_checkpoint_in_tf2_model,
load_pytorch_weights_in_tf2_model,
load_pytorch_model_in_tf2_model,
load_tf2_checkpoint_in_pytorch_model,
load_tf2_weights_in_pytorch_model,
load_tf2_model_in_pytorch_model)
if not is_tf_available() and not is_torch_available():
logger.warning("Neither PyTorch nor TensorFlow >= 2.0 have been found."
"Models won't be available and only tokenizers, configuration"
"and file/data utilities can be used.")
| true
| true
|
790b9fb1483e1de5093e122f612049f94fddc227
| 1,862
|
py
|
Python
|
modules/crawler/DatasetProcessing/OBSAZENIMISTNOSTI_processor.py
|
kivzcu/heatmap.zcu
|
526c4df9c1c299eb1b3e9df6bd2be5578d462405
|
[
"MIT"
] | null | null | null |
modules/crawler/DatasetProcessing/OBSAZENIMISTNOSTI_processor.py
|
kivzcu/heatmap.zcu
|
526c4df9c1c299eb1b3e9df6bd2be5578d462405
|
[
"MIT"
] | null | null | null |
modules/crawler/DatasetProcessing/OBSAZENIMISTNOSTI_processor.py
|
kivzcu/heatmap.zcu
|
526c4df9c1c299eb1b3e9df6bd2be5578d462405
|
[
"MIT"
] | null | null | null |
from Utilities.CSV import csv_data_line
from Utilities import date_formating
import logging
from datetime import date
import time
import datetime
from shared_types import DateDict
logging.basicConfig(filename='../../CrawlerLogs/' + 'Crawlerlog-' +
date.today().strftime("%b-%Y") + '.log',
level=logging.INFO,
format='%(asctime)s %(message)s')
def process_file(filename: str) -> DateDict:
"""
Method that take path to crawled file and outputs date dictionary:
Date dictionary is a dictionary where keys are dates in format YYYY-mm-dd-hh (2018-04-08-15)
and value is dictionary where keys are devices (specified in configuration file)
and value is CSVDataLine.csv_data_line with device,date and occurrence
Args:
filename: name of processed file
Returns:
None if not implemented
date_dict when implemented
"""
date_dict = {}
with open(filename, "r") as file:
YEAR_START = 1
YEAR_END = 11
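        # array[14] is expected to hold a quoted timestamp; chars 1..10 give the "YYYY-MM-DD" date part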
for line in file:
array = line.split(";")
            # pick the later of the two time columns
time_ = max(
array[2][1:-1],
array[3][1:-1],
key=lambda x: time.mktime(
datetime.datetime.strptime(x, "%H:%M").timetuple()))
date = date_formating.date_time_formatter(
array[14][YEAR_START:YEAR_END] + " " + time_)
name = array[10][1:-1]
if name == "":
continue
if date not in date_dict:
date_dict[date] = {}
if name in date_dict[date]:
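                # a later record for the same device and hour overwrites the previous occupancy value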
date_dict[date][name].occurrence = int(array[12])
else:
date_dict[date][name] = csv_data_line.CSVDataLine(
name, date, int(array[12]))
return date_dict
| 29.09375
| 96
| 0.575188
|
from Utilities.CSV import csv_data_line
from Utilities import date_formating
import logging
from datetime import date
import time
import datetime
from shared_types import DateDict
logging.basicConfig(filename='../../CrawlerLogs/' + 'Crawlerlog-' +
date.today().strftime("%b-%Y") + '.log',
level=logging.INFO,
format='%(asctime)s %(message)s')
def process_file(filename: str) -> DateDict:
date_dict = {}
with open(filename, "r") as file:
YEAR_START = 1
YEAR_END = 11
for line in file:
array = line.split(";")
time_ = max(
array[2][1:-1],
array[3][1:-1],
key=lambda x: time.mktime(
datetime.datetime.strptime(x, "%H:%M").timetuple()))
date = date_formating.date_time_formatter(
array[14][YEAR_START:YEAR_END] + " " + time_)
name = array[10][1:-1]
if name == "":
continue
if date not in date_dict:
date_dict[date] = {}
if name in date_dict[date]:
date_dict[date][name].occurrence = int(array[12])
else:
date_dict[date][name] = csv_data_line.CSVDataLine(
name, date, int(array[12]))
return date_dict
| true
| true
|
790ba23a1f89d392c6c4402663883408927c5005
| 1,812
|
py
|
Python
|
ambari-server/src/main/resources/stacks/HDP/2.5/services/IMPALA/package/scripts/impala-state-store.py
|
cas-packone/ambari-chs
|
68033fbd4b810b6642853f2ad9128cbbd4e0cb7b
|
[
"Apache-2.0"
] | 3
|
2019-06-20T11:49:36.000Z
|
2020-12-11T10:44:29.000Z
|
ambari-server/src/main/resources/stacks/HDP/2.5/services/IMPALA/package/scripts/impala-state-store.py
|
cas-packone/ambari-chs
|
68033fbd4b810b6642853f2ad9128cbbd4e0cb7b
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/main/resources/stacks/HDP/2.5/services/IMPALA/package/scripts/impala-state-store.py
|
cas-packone/ambari-chs
|
68033fbd4b810b6642853f2ad9128cbbd4e0cb7b
|
[
"Apache-2.0"
] | 1
|
2019-03-20T08:36:17.000Z
|
2019-03-20T08:36:17.000Z
|
import sys, os, pwd, signal, time
from resource_management import *
from resource_management.core.base import Fail
from resource_management.core.exceptions import ComponentIsNotRunning
from subprocess import call
from impala_base import ImpalaBase
class StateStore(ImpalaBase):
  # Install packages and Impala, then run configure
def install(self, env):
# Install packages listed in metainfo.xml
self.install_packages(env)
self.installImpala(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
  # Start the service via its system service script
def start(self, env):
import params
self.configure(env)
#self.create_hdfs_user(params.flink_user)
cmd = 'service impala-state-store start'
Execute('echo "Running cmd: ' + cmd + '"')
Execute(cmd)
  # Stop the service via its system service script
def stop(self, env):
cmd = 'service impala-state-store stop'
Execute('echo "Running cmd: ' + cmd + '"')
Execute(cmd)
def restart(self,env):
cmd = 'service impala-state-store stop'
Execute('echo "Running cmd: ' + cmd + '"')
Execute(cmd, ignore_failures=True)
self.start(env)
  # Query service status via its system service script
def status(self, env):
cmd = 'service impala-state-store status'
Execute('echo "Running cmd: ' + cmd + '"')
Execute(cmd)
def create_hdfs_user(self, user):
Execute('hadoop fs -mkdir -p /user/'+user, user='hdfs', ignore_failures=True)
Execute('hadoop fs -chown ' + user + ' /user/'+user, user='hdfs')
Execute('hadoop fs -chgrp ' + user + ' /user/'+user, user='hdfs')
if __name__ == "__main__":
StateStore().execute()
| 31.789474
| 85
| 0.63521
|
import sys, os, pwd, signal, time
from resource_management import *
from resource_management.core.base import Fail
from resource_management.core.exceptions import ComponentIsNotRunning
from subprocess import call
from impala_base import ImpalaBase
class StateStore(ImpalaBase):
def install(self, env):
self.install_packages(env)
self.installImpala(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
def start(self, env):
import params
self.configure(env)
cmd = 'service impala-state-store start'
Execute('echo "Running cmd: ' + cmd + '"')
Execute(cmd)
def stop(self, env):
cmd = 'service impala-state-store stop'
Execute('echo "Running cmd: ' + cmd + '"')
Execute(cmd)
def restart(self,env):
cmd = 'service impala-state-store stop'
Execute('echo "Running cmd: ' + cmd + '"')
Execute(cmd, ignore_failures=True)
self.start(env)
def status(self, env):
cmd = 'service impala-state-store status'
Execute('echo "Running cmd: ' + cmd + '"')
Execute(cmd)
def create_hdfs_user(self, user):
Execute('hadoop fs -mkdir -p /user/'+user, user='hdfs', ignore_failures=True)
Execute('hadoop fs -chown ' + user + ' /user/'+user, user='hdfs')
Execute('hadoop fs -chgrp ' + user + ' /user/'+user, user='hdfs')
if __name__ == "__main__":
StateStore().execute()
| true
| true
|
790ba29310674e8c543daa342a1ff8b866b509f8
| 401
|
py
|
Python
|
output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_enumeration_5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_enumeration_5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/atomic/g_year_month/schema_instance/nistschema_sv_iv_atomic_g_year_month_enumeration_5_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.nist_data.atomic.g_year_month.schema_instance.nistschema_sv_iv_atomic_g_year_month_enumeration_5_xsd.nistschema_sv_iv_atomic_g_year_month_enumeration_5 import (
NistschemaSvIvAtomicGYearMonthEnumeration5,
NistschemaSvIvAtomicGYearMonthEnumeration5Type,
)
__all__ = [
"NistschemaSvIvAtomicGYearMonthEnumeration5",
"NistschemaSvIvAtomicGYearMonthEnumeration5Type",
]
| 40.1
| 179
| 0.875312
|
from output.models.nist_data.atomic.g_year_month.schema_instance.nistschema_sv_iv_atomic_g_year_month_enumeration_5_xsd.nistschema_sv_iv_atomic_g_year_month_enumeration_5 import (
NistschemaSvIvAtomicGYearMonthEnumeration5,
NistschemaSvIvAtomicGYearMonthEnumeration5Type,
)
__all__ = [
"NistschemaSvIvAtomicGYearMonthEnumeration5",
"NistschemaSvIvAtomicGYearMonthEnumeration5Type",
]
| true
| true
|
790ba2ea6ec025cbc3de4e69fc78f8ea5e36e0a8
| 433
|
py
|
Python
|
modoboa/transport/api/v2/tests.py
|
suryatmodulus/modoboa
|
f8164a9bbe1e5bfa7f1a1f8813a3790ebf3397ee
|
[
"ISC"
] | null | null | null |
modoboa/transport/api/v2/tests.py
|
suryatmodulus/modoboa
|
f8164a9bbe1e5bfa7f1a1f8813a3790ebf3397ee
|
[
"ISC"
] | null | null | null |
modoboa/transport/api/v2/tests.py
|
suryatmodulus/modoboa
|
f8164a9bbe1e5bfa7f1a1f8813a3790ebf3397ee
|
[
"ISC"
] | null | null | null |
"""API v2 tests."""
from django.urls import reverse
from modoboa.lib.tests import ModoAPITestCase
class TransportViewSetTestCase(ModoAPITestCase):
def test_list(self):
url = reverse("v2:transport-list")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
backends = resp.json()
self.assertEqual(len(backends), 1)
self.assertEqual(backends[0]["name"], "relay")
| 25.470588
| 54
| 0.672055
|
from django.urls import reverse
from modoboa.lib.tests import ModoAPITestCase
class TransportViewSetTestCase(ModoAPITestCase):
def test_list(self):
url = reverse("v2:transport-list")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
backends = resp.json()
self.assertEqual(len(backends), 1)
self.assertEqual(backends[0]["name"], "relay")
| true
| true
|
790ba3c7b2e94de8a1eba66cde239879c2f7ca94
| 5,717
|
py
|
Python
|
tests/kafkatest/tests/client/message_format_change_test.py
|
1810824959/kafka
|
bb1ef567b44208e63459ca6f9db0654d867d7e7e
|
[
"Apache-2.0"
] | null | null | null |
tests/kafkatest/tests/client/message_format_change_test.py
|
1810824959/kafka
|
bb1ef567b44208e63459ca6f9db0654d867d7e7e
|
[
"Apache-2.0"
] | null | null | null |
tests/kafkatest/tests/client/message_format_change_test.py
|
1810824959/kafka
|
bb1ef567b44208e63459ca6f9db0654d867d7e7e
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.mark import parametrize
from ducktape.utils.util import wait_until
from ducktape.mark.resource import cluster
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int
from kafkatest.version import LATEST_0_9, LATEST_0_10, LATEST_0_11, DEV_BRANCH, KafkaVersion
class MessageFormatChangeTest(ProduceConsumeValidateTest):
def __init__(self, test_context):
super(MessageFormatChangeTest, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
# Producer and consumer
self.producer_throughput = 10000
self.num_producers = 1
self.num_consumers = 1
self.messages_per_producer = 100
def produce_and_consume(self, producer_version, consumer_version, group):
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic,
throughput=self.producer_throughput,
message_validator=is_int,
version=KafkaVersion(producer_version))
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, consumer_timeout_ms=30000,
message_validator=is_int, version=KafkaVersion(consumer_version))
self.consumer.group_id = group
self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
@cluster(num_nodes=12)
@parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH))
@parametrize(producer_version=str(LATEST_0_10), consumer_version=str(LATEST_0_10))
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
def test_compatibility(self, producer_version, consumer_version):
""" This tests performs the following checks:
The workload is a mix of 0.9.x, 0.10.x and 0.11.x producers and consumers
that produce to and consume from a DEV_BRANCH cluster
1. initially the topic is using message format 0.9.0
2. change the message format version for topic to 0.10.0 on the fly.
3. change the message format version for topic to 0.11.0 on the fly.
4. change the message format version for topic back to 0.10.0 on the fly (only if the client version is 0.11.0 or newer)
- The producers and consumers should not have any issue.
Note regarding step number 4. Downgrading the message format version is generally unsupported as it breaks
older clients. More concretely, if we downgrade a topic from 0.11.0 to 0.10.0 after it contains messages with
version 0.11.0, we will return the 0.11.0 messages without down conversion due to an optimisation in the
handling of fetch requests. This will break any consumer that doesn't support 0.11.0. So, in practice, step 4
        is similar to step 2, and it didn't seem worth it to increase the cluster size in order to add a step 5 that
        would change the message format version for the topic back to 0.9.0.
"""
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
self.kafka.start()
self.logger.info("First format change to 0.9.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, "group1")
self.logger.info("Second format change to 0.10.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, "group2")
self.logger.info("Third format change to 0.11.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_11))
self.produce_and_consume(producer_version, consumer_version, "group3")
if producer_version == str(DEV_BRANCH) and consumer_version == str(DEV_BRANCH):
self.logger.info("Fourth format change back to 0.10.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, "group4")
| 55.504854
| 128
| 0.682526
|
from ducktape.mark import parametrize
from ducktape.utils.util import wait_until
from ducktape.mark.resource import cluster
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.tests.produce_consume_validate import ProduceConsumeValidateTest
from kafkatest.utils import is_int
from kafkatest.version import LATEST_0_9, LATEST_0_10, LATEST_0_11, DEV_BRANCH, KafkaVersion
class MessageFormatChangeTest(ProduceConsumeValidateTest):
def __init__(self, test_context):
super(MessageFormatChangeTest, self).__init__(test_context=test_context)
def setUp(self):
self.topic = "test_topic"
self.zk = ZookeeperService(self.test_context, num_nodes=1)
self.zk.start()
self.producer_throughput = 10000
self.num_producers = 1
self.num_consumers = 1
self.messages_per_producer = 100
def produce_and_consume(self, producer_version, consumer_version, group):
self.producer = VerifiableProducer(self.test_context, self.num_producers, self.kafka,
self.topic,
throughput=self.producer_throughput,
message_validator=is_int,
version=KafkaVersion(producer_version))
self.consumer = ConsoleConsumer(self.test_context, self.num_consumers, self.kafka,
self.topic, consumer_timeout_ms=30000,
message_validator=is_int, version=KafkaVersion(consumer_version))
self.consumer.group_id = group
self.run_produce_consume_validate(lambda: wait_until(
            lambda: self.producer.each_produced_at_least(self.messages_per_producer),
timeout_sec=120, backoff_sec=1,
err_msg="Producer did not produce all messages in reasonable amount of time"))
@cluster(num_nodes=12)
@parametrize(producer_version=str(DEV_BRANCH), consumer_version=str(DEV_BRANCH))
@parametrize(producer_version=str(LATEST_0_10), consumer_version=str(LATEST_0_10))
@parametrize(producer_version=str(LATEST_0_9), consumer_version=str(LATEST_0_9))
def test_compatibility(self, producer_version, consumer_version):
self.kafka = KafkaService(self.test_context, num_nodes=3, zk=self.zk, version=DEV_BRANCH, topics={self.topic: {
"partitions": 3,
"replication-factor": 3,
'configs': {"min.insync.replicas": 2}}})
self.kafka.start()
self.logger.info("First format change to 0.9.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_9))
self.produce_and_consume(producer_version, consumer_version, "group1")
self.logger.info("Second format change to 0.10.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, "group2")
self.logger.info("Third format change to 0.11.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_11))
self.produce_and_consume(producer_version, consumer_version, "group3")
if producer_version == str(DEV_BRANCH) and consumer_version == str(DEV_BRANCH):
self.logger.info("Fourth format change back to 0.10.0")
self.kafka.alter_message_format(self.topic, str(LATEST_0_10))
self.produce_and_consume(producer_version, consumer_version, "group4")
| true
| true
|
790ba657f9b5e772aa7a7bec79f73aa71250de33
| 1,498
|
py
|
Python
|
jenkins/tagging/tagging.py
|
athiruma/cloud-governance
|
0515975090046266bce70990e4e269ae6ab03296
|
[
"Apache-2.0"
] | null | null | null |
jenkins/tagging/tagging.py
|
athiruma/cloud-governance
|
0515975090046266bce70990e4e269ae6ab03296
|
[
"Apache-2.0"
] | 1
|
2022-02-02T17:38:05.000Z
|
2022-02-02T17:38:05.000Z
|
jenkins/tagging/tagging.py
|
athiruma/cloud-governance
|
0515975090046266bce70990e4e269ae6ab03296
|
[
"Apache-2.0"
] | null | null | null |
import os
AWS_ACCESS_KEY_ID_PERF = os.environ['AWS_ACCESS_KEY_ID_PERF']
AWS_SECRET_ACCESS_KEY_PERF = os.environ['AWS_SECRET_ACCESS_KEY_PERF']
AWS_ACCESS_KEY_ID_DELETE_PERF = os.environ['AWS_ACCESS_KEY_ID_DELETE_PERF']
AWS_SECRET_ACCESS_KEY_DELETE_PERF = os.environ['AWS_SECRET_ACCESS_KEY_DELETE_PERF']
BUCKET_PERF = os.environ['BUCKET_PERF']
AWS_ACCESS_KEY_ID_PSAP = os.environ['AWS_ACCESS_KEY_ID_PSAP']
AWS_SECRET_ACCESS_KEY_PSAP = os.environ['AWS_SECRET_ACCESS_KEY_PSAP']
BUCKET_PSAP = os.environ['BUCKET_PSAP']
AWS_ACCESS_KEY_ID_RH_PERF = os.environ['AWS_ACCESS_KEY_ID_RH_PERF']
AWS_SECRET_ACCESS_KEY_RH_PERF = os.environ['AWS_SECRET_ACCESS_KEY_RH_PERF']
BUCKET_RH_PERF = os.environ['BUCKET_RH_PERF']
GITHUB_TOKEN = os.environ['GITHUB_TOKEN']
LOGS = os.environ.get('LOGS', 'logs')
mandatory_tags = {'Budget': 'PERF-DEPT'}
print('Run all policies per active region')
regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1']
for region in regions:
os.system(f"""sudo podman run --rm --name cloud-governance-tagging -e account='perf' -e policy=tag_resources -e AWS_ACCESS_KEY_ID={AWS_ACCESS_KEY_ID_DELETE_PERF} -e AWS_SECRET_ACCESS_KEY={AWS_SECRET_ACCESS_KEY_DELETE_PERF} -e AWS_DEFAULT_REGION={region} -e tag_operation=update -e mandatory_tags="{mandatory_tags}" -e log_level=INFO -v /etc/localtime:/etc/localtime quay.io/ebattat/cloud-governance:latest""")
| 59.92
| 413
| 0.789052
|
import os
AWS_ACCESS_KEY_ID_PERF = os.environ['AWS_ACCESS_KEY_ID_PERF']
AWS_SECRET_ACCESS_KEY_PERF = os.environ['AWS_SECRET_ACCESS_KEY_PERF']
AWS_ACCESS_KEY_ID_DELETE_PERF = os.environ['AWS_ACCESS_KEY_ID_DELETE_PERF']
AWS_SECRET_ACCESS_KEY_DELETE_PERF = os.environ['AWS_SECRET_ACCESS_KEY_DELETE_PERF']
BUCKET_PERF = os.environ['BUCKET_PERF']
AWS_ACCESS_KEY_ID_PSAP = os.environ['AWS_ACCESS_KEY_ID_PSAP']
AWS_SECRET_ACCESS_KEY_PSAP = os.environ['AWS_SECRET_ACCESS_KEY_PSAP']
BUCKET_PSAP = os.environ['BUCKET_PSAP']
AWS_ACCESS_KEY_ID_RH_PERF = os.environ['AWS_ACCESS_KEY_ID_RH_PERF']
AWS_SECRET_ACCESS_KEY_RH_PERF = os.environ['AWS_SECRET_ACCESS_KEY_RH_PERF']
BUCKET_RH_PERF = os.environ['BUCKET_RH_PERF']
GITHUB_TOKEN = os.environ['GITHUB_TOKEN']
LOGS = os.environ.get('LOGS', 'logs')
mandatory_tags = {'Budget': 'PERF-DEPT'}
print('Run all policies per active region')
regions = ['us-east-1', 'us-east-2', 'us-west-1', 'us-west-2', 'eu-central-1', 'ap-south-1', 'eu-north-1', 'ap-northeast-1', 'ap-southeast-1', 'ap-southeast-2', 'eu-west-3', 'sa-east-1']
for region in regions:
os.system(f"""sudo podman run --rm --name cloud-governance-tagging -e account='perf' -e policy=tag_resources -e AWS_ACCESS_KEY_ID={AWS_ACCESS_KEY_ID_DELETE_PERF} -e AWS_SECRET_ACCESS_KEY={AWS_SECRET_ACCESS_KEY_DELETE_PERF} -e AWS_DEFAULT_REGION={region} -e tag_operation=update -e mandatory_tags="{mandatory_tags}" -e log_level=INFO -v /etc/localtime:/etc/localtime quay.io/ebattat/cloud-governance:latest""")
| true
| true
|
790ba770ac1f7a168aec95e7bc2b402d1e813373
| 23,311
|
py
|
Python
|
modules/balancer/balancer.py
|
wijnandb/CodeCult-Scratch
|
9bbbfd3b4b2f147bfac75cb1b704f08c63a11969
|
[
"Apache-2.0"
] | null | null | null |
modules/balancer/balancer.py
|
wijnandb/CodeCult-Scratch
|
9bbbfd3b4b2f147bfac75cb1b704f08c63a11969
|
[
"Apache-2.0"
] | null | null | null |
modules/balancer/balancer.py
|
wijnandb/CodeCult-Scratch
|
9bbbfd3b4b2f147bfac75cb1b704f08c63a11969
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""External task balancer.
Overall architecture is:
1. Users interact with clients.
2. Clients make requests against the frontend's REST API.
3. The FE makes a REST call against a worker or worker pool identified by
gcb_external_task_balancer_worker_url. The FE provisions a unique token,
generates a Task instance, and dispatches a REST request to the worker or
worker pool.
4. The worker or worker pool exposes a REST API for use by the FE. Worker
responses contain the name of the worker so the FE can poll a specific worker
for results using the (ticket, name) combination. Workers are in charge both
of doing work and of cleaning up their results. Clients do not talk to
workers directly.
To enable, set up a pool of workers behind a single URL. For example, this might
be a set of machines behind a balancer on GCE or an AWS ELB. Next, set
gcb_external_task_balancer_rest_enabled to True and set
gcb_external_task_balancer_worker_url to the URL of your worker pool. Secure
communication if desired, and write a client against the REST API this module
exposes.
This implementation has the following big limitations:
1. It is insecure. Currently there is no token exchange/validation at the API
level, so anyone who gets a ticket (for example, by listening to HTTP
traffic between clients and the FE) can issue API calls.
2. There is no XSSI/XSRF protection. Note that exposed endpoints will 404 by
default because gcb_external_task_balancer_rest_enabled is False, so the
behavior without overrides does *not* expose unprotected REST endpoints.
3. Old task items hang around forever. Could implement garbage collection cron
to remove them past a TTL.
4. The REST API is missing the ability to mark a single task for deletion and to
fetch a paginated list of results (without their payloads) for a given
user_id. Open issue: we do not expose the notion of a project in the REST
API, but we have it in the workers. Should we expose it to allow filtering at
the API level?
5. Add support for one balancer handling multiple pools of workers, not just
one.
6. Manager.mark* methods don't all check that the requested status transition is
valid. This means buggy handlers/workers/clients could cause invalid status
transitions. Fix is to have the Manager throw TransitionError in those cases
and modify the handlers to 400/500.
TODO(johncox): add URL of sample worker implementation once it's finished.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
import urllib
from controllers import utils
from models import config
from models import custom_modules
from models import entities
from models import transforms
from google.appengine.api import urlfetch
from google.appengine.ext import db
_DISABLE_CACHING_HEADERS = {
'Cache-Control': 'max-age=0, must-revalidate',
'Pragma': 'no-cache',
}
_PAYLOAD = 'payload'
_TICKET = 'ticket'
_PROJECT_NAME = 'project'
_REST_URL_BASE = '/rest/balancer/v1'
_REST_URL_PROJECT = _REST_URL_BASE + '/project'
_REST_URL_TASK = _REST_URL_BASE
_STATUS = 'status'
_USER_ID = 'user_id'
_WORKER_DEADLINE_SECONDS = 5
_WORKER_ID = 'worker_id'
_WORKER_LOCKED = 'Worker locked'
_WORKER_LOCKED_MAX_RETRIES = 3
_LOG = logging.getLogger('modules.balancer.balancer')
logging.basicConfig()
EXTERNAL_TASK_BALANCER_REST_ENABLED = config.ConfigProperty(
'gcb_external_task_balancer_rest_enabled', bool,
('Whether or not to enable the REST endpoints for the external task '
'balancer module. You must also set the external task balancer URL '
'to use this feature.'), default_value=False,
label='Enable task balancer REST endpoints')
EXTERNAL_TASK_BALANCER_WORKER_URL = config.ConfigProperty(
'gcb_external_task_balancer_worker_url', str,
'URL for the worker pool used by the external task balancer module.',
default_value='', label='External task balancer worker URL')
class Error(Exception):
"""Base error class."""
class NotFoundError(Exception):
"""Raised when an op that needs an entity is run with a missing entity."""
class TransitionError(Exception):
"""Raised when an op attempts an invalid transition on a task."""
def _from_json(json_str):
"""Turns json -> object (or None if json cannot be parsed)."""
try:
return transforms.loads(json_str)
except: # Deliberately catching everything. pylint: disable=bare-except
return None
class Manager(object):
"""DAO for external tasks."""
# Treating access as module-protected. pylint: disable=protected-access
@classmethod
def create(cls, user_id=None):
"""Creates task and returns ticket string."""
task = _ExternalTask(status=_ExternalTask.CREATED, user_id=user_id)
return _ExternalTask.get_ticket_by_key(db.put(task))
@classmethod
def get(cls, ticket):
"""Gets task for ticket (or None if no matching task)."""
external_task = db.get(_ExternalTask.get_key_by_ticket(ticket))
if not external_task:
return None
return Task._from_external_task(external_task)
@classmethod
def list(cls, user_id):
"""Returns list of Task matching user_id, ordered by create date."""
return [Task._from_external_task(et) for et in sorted(
_ExternalTask.all().filter(
'%s =' % _ExternalTask.user_id.name, user_id
).fetch(1000), key=lambda task: task.create_date)]
@classmethod
@db.transactional
def mark_deleted(cls, ticket):
task = cls._get_or_raise_not_found_error(ticket)
task.status = _ExternalTask.DELETED
db.put(task)
@classmethod
@db.transactional
def mark_done(cls, ticket, status, result):
if status not in _ExternalTask._TERMINAL_STATUSES:
raise TransitionError(
'mark_done called with non-terminal status ' + status)
task = cls._get_or_raise_not_found_error(ticket)
task.result = result
task.status = status
db.put(task)
@classmethod
@db.transactional
def mark_failed(cls, ticket):
task = cls._get_or_raise_not_found_error(ticket)
task.status = _ExternalTask.FAILED
db.put(task)
@classmethod
@db.transactional
def mark_running(cls, ticket, worker_id):
task = cls._get_or_raise_not_found_error(ticket)
task.status = _ExternalTask.RUNNING
task.worker_id = worker_id
db.put(task)
@classmethod
def _delete(cls, ticket):
key = _ExternalTask.get_key_by_ticket(ticket)
db.delete(key)
@classmethod
def _get_or_raise_not_found_error(cls, ticket):
key = _ExternalTask.get_key_by_ticket(ticket)
task = db.get(key)
if not task:
raise NotFoundError
return task
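# --- Illustrative lifecycle sketch (not from the source; the user id, worker
# URL, and result value are hypothetical): how the Manager methods above are
# meant to be sequenced.
#
#   ticket = Manager.create(user_id='user-123')         # status: CREATED
#   Manager.mark_running(ticket, 'https://worker-1')    # status: RUNNING
#   Manager.mark_done(ticket, _ExternalTask.COMPLETE, '{"answer": 42}')
#   Manager.get(ticket).is_done()                       # -> True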
class Task(object):
"""DTO for external tasks."""
def __init__(
self, change_date, create_date, result, status, ticket, user_id,
worker_id):
self.change_date = change_date
self.create_date = create_date
self.result = result
self.status = status
self.ticket = ticket
self.user_id = user_id
self.worker_id = worker_id
@classmethod
def _from_external_task(cls, external_task):
return cls(
external_task.change_date, external_task.create_date,
external_task.result, external_task.status,
external_task.get_ticket(), external_task.user_id,
external_task.worker_id)
def is_done(self):
return _ExternalTask.is_status_terminal(self.status)
def for_json(self):
return {
'change_date': self.change_date.strftime(
transforms.ISO_8601_DATETIME_FORMAT),
'create_date': self.create_date.strftime(
transforms.ISO_8601_DATETIME_FORMAT),
'result': self.result,
'status': self.status,
'ticket': self.ticket,
'user_id': self.user_id,
'worker_id': self.worker_id,
}
def __eq__(self, other):
return (
isinstance(other, Task) and
self.change_date == other.change_date and
self.create_date == other.create_date and
self.result == other.result and
self.status == other.status and
self.ticket == other.ticket and
self.user_id == other.user_id and
self.worker_id == other.worker_id)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return (
'Task - change_date: %(change_date)s, '
'create_date: %(create_date)s, result: %(result)s, '
'status: %(status)s, ticket: %(ticket)s, user_id: %(user_id)s, '
            'worker_id: %(worker_id)s' % self.for_json())
class _ExternalTask(entities.BaseEntity):
"""Storage for external tasks."""
# States a task may be in.
COMPLETE = 'complete' # Done running and in known success state.
CREATED = 'created' # Datastore entity created, but task not yet running.
DELETED = 'deleted' # Marked for deletion; could be deleted later.
FAILED = 'failed' # Done running and in known failure state.
RUNNING = 'running' # Currently running on a worker.
_PENDING_STATUSES = frozenset([
CREATED,
RUNNING,
])
_TERMINAL_STATUSES = frozenset([
COMPLETE,
DELETED,
FAILED,
])
STATUSES = _PENDING_STATUSES.union(_TERMINAL_STATUSES)
# When the task was last edited.
change_date = db.DateTimeProperty(required=True, auto_now=True)
# When the task was created.
create_date = db.DateTimeProperty(required=True, auto_now_add=True)
# Output of the task in JSON.
result = db.TextProperty()
# Last observed status of the task. Can be inaccurate: for example, if a
# user creates a new task but navigates away before the task completes and
# their client never fetches the task when it's done, we'll still show it
# running.
status = db.StringProperty(required=True, choices=STATUSES)
# Optional identifier for the user who owns the task. We impose no
# restrictions beyond the identifier being a string <= 500B, per datastore.
user_id = db.StringProperty()
# Identifier for the worker.
worker_id = db.StringProperty()
@classmethod
def get_key_by_ticket(cls, ticket_str):
try:
return db.Key(encoded=ticket_str)
except:
raise ValueError(
'Cannot make _ExternalTask key from ticket value: %s' % (
ticket_str))
@classmethod
def get_ticket_by_key(cls, key):
return str(key)
@classmethod
def is_status_terminal(cls, status):
return status in cls._TERMINAL_STATUSES
def get_ticket(self):
"""Returns string identifier for the task; raises NotSavedError."""
return self.get_ticket_by_key(self.key())
class _Operation(object):
"""Base class for wire operation payloads."""
@classmethod
def from_str(cls, raw_str):
return cls._from_json(transforms.loads(raw_str))
@classmethod
def _from_json(cls, parsed):
# Parse and validate raw input, raising ValueError if necessary.
raise NotImplementedError
def ready(self):
"""True iff the operation has all data it needs to be issued."""
raise NotImplementedError
def to_json(self):
return transforms.dumps(self._to_dict())
def to_url(self):
return urllib.quote_plus(self.to_json())
def update(self, updates_dict):
for k, v in updates_dict.iteritems():
if not hasattr(self, k):
raise ValueError('Cannot set name ' + k)
setattr(self, k, v)
def _to_dict(self):
raise NotImplementedError
class _CreateTaskOperation(_Operation):
def __init__(self, payload, ticket, user_id):
self.payload = payload
self.ticket = ticket
self.user_id = user_id
@classmethod
def _from_json(cls, parsed):
return cls(parsed, None, parsed.get(_USER_ID))
def ready(self):
return self.payload is not None and self.ticket is not None
def _to_dict(self):
return {
_PAYLOAD: self.payload,
_TICKET: self.ticket,
_USER_ID: self.user_id,
}
class _GetProjectOperation(_Operation):
def __init__(self, payload):
self.payload = payload
@classmethod
def _from_json(cls, parsed):
return cls(parsed)
def ready(self):
return self.payload is not None
def _to_dict(self):
return {_PAYLOAD: self.payload}
class _GetTaskOperation(_Operation):
def __init__(self, payload, ticket, worker_id):
self.payload = payload
self.ticket = ticket
self.worker_id = worker_id
@classmethod
def _from_json(cls, parsed):
ticket = parsed.get(_TICKET)
if not ticket:
raise ValueError('%s not set' % _TICKET)
return cls(parsed, ticket, parsed.get(_WORKER_ID))
def ready(self):
return (
self.payload is not None and self.ticket is not None and
self.worker_id is not None)
def _to_dict(self):
return {
_PAYLOAD: self.payload,
_TICKET: self.ticket,
_WORKER_ID: self.worker_id,
}
class _WorkerPool(object):
"""Interface for the pool of machines that do background work."""
@classmethod
def _check_response(cls, response):
return response.has_key(_PAYLOAD)
@classmethod
def _do_fetch(cls, url, method, operation):
try:
response = urlfetch.fetch(
cls._get_url(url, method, operation),
deadline=_WORKER_DEADLINE_SECONDS,
headers=_DISABLE_CACHING_HEADERS, method=method,
payload=cls._get_request_body(method, operation))
return (
response.status_code, cls._transform_response(response))
except urlfetch.DownloadError as e: # 4xx, 5xx, timeouts.
_LOG.error('Unable to dispatch request to pool; error: %s', e)
return 500, {_PAYLOAD: 'Unable to dispatch request'}
@classmethod
def _get_base_url(cls, worker_id=None):
base = (
worker_id if worker_id is not None else
EXTERNAL_TASK_BALANCER_WORKER_URL.value)
return base + '/rest/v1'
@classmethod
def _get_create_task_url(cls):
return cls._get_base_url()
@classmethod
def _get_get_project_url(cls):
return cls._get_base_url() + '/project'
@classmethod
def _get_get_task_url(cls, worker_id):
return cls._get_base_url(worker_id=worker_id)
@classmethod
def _get_request_body(cls, method, operation):
if method == 'GET':
return None
return operation.to_json()
@classmethod
def _get_url(cls, url, method, operation):
if method == 'GET':
return '%s?request=%s' % (url, operation.to_url())
return url
@classmethod
def _transform_response(cls, response):
"""Transforms worker success/error responses into a standard format."""
try:
parsed = transforms.loads(response.content)
if not cls._check_response(parsed):
raise ValueError
return {_PAYLOAD: parsed[_PAYLOAD]}
except: # Catch everything on purpose. pylint: disable=bare-except
_LOG.error(
'Unable to parse worker response: ' + response.content)
return {_PAYLOAD: 'Received invalid response'}
@classmethod
def create_task(cls, operation):
return cls._do_fetch(cls._get_create_task_url(), 'POST', operation)
@classmethod
def get_project(cls, operation):
return cls._do_fetch(cls._get_get_project_url(), 'GET', operation)
@classmethod
def get_task(cls, operation):
return cls._do_fetch(
cls._get_get_task_url(operation.worker_id), 'GET', operation)
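# --- Illustrative only: the worker response shape _WorkerPool accepts. A
# sketch of the wire contract implied by _check_response/_transform_response
# above and the _TaskRestHandler getters below; the field values shown are
# hypothetical.
#
#   {
#       'payload': {
#           'ticket': '<ticket issued by this frontend>',
#           'worker_id': 'https://worker-1.example.com',  # polled via GET later
#           'status': 'running',  # one of the _ExternalTask statuses
#           'payload': None,      # task result once status is terminal
#       },
#   }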
class _BaseRestHandler(utils.BaseRESTHandler):
def _send_json_response(self, code, response):
self.response.headers['Content-Disposition'] = 'attachment'
self.response.headers['Content-Type'] = (
'application/javascript; charset=utf-8')
self.response.headers['X-Content-Type-Options'] = 'nosniff'
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.status_code = code
self.response.write(transforms.dumps(response))
def _check_config_or_send_error(self):
if not EXTERNAL_TASK_BALANCER_REST_ENABLED.value:
self._send_json_response(404, 'Not found.')
return False
elif not EXTERNAL_TASK_BALANCER_WORKER_URL.value:
self._send_json_response(500, 'No worker pool found.')
return False
return True
class _ProjectRestHandler(_BaseRestHandler):
def get(self):
configured = self._check_config_or_send_error()
if not configured:
return
try:
op = _GetProjectOperation.from_str(self.request.get('request'))
except ValueError:
self._send_json_response(400, 'Bad request')
return
self._send_json_response(*_WorkerPool.get_project(op))
class _TaskRestHandler(_BaseRestHandler):
def _get_payload(self, response):
return response.get(_PAYLOAD)
def _get_status(self, response):
return self._get_payload(response).get(_STATUS)
def _get_task_payload(self, response):
return response.get(_PAYLOAD).get(_PAYLOAD)
def _get_ticket(self, response):
return self._get_payload(response).get(_TICKET)
def _get_worker_id(self, response):
return self._get_payload(response).get(_WORKER_ID)
def _retry_create_task(self, response, op):
tries = 0
while tries < _WORKER_LOCKED_MAX_RETRIES:
tries += 1
_LOG.info('Worker locked; retrying (tries: %s)', tries)
code, response = _WorkerPool.create_task(op)
if not self._worker_locked(response):
return code, response
return code, {_PAYLOAD: _WORKER_LOCKED}
def _worker_locked(self, response):
return response.get(_PAYLOAD) == _WORKER_LOCKED
def get(self):
configured = self._check_config_or_send_error()
if not configured:
return
try:
op = _GetTaskOperation.from_str(self.request.get('request'))
except: # pylint: disable=bare-except
self._send_json_response(400, 'Bad request')
return
task = None
try:
task = Manager.get(op.ticket)
except ValueError:
pass # Invalid ticket; handle as 404.
if not task:
self._send_json_response(
404, 'Task not found for ticket %s' % op.ticket)
return
if task.is_done():
self._send_json_response(200, task.for_json())
return
op.update({_WORKER_ID: task.worker_id})
if not op.ready():
# If the operation cannot be issued now, the most likely cause is
# that a past response from a worker contained insufficient data to
            # dispatch requests to that worker (for example, it might not have
            # set the worker_id). We cannot recover; all we can do is signal
# likely programmer error.
self._send_json_response(
500, 'Unable to compose request for worker')
return
code, response = _WorkerPool.get_task(op)
if code != 200:
self._send_json_response(code, response)
return
status = self._get_status(response)
if status is None:
self._send_json_response(500, 'Worker sent partial response')
return
elif _ExternalTask.is_status_terminal(status):
try:
payload = self._get_task_payload(response)
Manager.mark_done(op.ticket, status, payload)
except: # Catch everything. pylint: disable=bare-except
# TODO(johncox): could differentiate here and transition to a
# failed state when the payload is too big so we don't force
# unnecessary refetches against workers.
self._send_json_response(
500, 'Invalid worker status or payload too big')
return
self._send_json_response(*_WorkerPool.get_task(op))
def post(self):
configured = self._check_config_or_send_error()
if not configured:
return
try:
op = _CreateTaskOperation.from_str(self.request.get('request'))
except: # pylint: disable=bare-except
self._send_json_response(400, 'Bad request')
return
# Must allocate ticket at storage level for wire ops against worker, so
# we cannot create the task in one datastore call.
ticket = Manager.create(user_id=op.user_id)
op.update({_TICKET: ticket})
if not op.ready():
self._send_json_response(
500, 'Unable to compose request for worker')
return
code, response = _WorkerPool.create_task(op)
if self._worker_locked(response):
code, response = self._retry_create_task(response, op)
if code != 200:
Manager.mark_failed(ticket)
self._send_json_response(500, self._get_payload(response))
return
request_failed = code != 200
ticket_mismatch = self._get_ticket(response) != ticket
if request_failed or ticket_mismatch:
response = 'Ticket mismatch' if ticket_mismatch else 'Worker failed'
Manager.mark_failed(ticket)
self._send_json_response(500, response)
else: # Worker response indicates success.
Manager.mark_running(ticket, self._get_worker_id(response))
self._send_json_response(code, response)
custom_module = None
def register_module():
global custom_module # pylint: disable=global-statement
global_handlers = [
(_REST_URL_TASK, _TaskRestHandler),
(_REST_URL_PROJECT, _ProjectRestHandler),
]
namespaced_handlers = []
custom_module = custom_modules.Module(
'External Task Balancer', 'External Task Balancer', global_handlers,
namespaced_handlers)
return custom_module
| 33.253923
| 80
| 0.658616
|
__author__ = [
'johncox@google.com (John Cox)',
]
import logging
import urllib
from controllers import utils
from models import config
from models import custom_modules
from models import entities
from models import transforms
from google.appengine.api import urlfetch
from google.appengine.ext import db
_DISABLE_CACHING_HEADERS = {
'Cache-Control': 'max-age=0, must-revalidate',
'Pragma': 'no-cache',
}
_PAYLOAD = 'payload'
_TICKET = 'ticket'
_PROJECT_NAME = 'project'
_REST_URL_BASE = '/rest/balancer/v1'
_REST_URL_PROJECT = _REST_URL_BASE + '/project'
_REST_URL_TASK = _REST_URL_BASE
_STATUS = 'status'
_USER_ID = 'user_id'
_WORKER_DEADLINE_SECONDS = 5
_WORKER_ID = 'worker_id'
_WORKER_LOCKED = 'Worker locked'
_WORKER_LOCKED_MAX_RETRIES = 3
_LOG = logging.getLogger('modules.balancer.balancer')
logging.basicConfig()
EXTERNAL_TASK_BALANCER_REST_ENABLED = config.ConfigProperty(
'gcb_external_task_balancer_rest_enabled', bool,
('Whether or not to enable the REST endpoints for the external task '
'balancer module. You must also set the external task balancer URL '
'to use this feature.'), default_value=False,
label='Enable task balancer REST endpoints')
EXTERNAL_TASK_BALANCER_WORKER_URL = config.ConfigProperty(
'gcb_external_task_balancer_worker_url', str,
'URL for the worker pool used by the external task balancer module.',
default_value='', label='External task balancer worker URL')
class Error(Exception):
    pass
class NotFoundError(Exception):
    pass
class TransitionError(Exception):
    pass
def _from_json(json_str):
try:
return transforms.loads(json_str)
except:
return None
class Manager(object):
@classmethod
def create(cls, user_id=None):
task = _ExternalTask(status=_ExternalTask.CREATED, user_id=user_id)
return _ExternalTask.get_ticket_by_key(db.put(task))
@classmethod
def get(cls, ticket):
external_task = db.get(_ExternalTask.get_key_by_ticket(ticket))
if not external_task:
return None
return Task._from_external_task(external_task)
@classmethod
def list(cls, user_id):
return [Task._from_external_task(et) for et in sorted(
_ExternalTask.all().filter(
'%s =' % _ExternalTask.user_id.name, user_id
).fetch(1000), key=lambda task: task.create_date)]
@classmethod
@db.transactional
def mark_deleted(cls, ticket):
task = cls._get_or_raise_not_found_error(ticket)
task.status = _ExternalTask.DELETED
db.put(task)
@classmethod
@db.transactional
def mark_done(cls, ticket, status, result):
if status not in _ExternalTask._TERMINAL_STATUSES:
raise TransitionError(
'mark_done called with non-terminal status ' + status)
task = cls._get_or_raise_not_found_error(ticket)
task.result = result
task.status = status
db.put(task)
@classmethod
@db.transactional
def mark_failed(cls, ticket):
task = cls._get_or_raise_not_found_error(ticket)
task.status = _ExternalTask.FAILED
db.put(task)
@classmethod
@db.transactional
def mark_running(cls, ticket, worker_id):
task = cls._get_or_raise_not_found_error(ticket)
task.status = _ExternalTask.RUNNING
task.worker_id = worker_id
db.put(task)
@classmethod
def _delete(cls, ticket):
key = _ExternalTask.get_key_by_ticket(ticket)
db.delete(key)
@classmethod
def _get_or_raise_not_found_error(cls, ticket):
key = _ExternalTask.get_key_by_ticket(ticket)
task = db.get(key)
if not task:
raise NotFoundError
return task
class Task(object):
def __init__(
self, change_date, create_date, result, status, ticket, user_id,
worker_id):
self.change_date = change_date
self.create_date = create_date
self.result = result
self.status = status
self.ticket = ticket
self.user_id = user_id
self.worker_id = worker_id
@classmethod
def _from_external_task(cls, external_task):
return cls(
external_task.change_date, external_task.create_date,
external_task.result, external_task.status,
external_task.get_ticket(), external_task.user_id,
external_task.worker_id)
def is_done(self):
return _ExternalTask.is_status_terminal(self.status)
def for_json(self):
return {
'change_date': self.change_date.strftime(
transforms.ISO_8601_DATETIME_FORMAT),
'create_date': self.create_date.strftime(
transforms.ISO_8601_DATETIME_FORMAT),
'result': self.result,
'status': self.status,
'ticket': self.ticket,
'user_id': self.user_id,
'worker_id': self.worker_id,
}
def __eq__(self, other):
return (
isinstance(other, Task) and
self.change_date == other.change_date and
self.create_date == other.create_date and
self.result == other.result and
self.status == other.status and
self.ticket == other.ticket and
self.user_id == other.user_id and
self.worker_id == other.worker_id)
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return (
'Task - change_date: %(change_date)s, '
'create_date: %(create_date)s, result: %(result)s, '
'status: %(status)s, ticket: %(ticket)s, user_id: %(user_id)s, '
            'worker_id: %(worker_id)s' % self.for_json())
class _ExternalTask(entities.BaseEntity):
COMPLETE = 'complete'
CREATED = 'created'
DELETED = 'deleted'
FAILED = 'failed'
RUNNING = 'running'
_PENDING_STATUSES = frozenset([
CREATED,
RUNNING,
])
_TERMINAL_STATUSES = frozenset([
COMPLETE,
DELETED,
FAILED,
])
STATUSES = _PENDING_STATUSES.union(_TERMINAL_STATUSES)
change_date = db.DateTimeProperty(required=True, auto_now=True)
create_date = db.DateTimeProperty(required=True, auto_now_add=True)
result = db.TextProperty()
status = db.StringProperty(required=True, choices=STATUSES)
user_id = db.StringProperty()
worker_id = db.StringProperty()
@classmethod
def get_key_by_ticket(cls, ticket_str):
try:
return db.Key(encoded=ticket_str)
except:
raise ValueError(
'Cannot make _ExternalTask key from ticket value: %s' % (
ticket_str))
@classmethod
def get_ticket_by_key(cls, key):
return str(key)
@classmethod
def is_status_terminal(cls, status):
return status in cls._TERMINAL_STATUSES
def get_ticket(self):
return self.get_ticket_by_key(self.key())
class _Operation(object):
@classmethod
def from_str(cls, raw_str):
return cls._from_json(transforms.loads(raw_str))
@classmethod
def _from_json(cls, parsed):
raise NotImplementedError
def ready(self):
raise NotImplementedError
def to_json(self):
return transforms.dumps(self._to_dict())
def to_url(self):
return urllib.quote_plus(self.to_json())
def update(self, updates_dict):
for k, v in updates_dict.iteritems():
if not hasattr(self, k):
raise ValueError('Cannot set name ' + k)
setattr(self, k, v)
def _to_dict(self):
raise NotImplementedError
class _CreateTaskOperation(_Operation):
def __init__(self, payload, ticket, user_id):
self.payload = payload
self.ticket = ticket
self.user_id = user_id
@classmethod
def _from_json(cls, parsed):
return cls(parsed, None, parsed.get(_USER_ID))
def ready(self):
return self.payload is not None and self.ticket is not None
def _to_dict(self):
return {
_PAYLOAD: self.payload,
_TICKET: self.ticket,
_USER_ID: self.user_id,
}
class _GetProjectOperation(_Operation):
def __init__(self, payload):
self.payload = payload
@classmethod
def _from_json(cls, parsed):
return cls(parsed)
def ready(self):
return self.payload is not None
def _to_dict(self):
return {_PAYLOAD: self.payload}
class _GetTaskOperation(_Operation):
def __init__(self, payload, ticket, worker_id):
self.payload = payload
self.ticket = ticket
self.worker_id = worker_id
@classmethod
def _from_json(cls, parsed):
ticket = parsed.get(_TICKET)
if not ticket:
raise ValueError('%s not set' % _TICKET)
return cls(parsed, ticket, parsed.get(_WORKER_ID))
def ready(self):
return (
self.payload is not None and self.ticket is not None and
self.worker_id is not None)
def _to_dict(self):
return {
_PAYLOAD: self.payload,
_TICKET: self.ticket,
_WORKER_ID: self.worker_id,
}
class _WorkerPool(object):
@classmethod
def _check_response(cls, response):
return response.has_key(_PAYLOAD)
@classmethod
def _do_fetch(cls, url, method, operation):
try:
response = urlfetch.fetch(
cls._get_url(url, method, operation),
deadline=_WORKER_DEADLINE_SECONDS,
headers=_DISABLE_CACHING_HEADERS, method=method,
payload=cls._get_request_body(method, operation))
return (
response.status_code, cls._transform_response(response))
except urlfetch.DownloadError as e:
_LOG.error('Unable to dispatch request to pool; error: %s', e)
return 500, {_PAYLOAD: 'Unable to dispatch request'}
@classmethod
def _get_base_url(cls, worker_id=None):
base = (
worker_id if worker_id is not None else
EXTERNAL_TASK_BALANCER_WORKER_URL.value)
return base + '/rest/v1'
@classmethod
def _get_create_task_url(cls):
return cls._get_base_url()
@classmethod
def _get_get_project_url(cls):
return cls._get_base_url() + '/project'
@classmethod
def _get_get_task_url(cls, worker_id):
return cls._get_base_url(worker_id=worker_id)
@classmethod
def _get_request_body(cls, method, operation):
if method == 'GET':
return None
return operation.to_json()
@classmethod
def _get_url(cls, url, method, operation):
if method == 'GET':
return '%s?request=%s' % (url, operation.to_url())
return url
@classmethod
def _transform_response(cls, response):
try:
parsed = transforms.loads(response.content)
if not cls._check_response(parsed):
raise ValueError
return {_PAYLOAD: parsed[_PAYLOAD]}
except:
_LOG.error(
'Unable to parse worker response: ' + response.content)
return {_PAYLOAD: 'Received invalid response'}
@classmethod
def create_task(cls, operation):
return cls._do_fetch(cls._get_create_task_url(), 'POST', operation)
@classmethod
def get_project(cls, operation):
return cls._do_fetch(cls._get_get_project_url(), 'GET', operation)
@classmethod
def get_task(cls, operation):
return cls._do_fetch(
cls._get_get_task_url(operation.worker_id), 'GET', operation)
class _BaseRestHandler(utils.BaseRESTHandler):
def _send_json_response(self, code, response):
self.response.headers['Content-Disposition'] = 'attachment'
self.response.headers['Content-Type'] = (
'application/javascript; charset=utf-8')
self.response.headers['X-Content-Type-Options'] = 'nosniff'
self.response.headers['Access-Control-Allow-Origin'] = '*'
self.response.status_code = code
self.response.write(transforms.dumps(response))
def _check_config_or_send_error(self):
if not EXTERNAL_TASK_BALANCER_REST_ENABLED.value:
self._send_json_response(404, 'Not found.')
return False
elif not EXTERNAL_TASK_BALANCER_WORKER_URL.value:
self._send_json_response(500, 'No worker pool found.')
return False
return True
class _ProjectRestHandler(_BaseRestHandler):
def get(self):
configured = self._check_config_or_send_error()
if not configured:
return
try:
op = _GetProjectOperation.from_str(self.request.get('request'))
except ValueError:
self._send_json_response(400, 'Bad request')
return
self._send_json_response(*_WorkerPool.get_project(op))
class _TaskRestHandler(_BaseRestHandler):
def _get_payload(self, response):
return response.get(_PAYLOAD)
def _get_status(self, response):
return self._get_payload(response).get(_STATUS)
def _get_task_payload(self, response):
return response.get(_PAYLOAD).get(_PAYLOAD)
def _get_ticket(self, response):
return self._get_payload(response).get(_TICKET)
def _get_worker_id(self, response):
return self._get_payload(response).get(_WORKER_ID)
def _retry_create_task(self, response, op):
tries = 0
while tries < _WORKER_LOCKED_MAX_RETRIES:
tries += 1
_LOG.info('Worker locked; retrying (tries: %s)', tries)
code, response = _WorkerPool.create_task(op)
if not self._worker_locked(response):
return code, response
return code, {_PAYLOAD: _WORKER_LOCKED}
def _worker_locked(self, response):
return response.get(_PAYLOAD) == _WORKER_LOCKED
def get(self):
configured = self._check_config_or_send_error()
if not configured:
return
try:
op = _GetTaskOperation.from_str(self.request.get('request'))
except:
self._send_json_response(400, 'Bad request')
return
task = None
try:
task = Manager.get(op.ticket)
except ValueError:
pass
if not task:
self._send_json_response(
404, 'Task not found for ticket %s' % op.ticket)
return
if task.is_done():
self._send_json_response(200, task.for_json())
return
op.update({_WORKER_ID: task.worker_id})
if not op.ready():
self._send_json_response(
500, 'Unable to compose request for worker')
return
code, response = _WorkerPool.get_task(op)
if code != 200:
self._send_json_response(code, response)
return
status = self._get_status(response)
if status is None:
self._send_json_response(500, 'Worker sent partial response')
return
elif _ExternalTask.is_status_terminal(status):
try:
payload = self._get_task_payload(response)
Manager.mark_done(op.ticket, status, payload)
except:
self._send_json_response(
500, 'Invalid worker status or payload too big')
return
self._send_json_response(*_WorkerPool.get_task(op))
def post(self):
configured = self._check_config_or_send_error()
if not configured:
return
try:
op = _CreateTaskOperation.from_str(self.request.get('request'))
except: # pylint: disable=bare-except
self._send_json_response(400, 'Bad request')
return
# Must allocate ticket at storage level for wire ops against worker, so
# we cannot create the task in one datastore call.
ticket = Manager.create(user_id=op.user_id)
op.update({_TICKET: ticket})
if not op.ready():
self._send_json_response(
500, 'Unable to compose request for worker')
return
code, response = _WorkerPool.create_task(op)
if self._worker_locked(response):
code, response = self._retry_create_task(response, op)
if code != 200:
Manager.mark_failed(ticket)
self._send_json_response(500, self._get_payload(response))
return
request_failed = code != 200
ticket_mismatch = self._get_ticket(response) != ticket
if request_failed or ticket_mismatch:
response = 'Ticket mismatch' if ticket_mismatch else 'Worker failed'
Manager.mark_failed(ticket)
self._send_json_response(500, response)
else: # Worker response indicates success.
Manager.mark_running(ticket, self._get_worker_id(response))
self._send_json_response(code, response)
custom_module = None
def register_module():
global custom_module # pylint: disable=global-statement
global_handlers = [
(_REST_URL_TASK, _TaskRestHandler),
(_REST_URL_PROJECT, _ProjectRestHandler),
]
namespaced_handlers = []
custom_module = custom_modules.Module(
'External Task Balancer', 'External Task Balancer', global_handlers,
namespaced_handlers)
return custom_module
| true
| true
|
790ba77a0d6d6845a0e9840cf6ef07a9856814a9
| 8,833
|
py
|
Python
|
pynm/commands/metric.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | 1
|
2018-08-16T20:48:52.000Z
|
2018-08-16T20:48:52.000Z
|
pynm/commands/metric.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | 5
|
2015-01-12T20:40:46.000Z
|
2017-11-17T01:27:41.000Z
|
pynm/commands/metric.py
|
ohtaman/pynm
|
b003962201e4270d0dab681ede37f2d8edd560f2
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import csv
import fileinput
import sys
import numpy
from pynm.feature.metric.itml import learn_metric, convert_data
class ItmlCommand:
name = 'itml'
help = 'Information Theoretic Metric Learning'
@classmethod
def build_arg_parser(cls, parser):
parser.add_argument('-i',
'--input_data',
default='-',
type=str,
metavar='FILE',
help='input data file (default: stdin)')
label_or_pair = parser.add_mutually_exclusive_group(required=True)
label_or_pair.add_argument('-l',
'--input_labels',
default=None,
type=str,
metavar='FILE',
help='input labels file')
label_or_pair.add_argument('-p',
'--input_pairs',
default=None,
type=str,
metavar='FILE',
help='input pairs file')
parser.add_argument('-o',
'--output_data',
default=None,
type=str,
metavar='FILE',
help='output data file')
parser.add_argument('-m',
'--output_metric',
default=None,
type=str,
metavar='FILE',
help='output metric file')
parser.add_argument('-w',
'--output_weights',
default=None,
type=str,
metavar='FILE',
help='output weights file')
parser.add_argument('-d',
'--delimiter',
default='\t',
type=str,
metavar='DELIM',
help='delimiter (default: "\\t")')
parser.add_argument('-s',
'--sparse',
action='store_true',
help='sparse format (not implemented yet)')
parser.add_argument('--header',
action='store_true',
help='has header')
parser.add_argument('-U',
'--u_param',
default=1.0,
type=float,
metavar='DISTANCE',
help='U parameter (max distance for same labels, default: 1.0)')
parser.add_argument('-L',
'--l_param',
default=1.0,
type=float,
metavar='DISTANCE',
help='L parameter (min distance for different labels, default: 1.0)')
parser.add_argument('-S',
'--slack',
default=1.0,
type=float,
metavar='SLACK',
help='slack variable (default: 1.0)')
parser.add_argument('-N',
'--max_iteration_number',
default=1000,
type=int,
metavar='MAX',
help='max iteration (default: 1000)')
def run(self, args):
with fileinput.input(args.input_data) as in_:
header, data = self.load_data(in_,
delimiter=args.delimiter,
has_header=args.header)
if args.input_labels is not None:
with fileinput.input(args.input_labels) as in_:
labels = self.load_labels(in_)
pairs = None
elif args.input_pairs is not None:
with fileinput.input(args.input_pairs) as in_:
pairs = self.load_pairs(in_)
labels = None
metric = learn_metric(data,
labels=labels,
pairs=pairs,
u=args.u_param,
l=args.l_param,
slack=args.slack,
max_iter=args.max_iteration_number,
is_sparse=args.sparse)
if args.output_metric is not None:
if args.output_metric == '-':
self.export_metric(sys.stdout, metric, header)
else:
with open(args.output_metric, 'w') as o_:
self.export_metric(o_, metric, header)
if args.output_weights is not None:
weights = numpy.diag(metric)
if args.output_weights == '-':
self.export_weights(sys.stdout, weights, header)
else:
with open(args.output_weights, 'w') as o_:
self.export_weights(o_, weights, header)
if args.output_data is not None:
converted_data = convert_data(metric, data)
if args.output_data == '-':
self.export_data(sys.stdout, converted_data, header)
else:
with open(args.output_data, 'w') as o_:
self.export_data(o_, converted_data, header)
return 0
def load_data(self,
input_data,
delimiter='\t',
has_header=False):
reader = csv.reader(input_data, delimiter=delimiter)
if has_header:
            header = {value: key for key, value in enumerate(next(reader))}
else:
header = None
data = []
for row in reader:
data.append(numpy.array(list(map(lambda x: float(x), row))))
return header, data
def load_labels(self, input_labels):
return list(map(lambda x: int(x), input_labels))
def load_pairs(self, input_pairs, delimiter='\t', header=None):
pairs = []
if header is None:
for line in input_pairs:
row = line.split(delimiter)
idx1 = int(row[0])
idx2 = int(row[1])
similar = int(row[2]) > 0
pairs.append((idx1, idx2, similar))
else:
for line in input_pairs:
row = line.split(delimiter)
idx1 = header[row[0]]
idx2 = header[row[1]]
similar = int(row[2]) > 0
pairs.append((idx1, idx2, similar))
return pairs
def export_metric(self,
output,
metric,
header=None,
sparse=False):
if sparse:
raise NotImplementedError('sparse is not supported yet.')
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
for row in metric:
writer.writerow(row)
def export_weights(self,
output,
weights,
header=None):
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
writer.writerow(weights)
def export_data(self,
output,
data,
header=None,
sparse=False):
if sparse:
raise NotImplementedError('sparse is not supported yet.')
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
for row in data:
writer.writerow(row)
class MetricCommand:
name = 'metric'
help = 'Metric Learning'
sub_commands = [ItmlCommand]
default_command = sub_commands[0]
def build_arg_parser(self, parser):
self.default_command.build_arg_parser(parser)
subparsers = parser.add_subparsers(title='algorithm', dest='algorithm')
for command in self.sub_commands:
subparser = subparsers.add_parser(command.name, help=command.help)
command.build_arg_parser(subparser)
def run(self, args):
sub_command = self._get_sub_command(args.algorithm)
return sub_command.run(args)
def _get_sub_command(self, algorithm):
if algorithm is None:
return self.default_command()
return next(filter(lambda x: x.name == algorithm, self.sub_commands))()
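# --- Illustrative usage sketch (not part of the package; the entry-point name
# and file names are assumptions):
#
#   pynm metric itml -i data.tsv -l labels.txt -m metric.csv -U 0.5 -L 2.0
#
# reads tab-separated feature rows from data.tsv and one integer label per
# line from labels.txt, runs ITML, and writes the learned metric matrix to
# metric.csv. Programmatically, the same flow reduces to:
#
#   metric = learn_metric(data, labels=labels, pairs=None, u=0.5, l=2.0,
#                         slack=1.0, max_iter=1000, is_sparse=False)
#   converted = convert_data(metric, data)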
| 37.270042
| 97
| 0.452621
|
import csv
import fileinput
import sys
import numpy
from pynm.feature.metric.itml import learn_metric, convert_data
class ItmlCommand:
name = 'itml'
help = 'Information Theoretic Metric Learning'
@classmethod
def build_arg_parser(cls, parser):
parser.add_argument('-i',
'--input_data',
default='-',
type=str,
metavar='FILE',
help='input data file (default: stdin)')
label_or_pair = parser.add_mutually_exclusive_group(required=True)
label_or_pair.add_argument('-l',
'--input_labels',
default=None,
type=str,
metavar='FILE',
help='input labels file')
label_or_pair.add_argument('-p',
'--input_pairs',
default=None,
type=str,
metavar='FILE',
help='input pairs file')
parser.add_argument('-o',
'--output_data',
default=None,
type=str,
metavar='FILE',
help='output data file')
parser.add_argument('-m',
'--output_metric',
default=None,
type=str,
metavar='FILE',
help='output metric file')
parser.add_argument('-w',
'--output_weights',
default=None,
type=str,
metavar='FILE',
help='output weights file')
parser.add_argument('-d',
'--delimiter',
default='\t',
type=str,
metavar='DELIM',
help='delimiter (default: "\\t")')
parser.add_argument('-s',
'--sparse',
action='store_true',
help='sparse format (not implemented yet)')
parser.add_argument('--header',
action='store_true',
help='has header')
parser.add_argument('-U',
'--u_param',
default=1.0,
type=float,
metavar='DISTANCE',
help='U parameter (max distance for same labels, default: 1.0)')
parser.add_argument('-L',
'--l_param',
default=1.0,
type=float,
metavar='DISTANCE',
help='L parameter (min distance for different labels, default: 1.0)')
parser.add_argument('-S',
'--slack',
default=1.0,
type=float,
metavar='SLACK',
help='slack variable (default: 1.0)')
parser.add_argument('-N',
'--max_iteration_number',
default=1000,
type=int,
metavar='MAX',
help='max iteration (default: 1000)')
def run(self, args):
with fileinput.input(args.input_data) as in_:
header, data = self.load_data(in_,
delimiter=args.delimiter,
has_header=args.header)
if args.input_labels is not None:
with fileinput.input(args.input_labels) as in_:
labels = self.load_labels(in_)
pairs = None
elif args.input_pairs is not None:
with fileinput.input(args.input_pairs) as in_:
pairs = self.load_pairs(in_)
labels = None
metric = learn_metric(data,
labels=labels,
pairs=pairs,
u=args.u_param,
l=args.l_param,
slack=args.slack,
max_iter=args.max_iteration_number,
is_sparse=args.sparse)
if args.output_metric is not None:
if args.output_metric == '-':
self.export_metric(sys.stdout, metric, header)
else:
with open(args.output_metric, 'w') as o_:
self.export_metric(o_, metric, header)
if args.output_weights is not None:
weights = numpy.diag(metric)
if args.output_weights == '-':
self.export_weights(sys.stdout, weights, header)
else:
with open(args.output_weights, 'w') as o_:
self.export_weights(o_, weights, header)
if args.output_data is not None:
converted_data = convert_data(metric, data)
if args.output_data == '-':
self.export_data(sys.stdout, converted_data, header)
else:
with open(args.output_data, 'w') as o_:
self.export_data(o_, converted_data, header)
return 0
def load_data(self,
input_data,
delimiter='\t',
has_header=False):
reader = csv.reader(input_data, delimiter=delimiter)
if has_header:
            header = {value: key for key, value in enumerate(next(reader))}
else:
header = None
data = []
for row in reader:
data.append(numpy.array(list(map(lambda x: float(x), row))))
return header, data
def load_labels(self, input_labels):
return list(map(lambda x: int(x), input_labels))
def load_pairs(self, input_pairs, delimiter='\t', header=None):
pairs = []
if header is None:
for line in input_pairs:
row = line.split(delimiter)
idx1 = int(row[0])
idx2 = int(row[1])
similar = int(row[2]) > 0
pairs.append((idx1, idx2, similar))
else:
for line in input_pairs:
row = line.split(delimiter)
idx1 = header[row[0]]
idx2 = header[row[1]]
similar = int(row[2]) > 0
pairs.append((idx1, idx2, similar))
return pairs
def export_metric(self,
output,
metric,
header=None,
sparse=False):
if sparse:
raise NotImplementedError('sparse is not supported yet.')
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
for row in metric:
writer.writerow(row)
def export_weights(self,
output,
weights,
header=None):
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
writer.writerow(weights)
def export_data(self,
output,
data,
header=None,
sparse=False):
if sparse:
raise NotImplementedError('sparse is not supported yet.')
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
for row in data:
writer.writerow(row)
class MetricCommand:
name = 'metric'
help = 'Metric Learning'
sub_commands = [ItmlCommand]
default_command = sub_commands[0]
def build_arg_parser(self, parser):
self.default_command.build_arg_parser(parser)
subparsers = parser.add_subparsers(title='algorithm', dest='algorithm')
for command in self.sub_commands:
subparser = subparsers.add_parser(command.name, help=command.help)
command.build_arg_parser(subparser)
def run(self, args):
sub_command = self._get_sub_command(args.algorithm)
return sub_command.run(args)
def _get_sub_command(self, algorithm):
if algorithm is None:
return self.default_command()
return next(filter(lambda x: x.name == algorithm, self.sub_commands))()
| true
| true
|
790ba7a8a47beb16bc3970fb72ea9b43d2f5717d
| 416
|
py
|
Python
|
sublime_exec.py
|
rgrannell1/sublime-exec
|
76311f47f8a3b7fd2969ab2a36f4140f21c4f320
|
[
"MIT"
] | null | null | null |
sublime_exec.py
|
rgrannell1/sublime-exec
|
76311f47f8a3b7fd2969ab2a36f4140f21c4f320
|
[
"MIT"
] | null | null | null |
sublime_exec.py
|
rgrannell1/sublime-exec
|
76311f47f8a3b7fd2969ab2a36f4140f21c4f320
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sublime
import sublime_plugin
import random
import re
import sys
import math
__version__ = '0.1.0'
__authors__ = ['Ryan Grannell (@RyanGrannell)']
class BabelCommand (sublime_plugin.WindowCommand):
"""
babel loads a random file from your
currently open folders.
"""
def run (self):
window = self.window
open_folders = window.folders()
# todo
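        # A possible completion (illustrative sketch, not from the source):
        # walk the open folders, collect file paths, and open one at random.
        #
        #   candidates = []
        #   for folder in open_folders:
        #       for root, _, files in os.walk(folder):
        #           candidates.extend(os.path.join(root, name) for name in files)
        #   if candidates:
        #       window.open_file(random.choice(candidates))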
| 11.885714
| 50
| 0.704327
|
import os
import sublime
import sublime_plugin
import random
import re
import sys
import math
__version__ = '0.1.0'
__authors__ = ['Ryan Grannell (@RyanGrannell)']
class BabelCommand (sublime_plugin.WindowCommand):
def run (self):
window = self.window
open_folders = window.folders()
| true
| true
|
790ba99b6826b9dacdfc3f4386e29754279b0b8c
| 4,849
|
py
|
Python
|
lsw_slackbot/slack.py
|
emilyhunt/lsw-slackbot
|
1069aee5046b30075db52e1735c33d0ca84d71c4
|
[
"BSD-3-Clause"
] | null | null | null |
lsw_slackbot/slack.py
|
emilyhunt/lsw-slackbot
|
1069aee5046b30075db52e1735c33d0ca84d71c4
|
[
"BSD-3-Clause"
] | null | null | null |
lsw_slackbot/slack.py
|
emilyhunt/lsw-slackbot
|
1069aee5046b30075db52e1735c33d0ca84d71c4
|
[
"BSD-3-Clause"
] | null | null | null |
"""Various functions that interact with Slack, e.g. posting messages."""
import asyncio
import logging
import socket
from pathlib import Path
from typing import Union, Optional
from slack_sdk.errors import SlackApiError
from lsw_slackbot.plots import plot_resource_use
from lsw_slackbot.resources import current_memory_fraction, _get_resource_usage_dataframe
from lsw_slackbot.util import string_time
async def _send_message(client, channel: str, message: str):
"""Sends a message to a channel, with basic logging & error handling."""
try:
await client.chat_postMessage(channel=channel, text=message)
# Handle various different errors, *some* of which are non-critical...
except SlackApiError as e:
logging.exception(f"error from slack API when trying to send message: {e.response['error']}")
print("Encountered SlackApiError when trying to send message (see logs.)")
except AttributeError:
logging.exception("suspected issue in Slack API when trying to send message. This bug has occured before!")
print("Encountered AttributeError when trying to send message (see logs.)")
async def _send_file(client, channel: str, file: Union[Path, str], title):
"""Sends a file to a channel, with basic logging & error handling."""
if isinstance(file, Path):
file = str(file.absolute())
try:
await client.files_upload(channels=channel, file=file, title=title)
# Handle various different errors, *some* of which are non-critical...
except SlackApiError as e:
logging.exception(f"error from Slack API when trying to upload file: {e.response['error']}")
print("Encountered SlackApiError when trying to upload file (see logs.)")
except AttributeError:
logging.exception("suspected issue in Slack API when trying to upload file. This bug has occured before!")
print("Encountered AttributeError when trying to upload file (see logs.)")
async def hello_world(client, channel: str):
"""Basic function to post an init message to a channel."""
# Todo: it would be really cool if hello_world also printed the latest commit message.
# This could be done by running the command `git log -1` from Python?
# See https://stackoverflow.com/questions/7293008/display-last-git-commit-comment
logging.info(f"Saying hello world in {channel}!")
system_name = socket.gethostname()
await _send_message(
client, channel, f"Server time & date: {string_time()}\nApp is running on system {system_name}.")
async def send_resource_use_plot(client, channel: str, plot_kwargs: dict, title: Optional[str] = None):
"""Sends a resource usage plot to a given channel."""
if title is None:
title = f"Resource usage plot generated at {string_time()}"
else:
title = title + f" (plot generated at {string_time()})"
# Firstly, let's generate a plot
logging.info("Generating a resource usage plot")
logging.debug(f"plot kwargs: {plot_kwargs}")
location_plot = await plot_resource_use(**plot_kwargs)
# Now, let's try and send it to slack
logging.info(f"Sending to Slack in channel {channel}")
await _send_file(client, channel, location_plot, title)
_LAST_MEMORY_FRACTION = 0.0
async def check_memory(client, channel: str, memory_warn_fraction=0.8, sleep_time=3600):
"""Quick function for checking current server memory and sending a warning to a desired channel if it's
too high."""
global _LAST_MEMORY_FRACTION # Sorry for using global variables =(
current_usage = current_memory_fraction()
# Only warn if we didn't warn before
if _LAST_MEMORY_FRACTION < memory_warn_fraction:
if current_usage > memory_warn_fraction:
# Firstly, prioritise sending a basic warning
await _send_message(client, channel, f"WARNING: current memory usage at {current_usage:.2%}!")
# Next, grab info on currently running threads
thread_df = await _get_resource_usage_dataframe(measurement_time=1.0)
thread_df = thread_df.sort_values("memory")
# ... and format it into something we can send
message = ["Users with something currently running:"]
for i, a_row in thread_df.iterrows():
message.append(f"{a_row.name}: {a_row['cpu_percent']:.2f}% CPU "
f"-- {a_row['memory']:.2f} GB"
f"-- {a_row['threads']} threads")
message.append(f"\n(no further warnings will be sent for a sleep period of {sleep_time/60**2:.2f} hour(s))")
# Send it!
await _send_message(client, channel, "\n".join(message))
# Sleep so we don't spam the chat
await asyncio.sleep(sleep_time)
_LAST_MEMORY_FRACTION = current_usage
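A hedged sketch of how these helpers could be wired together with slack_sdk's async client; the token, channel name, and polling interval are placeholders:

```python
import asyncio
from slack_sdk.web.async_client import AsyncWebClient

async def main():
    client = AsyncWebClient(token="xoxb-your-bot-token")  # placeholder token
    await hello_world(client, "#server-status")
    while True:
        await check_memory(client, "#server-status")
        await asyncio.sleep(60)  # re-check once a minute between warnings

asyncio.run(main())
```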
| 41.444444
| 120
| 0.690864
|
import asyncio
import logging
import socket
from pathlib import Path
from typing import Union, Optional
from slack_sdk.errors import SlackApiError
from lsw_slackbot.plots import plot_resource_use
from lsw_slackbot.resources import current_memory_fraction, _get_resource_usage_dataframe
from lsw_slackbot.util import string_time
async def _send_message(client, channel: str, message: str):
try:
await client.chat_postMessage(channel=channel, text=message)
except SlackApiError as e:
logging.exception(f"error from slack API when trying to send message: {e.response['error']}")
print("Encountered SlackApiError when trying to send message (see logs.)")
except AttributeError:
logging.exception("suspected issue in Slack API when trying to send message. This bug has occured before!")
print("Encountered AttributeError when trying to send message (see logs.)")
async def _send_file(client, channel: str, file: Union[Path, str], title):
if isinstance(file, Path):
file = str(file.absolute())
try:
await client.files_upload(channels=channel, file=file, title=title)
except SlackApiError as e:
logging.exception(f"error from Slack API when trying to upload file: {e.response['error']}")
print("Encountered SlackApiError when trying to upload file (see logs.)")
except AttributeError:
logging.exception("suspected issue in Slack API when trying to upload file. This bug has occured before!")
print("Encountered AttributeError when trying to upload file (see logs.)")
async def hello_world(client, channel: str):
logging.info(f"Saying hello world in {channel}!")
system_name = socket.gethostname()
await _send_message(
client, channel, f"Server time & date: {string_time()}\nApp is running on system {system_name}.")
async def send_resource_use_plot(client, channel: str, plot_kwargs: dict, title: Optional[str] = None):
if title is None:
title = f"Resource usage plot generated at {string_time()}"
else:
title = title + f" (plot generated at {string_time()})"
logging.info("Generating a resource usage plot")
logging.debug(f"plot kwargs: {plot_kwargs}")
location_plot = await plot_resource_use(**plot_kwargs)
# Now, let's try and send it to slack
logging.info(f"Sending to Slack in channel {channel}")
await _send_file(client, channel, location_plot, title)
_LAST_MEMORY_FRACTION = 0.0
async def check_memory(client, channel: str, memory_warn_fraction=0.8, sleep_time=3600):
global _LAST_MEMORY_FRACTION
current_usage = current_memory_fraction()
if _LAST_MEMORY_FRACTION < memory_warn_fraction:
if current_usage > memory_warn_fraction:
# Firstly, prioritise sending a basic warning
await _send_message(client, channel, f"WARNING: current memory usage at {current_usage:.2%}!")
# Next, grab info on currently running threads
thread_df = await _get_resource_usage_dataframe(measurement_time=1.0)
thread_df = thread_df.sort_values("memory")
# ... and format it into something we can send
message = ["Users with something currently running:"]
for i, a_row in thread_df.iterrows():
message.append(f"{a_row.name}: {a_row['cpu_percent']:.2f}% CPU "
f"-- {a_row['memory']:.2f} GB"
f"-- {a_row['threads']} threads")
message.append(f"\n(no further warnings will be sent for a sleep period of {sleep_time/60**2:.2f} hour(s))")
# Send it!
await _send_message(client, channel, "\n".join(message))
# Sleep so we don't spam the chat
await asyncio.sleep(sleep_time)
_LAST_MEMORY_FRACTION = current_usage
| true
| true
|
790ba9f747aaf3a8cbdaffd0f2b7339afce84e14
| 8,599
|
py
|
Python
|
sdk/python/pulumi_aws/elasticloadbalancingv2/get_listener.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | 260
|
2018-06-18T14:57:00.000Z
|
2022-03-29T11:41:03.000Z
|
sdk/python/pulumi_aws/elasticloadbalancingv2/get_listener.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | 1,154
|
2018-06-19T20:38:20.000Z
|
2022-03-31T19:48:16.000Z
|
sdk/python/pulumi_aws/elasticloadbalancingv2/get_listener.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | 115
|
2018-06-28T03:20:27.000Z
|
2022-03-29T11:41:06.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetListenerResult',
'AwaitableGetListenerResult',
'get_listener',
'get_listener_output',
]
warnings.warn("""aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""", DeprecationWarning)
@pulumi.output_type
class GetListenerResult:
"""
A collection of values returned by getListener.
"""
def __init__(__self__, alpn_policy=None, arn=None, certificate_arn=None, default_actions=None, id=None, load_balancer_arn=None, port=None, protocol=None, ssl_policy=None, tags=None):
if alpn_policy and not isinstance(alpn_policy, str):
raise TypeError("Expected argument 'alpn_policy' to be a str")
pulumi.set(__self__, "alpn_policy", alpn_policy)
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if certificate_arn and not isinstance(certificate_arn, str):
raise TypeError("Expected argument 'certificate_arn' to be a str")
pulumi.set(__self__, "certificate_arn", certificate_arn)
if default_actions and not isinstance(default_actions, list):
raise TypeError("Expected argument 'default_actions' to be a list")
pulumi.set(__self__, "default_actions", default_actions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if load_balancer_arn and not isinstance(load_balancer_arn, str):
raise TypeError("Expected argument 'load_balancer_arn' to be a str")
pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
if port and not isinstance(port, int):
raise TypeError("Expected argument 'port' to be a int")
pulumi.set(__self__, "port", port)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if ssl_policy and not isinstance(ssl_policy, str):
raise TypeError("Expected argument 'ssl_policy' to be a str")
pulumi.set(__self__, "ssl_policy", ssl_policy)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="alpnPolicy")
def alpn_policy(self) -> str:
return pulumi.get(self, "alpn_policy")
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> str:
return pulumi.get(self, "certificate_arn")
@property
@pulumi.getter(name="defaultActions")
def default_actions(self) -> Sequence['outputs.GetListenerDefaultActionResult']:
return pulumi.get(self, "default_actions")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancerArn")
def load_balancer_arn(self) -> str:
return pulumi.get(self, "load_balancer_arn")
@property
@pulumi.getter
def port(self) -> int:
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> str:
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> str:
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
return pulumi.get(self, "tags")
class AwaitableGetListenerResult(GetListenerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetListenerResult(
alpn_policy=self.alpn_policy,
arn=self.arn,
certificate_arn=self.certificate_arn,
default_actions=self.default_actions,
id=self.id,
load_balancer_arn=self.load_balancer_arn,
port=self.port,
protocol=self.protocol,
ssl_policy=self.ssl_policy,
tags=self.tags)
def get_listener(arn: Optional[str] = None,
load_balancer_arn: Optional[str] = None,
port: Optional[int] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetListenerResult:
"""
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
Provides information about a Load Balancer Listener.
This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
listener_arn = config.require("listenerArn")
listener = aws.lb.get_listener(arn=listener_arn)
selected = aws.lb.get_load_balancer(name="default-public")
selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
port=443)
```
    :param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` are not set.
:param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
:param int port: Port of the listener. Required if `arn` is not set.
"""
pulumi.log.warn("""get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""")
__args__ = dict()
__args__['arn'] = arn
__args__['loadBalancerArn'] = load_balancer_arn
__args__['port'] = port
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:elasticloadbalancingv2/getListener:getListener', __args__, opts=opts, typ=GetListenerResult).value
return AwaitableGetListenerResult(
alpn_policy=__ret__.alpn_policy,
arn=__ret__.arn,
certificate_arn=__ret__.certificate_arn,
default_actions=__ret__.default_actions,
id=__ret__.id,
load_balancer_arn=__ret__.load_balancer_arn,
port=__ret__.port,
protocol=__ret__.protocol,
ssl_policy=__ret__.ssl_policy,
tags=__ret__.tags)
@_utilities.lift_output_func(get_listener)
def get_listener_output(arn: Optional[pulumi.Input[Optional[str]]] = None,
load_balancer_arn: Optional[pulumi.Input[Optional[str]]] = None,
port: Optional[pulumi.Input[Optional[int]]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetListenerResult]:
"""
> **Note:** `alb.Listener` is known as `lb.Listener`. The functionality is identical.
Provides information about a Load Balancer Listener.
This data source can prove useful when a module accepts an LB Listener as an input variable and needs to know the LB it is attached to, or other information specific to the listener in question.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
config = pulumi.Config()
listener_arn = config.require("listenerArn")
listener = aws.lb.get_listener(arn=listener_arn)
selected = aws.lb.get_load_balancer(name="default-public")
selected443 = aws.lb.get_listener(load_balancer_arn=selected.arn,
port=443)
```
    :param str arn: ARN of the listener. Required if `load_balancer_arn` and `port` are not set.
:param str load_balancer_arn: ARN of the load balancer. Required if `arn` is not set.
:param int port: Port of the listener. Required if `arn` is not set.
"""
pulumi.log.warn("""get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""")
...
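As the docstrings recommend, new code should call the `aws.lb` module directly; a short sketch of the `_output` variant, which accepts `pulumi.Input` values from other resources (the load balancer below is a hypothetical example resource):

```python
import pulumi
import pulumi_aws as aws

# Hypothetical resource; any ARN-producing load balancer works the same way.
my_lb = aws.lb.LoadBalancer("my-lb", load_balancer_type="application")
listener = aws.lb.get_listener_output(load_balancer_arn=my_lb.arn, port=443)
pulumi.export("listener_arn", listener.arn)
```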
| 39.086364
| 198
| 0.671822
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetListenerResult',
'AwaitableGetListenerResult',
'get_listener',
'get_listener_output',
]
warnings.warn("""aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""", DeprecationWarning)
@pulumi.output_type
class GetListenerResult:
def __init__(__self__, alpn_policy=None, arn=None, certificate_arn=None, default_actions=None, id=None, load_balancer_arn=None, port=None, protocol=None, ssl_policy=None, tags=None):
if alpn_policy and not isinstance(alpn_policy, str):
raise TypeError("Expected argument 'alpn_policy' to be a str")
pulumi.set(__self__, "alpn_policy", alpn_policy)
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if certificate_arn and not isinstance(certificate_arn, str):
raise TypeError("Expected argument 'certificate_arn' to be a str")
pulumi.set(__self__, "certificate_arn", certificate_arn)
if default_actions and not isinstance(default_actions, list):
raise TypeError("Expected argument 'default_actions' to be a list")
pulumi.set(__self__, "default_actions", default_actions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if load_balancer_arn and not isinstance(load_balancer_arn, str):
raise TypeError("Expected argument 'load_balancer_arn' to be a str")
pulumi.set(__self__, "load_balancer_arn", load_balancer_arn)
if port and not isinstance(port, int):
raise TypeError("Expected argument 'port' to be a int")
pulumi.set(__self__, "port", port)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if ssl_policy and not isinstance(ssl_policy, str):
raise TypeError("Expected argument 'ssl_policy' to be a str")
pulumi.set(__self__, "ssl_policy", ssl_policy)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="alpnPolicy")
def alpn_policy(self) -> str:
return pulumi.get(self, "alpn_policy")
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="certificateArn")
def certificate_arn(self) -> str:
return pulumi.get(self, "certificate_arn")
@property
@pulumi.getter(name="defaultActions")
def default_actions(self) -> Sequence['outputs.GetListenerDefaultActionResult']:
return pulumi.get(self, "default_actions")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="loadBalancerArn")
def load_balancer_arn(self) -> str:
return pulumi.get(self, "load_balancer_arn")
@property
@pulumi.getter
def port(self) -> int:
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> str:
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> str:
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
return pulumi.get(self, "tags")
class AwaitableGetListenerResult(GetListenerResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetListenerResult(
alpn_policy=self.alpn_policy,
arn=self.arn,
certificate_arn=self.certificate_arn,
default_actions=self.default_actions,
id=self.id,
load_balancer_arn=self.load_balancer_arn,
port=self.port,
protocol=self.protocol,
ssl_policy=self.ssl_policy,
tags=self.tags)
def get_listener(arn: Optional[str] = None,
load_balancer_arn: Optional[str] = None,
port: Optional[int] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetListenerResult:
pulumi.log.warn("""get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""")
__args__ = dict()
__args__['arn'] = arn
__args__['loadBalancerArn'] = load_balancer_arn
__args__['port'] = port
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:elasticloadbalancingv2/getListener:getListener', __args__, opts=opts, typ=GetListenerResult).value
return AwaitableGetListenerResult(
alpn_policy=__ret__.alpn_policy,
arn=__ret__.arn,
certificate_arn=__ret__.certificate_arn,
default_actions=__ret__.default_actions,
id=__ret__.id,
load_balancer_arn=__ret__.load_balancer_arn,
port=__ret__.port,
protocol=__ret__.protocol,
ssl_policy=__ret__.ssl_policy,
tags=__ret__.tags)
@_utilities.lift_output_func(get_listener)
def get_listener_output(arn: Optional[pulumi.Input[Optional[str]]] = None,
load_balancer_arn: Optional[pulumi.Input[Optional[str]]] = None,
port: Optional[pulumi.Input[Optional[int]]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetListenerResult]:
pulumi.log.warn("""get_listener is deprecated: aws.elasticloadbalancingv2.getListener has been deprecated in favor of aws.lb.getListener""")
...
| true
| true
|
790baa2cdbf7e37f5b5914c5a65cfd0325fabf43
| 3,904
|
py
|
Python
|
percy/runner.py
|
robopsi/python-percy-client
|
c3a80ed567ad40b2f1eaaea76f0886aa6f0367eb
|
[
"MIT"
] | 1
|
2017-10-31T11:29:24.000Z
|
2017-10-31T11:29:24.000Z
|
percy/runner.py
|
robopsi/python-percy-client
|
c3a80ed567ad40b2f1eaaea76f0886aa6f0367eb
|
[
"MIT"
] | 1
|
2021-03-26T00:50:40.000Z
|
2021-03-26T00:50:40.000Z
|
percy/runner.py
|
rob-opsi/python-percy-client
|
c3a80ed567ad40b2f1eaaea76f0886aa6f0367eb
|
[
"MIT"
] | 2
|
2018-06-05T02:33:05.000Z
|
2021-03-02T11:17:47.000Z
|
from __future__ import print_function
import os
import percy
from percy import errors
from percy import utils
__all__ = ['Runner']
class Runner(object):
def __init__(self, loader=None, config=None, client=None):
self.loader = loader
self.config = config or percy.Config()
self.client = client or percy.Client(config=self.config)
self._current_build = None
self._is_enabled = os.getenv('PERCY_ENABLE', '1') == '1'
        # Sanity check environment and auth setup. If running in CI without a PERCY_TOKEN, warn and disable Percy.
if self._is_enabled:
try:
self.client.config.access_token
except errors.AuthError:
if self.client.environment.current_ci:
utils.print_error('[percy] Warning: Percy is disabled, no PERCY_TOKEN set.')
self._is_enabled = False
def initialize_build(self, **kwargs):
# Silently pass if Percy is disabled.
if not self._is_enabled:
return
        build_resources = self.loader.build_resources if self.loader else []
sha_to_build_resource = {}
for build_resource in build_resources:
sha_to_build_resource[build_resource.sha] = build_resource
self._current_build = self.client.create_build(resources=build_resources, **kwargs)
try:
missing_resources = self._current_build['data']['relationships']['missing-resources']
missing_resources = missing_resources.get('data', [])
for missing_resource in missing_resources:
sha = missing_resource['id']
resource = sha_to_build_resource.get(sha)
# This resource should always exist, but if by chance it doesn't we make it safe here.
# A nicer error will be raised by the finalize API when the resource is still missing.
if resource:
print('Uploading new build resource: {}'.format(resource.resource_url))
# Optimization: we don't hold all build resources in memory. Instead we store a
# "local_path" variable that be used to read the file again if it is needed.
if resource.local_path:
with open(resource.local_path, 'rb') as f:
content = f.read()
else:
content = resource.content
self.client.upload_resource(self._current_build['data']['id'], content)
except KeyError:
print(self._current_build)
def snapshot(self, **kwargs):
# Silently pass if Percy is disabled.
if not self._is_enabled:
return
if not self._current_build:
raise errors.UninitializedBuildError('Cannot call snapshot before build is initialized')
root_resource = self.loader.snapshot_resources[0]
build_id = self._current_build['data']['id']
snapshot_data = self.client.create_snapshot(build_id, [root_resource], **kwargs)
missing_resources = snapshot_data['data']['relationships']['missing-resources']
missing_resources = missing_resources.get('data', [])
if missing_resources:
# There can only be one missing resource in this case, the root_resource.
self.client.upload_resource(build_id, root_resource.content)
self.client.finalize_snapshot(snapshot_data['data']['id'])
def finalize_build(self):
# Silently pass if Percy is disabled.
if not self._is_enabled:
return
if not self._current_build:
raise errors.UninitializedBuildError(
'Cannot finalize_build before build is initialized.')
self.client.finalize_build(self._current_build['data']['id'])
self._current_build = None
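For orientation, the Runner's lifecycle is initialize, then one or more snapshots, then finalize; a hedged sketch, assuming a `percy.ResourceLoader` configured for your static assets (the constructor arguments are illustrative):

```python
import percy

# Illustrative loader setup; adjust root_dir/base_url to your app's assets.
loader = percy.ResourceLoader(root_dir="static", base_url="/static")
runner = percy.Runner(loader=loader)

runner.initialize_build()          # create build, upload missing resources
runner.snapshot(name="homepage")   # extra kwargs go to client.create_snapshot
runner.finalize_build()            # mark the build finished
```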
| 41.094737
| 102
| 0.627561
|
from __future__ import print_function
import os
import percy
from percy import errors
from percy import utils
__all__ = ['Runner']
class Runner(object):
def __init__(self, loader=None, config=None, client=None):
self.loader = loader
self.config = config or percy.Config()
self.client = client or percy.Client(config=self.config)
self._current_build = None
self._is_enabled = os.getenv('PERCY_ENABLE', '1') == '1'
if self._is_enabled:
try:
self.client.config.access_token
except errors.AuthError:
if self.client.environment.current_ci:
utils.print_error('[percy] Warning: Percy is disabled, no PERCY_TOKEN set.')
self._is_enabled = False
def initialize_build(self, **kwargs):
if not self._is_enabled:
return
        build_resources = self.loader.build_resources if self.loader else []
sha_to_build_resource = {}
for build_resource in build_resources:
sha_to_build_resource[build_resource.sha] = build_resource
self._current_build = self.client.create_build(resources=build_resources, **kwargs)
try:
missing_resources = self._current_build['data']['relationships']['missing-resources']
missing_resources = missing_resources.get('data', [])
for missing_resource in missing_resources:
sha = missing_resource['id']
resource = sha_to_build_resource.get(sha)
# A nicer error will be raised by the finalize API when the resource is still missing.
if resource:
print('Uploading new build resource: {}'.format(resource.resource_url))
                    # Optimization: we don't hold all build resources in memory. Instead we store a "local_path" variable that can be used to read the file again if it is needed.
if resource.local_path:
with open(resource.local_path, 'rb') as f:
content = f.read()
else:
content = resource.content
self.client.upload_resource(self._current_build['data']['id'], content)
except KeyError:
print(self._current_build)
def snapshot(self, **kwargs):
if not self._is_enabled:
return
if not self._current_build:
raise errors.UninitializedBuildError('Cannot call snapshot before build is initialized')
root_resource = self.loader.snapshot_resources[0]
build_id = self._current_build['data']['id']
snapshot_data = self.client.create_snapshot(build_id, [root_resource], **kwargs)
missing_resources = snapshot_data['data']['relationships']['missing-resources']
missing_resources = missing_resources.get('data', [])
if missing_resources:
self.client.upload_resource(build_id, root_resource.content)
self.client.finalize_snapshot(snapshot_data['data']['id'])
def finalize_build(self):
if not self._is_enabled:
return
if not self._current_build:
raise errors.UninitializedBuildError(
'Cannot finalize_build before build is initialized.')
self.client.finalize_build(self._current_build['data']['id'])
self._current_build = None
| true
| true
|
790baaf3d1cbb2d2e40b6686fd890453f1ef3bfa
| 4,145
|
py
|
Python
|
flat_api/models/flat_locales.py
|
FlatIO/api-client-python
|
898d1da77989b3e9075f0311b6a4d342a72e95ef
|
[
"Apache-2.0"
] | 8
|
2017-04-09T15:54:12.000Z
|
2021-07-14T13:38:43.000Z
|
flat_api/models/flat_locales.py
|
FlatIO/api-client-python
|
898d1da77989b3e9075f0311b6a4d342a72e95ef
|
[
"Apache-2.0"
] | 4
|
2018-07-20T13:22:40.000Z
|
2022-03-23T20:03:21.000Z
|
flat_api/models/flat_locales.py
|
FlatIO/api-client-python
|
898d1da77989b3e9075f0311b6a4d342a72e95ef
|
[
"Apache-2.0"
] | 2
|
2018-05-29T08:29:59.000Z
|
2018-07-23T07:16:13.000Z
|
# coding: utf-8
"""
Flat API
    The Flat API allows you to easily extend the abilities of the [Flat Platform](https://flat.io), with a wide range of use cases including the following: * Creating and importing new music scores using MusicXML, MIDI, Guitar Pro (GP3, GP4, GP5, GPX, GP), PowerTab, TuxGuitar and MuseScore files * Browsing, updating, copying, exporting the user's scores (for example in MP3, WAV or MIDI) * Managing educational resources with Flat for Education: creating & updating the organization accounts, the classes, rosters and assignments. The Flat API is built on HTTP. Our API is RESTful. It has predictable resource URLs. It returns HTTP response codes to indicate errors. It also accepts and returns JSON in the HTTP body. The [schema](/swagger.yaml) of this API follows the [OpenAPI Initiative (OAI) specification](https://www.openapis.org/), you can use and work with [compatible Swagger tools](http://swagger.io/open-source-integrations/). This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/). You can use your favorite HTTP/REST library for your programming language to use Flat's API. This specification and reference are [available on Github](https://github.com/FlatIO/api-reference). Get started and learn more: * [API Overview and introduction](https://flat.io/developers/docs/api/) * [Authentication (Personal Access Tokens or OAuth2)](https://flat.io/developers/docs/api/authentication.html) * [SDKs](https://flat.io/developers/docs/api/sdks.html) * [Rate Limits](https://flat.io/developers/docs/api/rate-limits.html) * [Changelog](https://flat.io/developers/docs/api/changelog.html) # noqa: E501
OpenAPI spec version: 2.7.0
Contact: developers@flat.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class FlatLocales(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
allowed enum values
"""
EN = "en"
ES = "es"
FR = "fr"
DE = "de"
IT = "it"
JA = "ja"
KO = "ko"
NL = "nl"
PL = "pl"
PT = "pt"
RO = "ro"
RU = "ru"
ZH_HANS = "zh-Hans"
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""FlatLocales - a model defined in OpenAPI""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FlatLocales):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
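FlatLocales is effectively a bag of string constants plus generated helpers; a quick illustration of its behavior:

```python
locale = FlatLocales.PT                 # constants are plain strings: "pt"
print(locale)                           # -> pt
print(FlatLocales().to_dict())          # no declared attributes, so -> {}
print(FlatLocales() == FlatLocales())   # __eq__ compares __dict__ -> True
```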
| 40.242718
| 1,686
| 0.624367
|
import pprint
import re
import six
class FlatLocales(object):
EN = "en"
ES = "es"
FR = "fr"
DE = "de"
IT = "it"
JA = "ja"
KO = "ko"
NL = "nl"
PL = "pl"
PT = "pt"
RO = "ro"
RU = "ru"
ZH_HANS = "zh-Hans"
openapi_types = {
}
attribute_map = {
}
def __init__(self):
self.discriminator = None
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, FlatLocales):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
790bad08556d18e05ef626300a04ba27b8c8f520
| 6,297
|
py
|
Python
|
sbibm/third_party/kgof/test/test_goftest.py
|
michaeldeistler/sbibm-1
|
8e9875f79beb828c07fbf4820b30413914d1ceca
|
[
"MIT"
] | 2
|
2021-05-06T06:19:27.000Z
|
2022-02-20T19:49:55.000Z
|
sbibm/third_party/kgof/test/test_goftest.py
|
mackelab/sbibm
|
b9781c610a1a80d2de014ee46a29cf061fb6074a
|
[
"MIT"
] | null | null | null |
sbibm/third_party/kgof/test/test_goftest.py
|
mackelab/sbibm
|
b9781c610a1a80d2de014ee46a29cf061fb6074a
|
[
"MIT"
] | 1
|
2022-01-23T15:54:06.000Z
|
2022-01-23T15:54:06.000Z
|
"""
Module for testing goftest module.
"""
__author__ = "wittawat"
import unittest
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as testing
import scipy.stats as stats
import sbibm.third_party.kgof.data as data
import sbibm.third_party.kgof.density as density
import sbibm.third_party.kgof.glo as glo
import sbibm.third_party.kgof.goftest as gof
import sbibm.third_party.kgof.kernel as kernel
import sbibm.third_party.kgof.util as util
class TestFSSD(unittest.TestCase):
def setUp(self):
pass
def test_basic(self):
"""
Nothing special. Just test basic things.
"""
seed = 12
# sample
n = 100
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1
isonorm = density.IsotropicNormal(mean, variance)
# only one dimension of the mean is shifted
# draw_mean = mean + np.hstack((1, np.zeros(d-1)))
draw_mean = mean + 0
draw_variance = variance + 1
X = util.randn(n, d, seed=seed) * np.sqrt(draw_variance) + draw_mean
dat = data.Data(X)
# Test
for J in [1, 3]:
sig2 = util.meddistance(X, subsample=1000) ** 2
k = kernel.KGauss(sig2)
# random test locations
V = util.fit_gaussian_draw(X, J, seed=seed + 1)
null_sim = gof.FSSDH0SimCovObs(n_simulate=200, seed=3)
fssd = gof.FSSD(isonorm, k, V, null_sim=null_sim, alpha=alpha)
tresult = fssd.perform_test(dat, return_simulated_stats=True)
# assertions
self.assertGreaterEqual(tresult["pvalue"], 0)
self.assertLessEqual(tresult["pvalue"], 1)
def test_optimized_fssd(self):
"""
Test FSSD test with parameter optimization.
"""
seed = 4
# sample size
n = 179
alpha = 0.01
for d in [1, 3]:
mean = np.zeros(d)
variance = 1.0
p = density.IsotropicNormal(mean, variance)
# Mean difference. obvious reject
ds = data.DSIsotropicNormal(mean + 4, variance + 0)
dat = ds.sample(n, seed=seed)
# test
for J in [1, 4]:
opts = {"reg": 1e-2, "max_iter": 10, "tol_fun": 1e-3, "disp": False}
tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed + 1)
Xtr = tr.X
gwidth0 = util.meddistance(Xtr, subsample=1000) ** 2
# random test locations
V0 = util.fit_gaussian_draw(Xtr, J, seed=seed + 1)
V_opt, gw_opt, opt_result = gof.GaussFSSD.optimize_locs_widths(
p, tr, gwidth0, V0, **opts
)
# construct a test
k_opt = kernel.KGauss(gw_opt)
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=10)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te, return_simulated_stats=True)
assert fssd_opt_result["h0_rejected"]
def test_auto_init_opt_fssd(self):
"""
Test FSSD-opt test with automatic parameter initialization.
"""
seed = 5
# sample size
n = 191
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1.0
p = density.IsotropicNormal(mean, variance)
# Mean difference. obvious reject
ds = data.DSIsotropicNormal(mean + 4, variance + 0)
dat = ds.sample(n, seed=seed)
# test
for J in [1, 3]:
opts = {"reg": 1e-2, "max_iter": 10, "tol_fun": 1e-3, "disp": False}
tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed + 1)
V_opt, gw_opt, opt_result = gof.GaussFSSD.optimize_auto_init(
p, tr, J, **opts
)
# construct a test
k_opt = kernel.KGauss(gw_opt)
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=10)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te, return_simulated_stats=True)
assert fssd_opt_result["h0_rejected"]
def test_ustat_h1_mean_variance(self):
seed = 20
# sample
n = 200
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1
isonorm = density.IsotropicNormal(mean, variance)
draw_mean = mean + 2
draw_variance = variance + 1
X = util.randn(n, d, seed=seed) * np.sqrt(draw_variance) + draw_mean
dat = data.Data(X)
# Test
for J in [1, 3]:
sig2 = util.meddistance(X, subsample=1000) ** 2
k = kernel.KGauss(sig2)
# random test locations
V = util.fit_gaussian_draw(X, J, seed=seed + 1)
null_sim = gof.FSSDH0SimCovObs(n_simulate=200, seed=3)
fssd = gof.FSSD(isonorm, k, V, null_sim=null_sim, alpha=alpha)
fea_tensor = fssd.feature_tensor(X)
u_mean, u_variance = gof.FSSD.ustat_h1_mean_variance(fea_tensor)
# assertions
self.assertGreaterEqual(u_variance, 0)
# should reject H0
self.assertGreaterEqual(u_mean, 0)
def tearDown(self):
pass
# end class TestFSSD
class TestSteinWitness(unittest.TestCase):
def test_basic(self):
d = 3
p = density.IsotropicNormal(mean=np.zeros(d), variance=3.0)
q = density.IsotropicNormal(mean=np.zeros(d) + 2, variance=3.0)
k = kernel.KGauss(2.0)
ds = q.get_datasource()
n = 97
dat = ds.sample(n, seed=3)
witness = gof.SteinWitness(p, k, dat)
# points to evaluate the witness
J = 4
V = np.random.randn(J, d) * 2
evals = witness(V)
testing.assert_equal(evals.shape, (J, d))
# end class TestSteinWitness
if __name__ == "__main__":
unittest.main()
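To run one case from this module in isolation, the standard unittest pattern applies; shown here for `TestFSSD.test_basic`:

```python
import unittest

suite = unittest.TestSuite([TestFSSD("test_basic")])
unittest.TextTestRunner(verbosity=2).run(suite)
```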
| 32.458763
| 88
| 0.545974
|
__author__ = "wittawat"
import unittest
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as testing
import scipy.stats as stats
import sbibm.third_party.kgof.data as data
import sbibm.third_party.kgof.density as density
import sbibm.third_party.kgof.glo as glo
import sbibm.third_party.kgof.goftest as gof
import sbibm.third_party.kgof.kernel as kernel
import sbibm.third_party.kgof.util as util
class TestFSSD(unittest.TestCase):
def setUp(self):
pass
def test_basic(self):
seed = 12
n = 100
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1
isonorm = density.IsotropicNormal(mean, variance)
draw_mean = mean + 0
draw_variance = variance + 1
X = util.randn(n, d, seed=seed) * np.sqrt(draw_variance) + draw_mean
dat = data.Data(X)
for J in [1, 3]:
sig2 = util.meddistance(X, subsample=1000) ** 2
k = kernel.KGauss(sig2)
V = util.fit_gaussian_draw(X, J, seed=seed + 1)
null_sim = gof.FSSDH0SimCovObs(n_simulate=200, seed=3)
fssd = gof.FSSD(isonorm, k, V, null_sim=null_sim, alpha=alpha)
tresult = fssd.perform_test(dat, return_simulated_stats=True)
self.assertGreaterEqual(tresult["pvalue"], 0)
self.assertLessEqual(tresult["pvalue"], 1)
def test_optimized_fssd(self):
seed = 4
n = 179
alpha = 0.01
for d in [1, 3]:
mean = np.zeros(d)
variance = 1.0
p = density.IsotropicNormal(mean, variance)
ds = data.DSIsotropicNormal(mean + 4, variance + 0)
dat = ds.sample(n, seed=seed)
for J in [1, 4]:
opts = {"reg": 1e-2, "max_iter": 10, "tol_fun": 1e-3, "disp": False}
tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed + 1)
Xtr = tr.X
gwidth0 = util.meddistance(Xtr, subsample=1000) ** 2
V0 = util.fit_gaussian_draw(Xtr, J, seed=seed + 1)
V_opt, gw_opt, opt_result = gof.GaussFSSD.optimize_locs_widths(
p, tr, gwidth0, V0, **opts
)
k_opt = kernel.KGauss(gw_opt)
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=10)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te, return_simulated_stats=True)
assert fssd_opt_result["h0_rejected"]
def test_auto_init_opt_fssd(self):
seed = 5
n = 191
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1.0
p = density.IsotropicNormal(mean, variance)
ds = data.DSIsotropicNormal(mean + 4, variance + 0)
dat = ds.sample(n, seed=seed)
for J in [1, 3]:
opts = {"reg": 1e-2, "max_iter": 10, "tol_fun": 1e-3, "disp": False}
tr, te = dat.split_tr_te(tr_proportion=0.3, seed=seed + 1)
V_opt, gw_opt, opt_result = gof.GaussFSSD.optimize_auto_init(
p, tr, J, **opts
)
k_opt = kernel.KGauss(gw_opt)
null_sim = gof.FSSDH0SimCovObs(n_simulate=2000, seed=10)
fssd_opt = gof.FSSD(p, k_opt, V_opt, null_sim=null_sim, alpha=alpha)
fssd_opt_result = fssd_opt.perform_test(te, return_simulated_stats=True)
assert fssd_opt_result["h0_rejected"]
def test_ustat_h1_mean_variance(self):
seed = 20
n = 200
alpha = 0.01
for d in [1, 4]:
mean = np.zeros(d)
variance = 1
isonorm = density.IsotropicNormal(mean, variance)
draw_mean = mean + 2
draw_variance = variance + 1
X = util.randn(n, d, seed=seed) * np.sqrt(draw_variance) + draw_mean
dat = data.Data(X)
for J in [1, 3]:
sig2 = util.meddistance(X, subsample=1000) ** 2
k = kernel.KGauss(sig2)
V = util.fit_gaussian_draw(X, J, seed=seed + 1)
null_sim = gof.FSSDH0SimCovObs(n_simulate=200, seed=3)
fssd = gof.FSSD(isonorm, k, V, null_sim=null_sim, alpha=alpha)
fea_tensor = fssd.feature_tensor(X)
u_mean, u_variance = gof.FSSD.ustat_h1_mean_variance(fea_tensor)
self.assertGreaterEqual(u_variance, 0)
self.assertGreaterEqual(u_mean, 0)
def tearDown(self):
pass
class TestSteinWitness(unittest.TestCase):
def test_basic(self):
d = 3
p = density.IsotropicNormal(mean=np.zeros(d), variance=3.0)
q = density.IsotropicNormal(mean=np.zeros(d) + 2, variance=3.0)
k = kernel.KGauss(2.0)
ds = q.get_datasource()
n = 97
dat = ds.sample(n, seed=3)
witness = gof.SteinWitness(p, k, dat)
J = 4
V = np.random.randn(J, d) * 2
evals = witness(V)
testing.assert_equal(evals.shape, (J, d))
if __name__ == "__main__":
unittest.main()
| true
| true
|
790bae7773b5f6ce59d80bb4d211c93504258926
| 4,504
|
py
|
Python
|
brave/evaluate_video_embeddings.py
|
deepmind/brave
|
0ae20d9afcf6b1fa4d31d70c906d711901b56e9c
|
[
"Apache-2.0"
] | 26
|
2021-10-14T19:06:56.000Z
|
2022-03-02T18:22:45.000Z
|
brave/evaluate_video_embeddings.py
|
deepmind/brave
|
0ae20d9afcf6b1fa4d31d70c906d711901b56e9c
|
[
"Apache-2.0"
] | 1
|
2022-01-31T23:23:31.000Z
|
2022-02-08T01:07:15.000Z
|
brave/evaluate_video_embeddings.py
|
deepmind/brave
|
0ae20d9afcf6b1fa4d31d70c906d711901b56e9c
|
[
"Apache-2.0"
] | 1
|
2022-02-04T10:54:53.000Z
|
2022-02-04T10:54:53.000Z
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A runnable program to evaluate video embeddings.
Given a model checkpoint, and the location of the shards for a dataset,
computes the performance of the Brave video embeddings. This code
may be used to evaluate both UCF101 and HMDB51, as long as they are both
given in the appropriate input format. The only hyperparameter to this program
is the svm_regularization constant, which can impact the performance of the
linear classification.
"""
import glob
import json
from absl import app
from absl import flags
import chex
import jax
import numpy as np
import tensorflow as tf
from brave.datasets import datasets
from brave.evaluate import evaluate_video_embedding
from brave.models.brave import brave
FLAGS = flags.FLAGS
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint to evaluate.')
flags.DEFINE_integer('batch_size', None, 'The size of the batches to use.')
# Hyperparameters
flags.DEFINE_float('svm_regularization', None, 'Regularization constant.')
# Datasets
flags.DEFINE_string('train_dataset_shards', None,
'Glob pattern for train shards.')
flags.DEFINE_string('test_dataset_shards', None,
'Glob pattern for test shards.')
# Transformations to apply to video before running network.
flags.DEFINE_integer('num_video_frames', 32, 'Number of frames in eval videos.')
flags.DEFINE_integer('video_step', 2, 'The step to use in the eval videos.')
flags.DEFINE_integer('image_size', 224, 'The size of the video to evaluate.')
def main(_):
checkpoint_path = FLAGS.checkpoint_path
train_shards = glob.glob(FLAGS.train_dataset_shards)
test_shards = glob.glob(FLAGS.test_dataset_shards)
video_config = evaluate_video_embedding.VideoConfig(
num_frames=FLAGS.num_video_frames,
image_size=FLAGS.image_size,
video_step=FLAGS.video_step,
)
video_embedding_fn = _video_embedding(checkpoint_path)
results = evaluate_video_embedding.evaluate_video_embedding(
train_dataset_shards=train_shards,
test_dataset_shards=test_shards,
embedding_fn=video_embedding_fn,
config=video_config,
svm_regularization=FLAGS.svm_regularization,
batch_size=FLAGS.batch_size)
results_dct = dict(
top_1_train=results.train.top_one_accuracy,
top_5_train=results.train.top_five_accuracy,
top_1_test=results.test.top_one_accuracy,
top_5_test=results.test.top_five_accuracy,
)
# Write the results to stdout in a way that can be used as input to other
# programs.
print(json.dumps(results_dct))
def _video_embedding(checkpoint_path: str):
"""Load the video embedding for the BraVe model to evaluate."""
checkpoint = np.load(checkpoint_path, allow_pickle=True).item()
params = checkpoint['params']
state = checkpoint['state']
brave_config_dct = checkpoint['config']
brave_config = brave.BraveConfig(**brave_config_dct)
model = brave.get_model(brave_config)
@jax.jit
def embedding_fn(view: datasets.View) -> chex.Array:
narrow_forward_fn = model.forward_fns['narrow_video']
embedding, _ = narrow_forward_fn(params, state, None, view, False)
return embedding
def synchronous_embedding_fn(view: datasets.View) -> chex.Array:
# jax.jit causes the above function to be executed lazily, but we want
# to force the computation to happen synchronously.
return jax.device_get(embedding_fn(view))
return synchronous_embedding_fn
if __name__ == '__main__':
try:
tf.config.set_visible_devices([], 'GPU') # Prevent TF from using the GPU.
except tf.errors.NotFoundError:
pass
flags.mark_flag_as_required('checkpoint_path')
flags.mark_flag_as_required('batch_size')
flags.mark_flag_as_required('train_dataset_shards')
flags.mark_flag_as_required('test_dataset_shards')
flags.mark_flag_as_required('svm_regularization')
app.run(main)
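All five flags marked required must be supplied; a hedged example invocation (paths and shard globs are placeholders):

```python
# python -m brave.evaluate_video_embeddings \
#     --checkpoint_path=/tmp/brave_checkpoint.npy \
#     --batch_size=8 \
#     --train_dataset_shards='/data/hmdb51/train-*.tfrecord' \
#     --test_dataset_shards='/data/hmdb51/test-*.tfrecord' \
#     --svm_regularization=1.0
# On success the program prints one JSON object with top-1/top-5
# train and test accuracies to stdout.
```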
| 34.381679
| 80
| 0.75222
|
import glob
import json
from absl import app
from absl import flags
import chex
import jax
import numpy as np
import tensorflow as tf
from brave.datasets import datasets
from brave.evaluate import evaluate_video_embedding
from brave.models.brave import brave
FLAGS = flags.FLAGS
flags.DEFINE_string('checkpoint_path', None, 'Checkpoint to evaluate.')
flags.DEFINE_integer('batch_size', None, 'The size of the batches to use.')
flags.DEFINE_float('svm_regularization', None, 'Regularization constant.')
flags.DEFINE_string('train_dataset_shards', None,
'Glob pattern for train shards.')
flags.DEFINE_string('test_dataset_shards', None,
'Glob pattern for test shards.')
flags.DEFINE_integer('num_video_frames', 32, 'Number of frames in eval videos.')
flags.DEFINE_integer('video_step', 2, 'The step to use in the eval videos.')
flags.DEFINE_integer('image_size', 224, 'The size of the video to evaluate.')
def main(_):
checkpoint_path = FLAGS.checkpoint_path
train_shards = glob.glob(FLAGS.train_dataset_shards)
test_shards = glob.glob(FLAGS.test_dataset_shards)
video_config = evaluate_video_embedding.VideoConfig(
num_frames=FLAGS.num_video_frames,
image_size=FLAGS.image_size,
video_step=FLAGS.video_step,
)
video_embedding_fn = _video_embedding(checkpoint_path)
results = evaluate_video_embedding.evaluate_video_embedding(
train_dataset_shards=train_shards,
test_dataset_shards=test_shards,
embedding_fn=video_embedding_fn,
config=video_config,
svm_regularization=FLAGS.svm_regularization,
batch_size=FLAGS.batch_size)
results_dct = dict(
top_1_train=results.train.top_one_accuracy,
top_5_train=results.train.top_five_accuracy,
top_1_test=results.test.top_one_accuracy,
top_5_test=results.test.top_five_accuracy,
)
print(json.dumps(results_dct))
def _video_embedding(checkpoint_path: str):
checkpoint = np.load(checkpoint_path, allow_pickle=True).item()
params = checkpoint['params']
state = checkpoint['state']
brave_config_dct = checkpoint['config']
brave_config = brave.BraveConfig(**brave_config_dct)
model = brave.get_model(brave_config)
@jax.jit
def embedding_fn(view: datasets.View) -> chex.Array:
narrow_forward_fn = model.forward_fns['narrow_video']
embedding, _ = narrow_forward_fn(params, state, None, view, False)
return embedding
def synchronous_embedding_fn(view: datasets.View) -> chex.Array:
return jax.device_get(embedding_fn(view))
return synchronous_embedding_fn
if __name__ == '__main__':
try:
tf.config.set_visible_devices([], 'GPU')
except tf.errors.NotFoundError:
pass
flags.mark_flag_as_required('checkpoint_path')
flags.mark_flag_as_required('batch_size')
flags.mark_flag_as_required('train_dataset_shards')
flags.mark_flag_as_required('test_dataset_shards')
flags.mark_flag_as_required('svm_regularization')
app.run(main)
| true
| true
|
790baee50d3a4eb3520cf77a358a4df1a1cb9b46
| 1,573
|
py
|
Python
|
ppmessage/api/handlers/ppmovepredefinedscriptintogroup.py
|
x-debug/ppmessage_fork
|
a2cb51333b2bfed92fb81ae130c825d0eada7c69
|
[
"MIT"
] | 3
|
2018-07-22T10:56:42.000Z
|
2020-01-14T10:33:26.000Z
|
ppmessage/api/handlers/ppmovepredefinedscriptintogroup.py
|
x-debug/ppmessage_fork
|
a2cb51333b2bfed92fb81ae130c825d0eada7c69
|
[
"MIT"
] | null | null | null |
ppmessage/api/handlers/ppmovepredefinedscriptintogroup.py
|
x-debug/ppmessage_fork
|
a2cb51333b2bfed92fb81ae130c825d0eada7c69
|
[
"MIT"
] | 7
|
2018-03-22T05:27:47.000Z
|
2021-01-19T13:03:17.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, dingguijin@gmail.com
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.core.constant import API_LEVEL
from ppmessage.db.models import PredefinedScript
# redis_hash_to_dict is used in _move() below but was never imported; the
# path here is the usual one in this codebase and is an assumption.
from ppmessage.core.utils import redis_hash_to_dict
import json
import logging
class PPMovePredefinedScriptIntoGroup(BaseHandler):
def _move(self):
_request = json.loads(self.request.body)
_group_uuid = str(_request.get("group_uuid"))
_script_uuid = _request.get("script_uuid")
        if _script_uuid is None or len(_script_uuid) == 0:
self.setErrorCode(API_ERR.NO_PARA)
return
_script = redis_hash_to_dict(self.application.redis, PredefinedScript, _script_uuid)
        if _script is None:
logging.error("No such script: %s" % _script_uuid)
return
_old_group_uuid = str(_script.get("group_uuid"))
_key = PredefinedScript.__tablename__ + ".group_uuid." + _old_group_uuid
self.application.redis.srem(_key, _script_uuid)
_row = PredefinedScript(uuid=_script_uuid, group_uuid=_group_uuid)
_row.async_update()
_row.update_redis_keys(self.application.redis)
return
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPMovePredefinedScriptIntoGroup, self)._Task()
self._move()
return
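The handler reads exactly two fields from the JSON request body; a minimal sketch of the payload it expects (both uuids are placeholders):

```python
import json

# Body for PPMovePredefinedScriptIntoGroup; a missing script_uuid yields
# API_ERR.NO_PARA, per the checks in _move().
body = json.dumps({
    "script_uuid": "script-uuid-placeholder",
    "group_uuid": "target-group-uuid-placeholder",
})
```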
| 30.843137
| 92
| 0.684043
|
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.core.constant import API_LEVEL
from ppmessage.db.models import PredefinedScript
from ppmessage.core.utils import redis_hash_to_dict
import json
import logging
class PPMovePredefinedScriptIntoGroup(BaseHandler):
def _move(self):
_request = json.loads(self.request.body)
_group_uuid = str(_request.get("group_uuid"))
_script_uuid = _request.get("script_uuid")
        if _script_uuid is None or len(_script_uuid) == 0:
self.setErrorCode(API_ERR.NO_PARA)
return
_script = redis_hash_to_dict(self.application.redis, PredefinedScript, _script_uuid)
        if _script is None:
logging.error("No such script: %s" % _script_uuid)
return
_old_group_uuid = str(_script.get("group_uuid"))
_key = PredefinedScript.__tablename__ + ".group_uuid." + _old_group_uuid
self.application.redis.srem(_key, _script_uuid)
_row = PredefinedScript(uuid=_script_uuid, group_uuid=_group_uuid)
_row.async_update()
_row.update_redis_keys(self.application.redis)
return
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPMovePredefinedScriptIntoGroup, self)._Task()
self._move()
return
| true
| true
|
790bb1342a730b61c3eb1c2540883c5b76180c39
| 1,699
|
py
|
Python
|
main.py
|
v-sht/url-shortener
|
5110e4cf23478e44ebbeb0a7514e98f31031c6f5
|
[
"MIT"
] | null | null | null |
main.py
|
v-sht/url-shortener
|
5110e4cf23478e44ebbeb0a7514e98f31031c6f5
|
[
"MIT"
] | null | null | null |
main.py
|
v-sht/url-shortener
|
5110e4cf23478e44ebbeb0a7514e98f31031c6f5
|
[
"MIT"
] | null | null | null |
from urllib.parse import urlparse
from dotenv import load_dotenv
import requests
import os
import argparse
def shorten_link(token, url):
response = requests.post(
"https://api-ssl.bitly.com/v4/bitlinks",
headers={"Authorization": "Bearer {}".format(token)},
json={"long_url": url})
response.raise_for_status()
return response.json()["link"]
def count_clicks(token, link):
response = requests.get(
"https://api-ssl.bitly.com/v4/bitlinks/{0}{1}/clicks/summary"
.format(link.netloc, link.path),
headers={"Authorization": "Bearer {}".format(token)})
response.raise_for_status()
return response.json()["total_clicks"]
def is_bitlink(token, link):
response = requests.get(
"https://api-ssl.bitly.com/v4/bitlinks/{0}{1}"
.format(link.netloc, link.path),
headers={"Authorization": "Bearer {}".format(token)})
return response.ok
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="A tool for shortening links or "
                    "counting the number of clicks on a bitlink")
    parser.add_argument("url", help="Enter a URL or a bitlink")
    args = parser.parse_args()
    link = args.url
    parsed_bitlink = urlparse(link)
    load_dotenv()
    token = os.environ["BITLY_TOKEN"]
    try:
        if is_bitlink(token, parsed_bitlink):
            clicks_count = count_clicks(token, parsed_bitlink)
            print("Number of clicks on your link: ", clicks_count)
        else:
            bitlink = shorten_link(token, link)
            print("Shortened link: ", bitlink)
    except requests.exceptions.RequestException:
        print("You entered an invalid link")
| 31.462963
| 73
| 0.646851
|
from urllib.parse import urlparse
from dotenv import load_dotenv
import requests
import os
import argparse
def shorten_link(token, url):
response = requests.post(
"https://api-ssl.bitly.com/v4/bitlinks",
headers={"Authorization": "Bearer {}".format(token)},
json={"long_url": url})
response.raise_for_status()
return response.json()["link"]
def count_clicks(token, link):
response = requests.get(
"https://api-ssl.bitly.com/v4/bitlinks/{0}{1}/clicks/summary"
.format(link.netloc, link.path),
headers={"Authorization": "Bearer {}".format(token)})
response.raise_for_status()
return response.json()["total_clicks"]
def is_bitlink(token, link):
response = requests.get(
"https://api-ssl.bitly.com/v4/bitlinks/{0}{1}"
.format(link.netloc, link.path),
headers={"Authorization": "Bearer {}".format(token)})
return response.ok
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="A tool for shortening links or "
                    "counting the number of clicks on a bitlink")
    parser.add_argument("url", help="Enter a URL or a bitlink")
    args = parser.parse_args()
    link = args.url
    parsed_bitlink = urlparse(link)
    load_dotenv()
    token = os.environ["BITLY_TOKEN"]
    try:
        if is_bitlink(token, parsed_bitlink):
            clicks_count = count_clicks(token, parsed_bitlink)
            print("Number of clicks on your link: ", clicks_count)
        else:
            bitlink = shorten_link(token, link)
            print("Shortened link: ", bitlink)
    except requests.exceptions.RequestException:
        print("You entered an invalid link")
| true
| true
|
790bb26116ce9c96df8d92137cd7685d7085844f
| 205
|
py
|
Python
|
courses/src/base_app/utils.py
|
yuramorozov01/courses_system
|
582532b2a2753d89642e1e8dbee0f369774638b1
|
[
"Apache-2.0"
] | null | null | null |
courses/src/base_app/utils.py
|
yuramorozov01/courses_system
|
582532b2a2753d89642e1e8dbee0f369774638b1
|
[
"Apache-2.0"
] | null | null | null |
courses/src/base_app/utils.py
|
yuramorozov01/courses_system
|
582532b2a2753d89642e1e8dbee0f369774638b1
|
[
"Apache-2.0"
] | null | null | null |
import uuid
def get_unique_filename(instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid.uuid4(), ext)
return 'user_{0}/{1}'.format(instance.author.id, filename)
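This is the shape of a Django `upload_to` callback; a quick check of the naming scheme with throwaway stubs (no Django required; `_Author` and `_Instance` are hypothetical):

```python
class _Author:
    id = 7

class _Instance:
    author = _Author()

print(get_unique_filename(_Instance(), "photo.png"))
# -> user_7/<random-uuid4>.png
```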
| 25.625
| 62
| 0.653659
|
import uuid
def get_unique_filename(instance, filename):
ext = filename.split('.')[-1]
filename = '{}.{}'.format(uuid.uuid4(), ext)
return 'user_{0}/{1}'.format(instance.author.id, filename)
| true
| true
|
790bb2b2346db328645da4a58ef2ec3b51ffc921
| 311,499
|
py
|
Python
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | 1
|
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_08_01/aio/operations/_operations.py
|
vincenttran-msft/azure-sdk-for-python
|
348b56f9f03eeb3f7b502eed51daf494ffff874d
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import (build_deployment_operations_get_at_management_group_scope_request, build_deployment_operations_get_at_scope_request, build_deployment_operations_get_at_subscription_scope_request, build_deployment_operations_get_at_tenant_scope_request, build_deployment_operations_get_request, build_deployment_operations_list_at_management_group_scope_request, build_deployment_operations_list_at_scope_request, build_deployment_operations_list_at_subscription_scope_request, build_deployment_operations_list_at_tenant_scope_request, build_deployment_operations_list_request, build_deployments_calculate_template_hash_request, build_deployments_cancel_at_management_group_scope_request, build_deployments_cancel_at_scope_request, build_deployments_cancel_at_subscription_scope_request, build_deployments_cancel_at_tenant_scope_request, build_deployments_cancel_request, build_deployments_check_existence_at_management_group_scope_request, build_deployments_check_existence_at_scope_request, build_deployments_check_existence_at_subscription_scope_request, build_deployments_check_existence_at_tenant_scope_request, build_deployments_check_existence_request, build_deployments_create_or_update_at_management_group_scope_request_initial, build_deployments_create_or_update_at_scope_request_initial, build_deployments_create_or_update_at_subscription_scope_request_initial, build_deployments_create_or_update_at_tenant_scope_request_initial, build_deployments_create_or_update_request_initial, build_deployments_delete_at_management_group_scope_request_initial, build_deployments_delete_at_scope_request_initial, build_deployments_delete_at_subscription_scope_request_initial, build_deployments_delete_at_tenant_scope_request_initial, build_deployments_delete_request_initial, build_deployments_export_template_at_management_group_scope_request, build_deployments_export_template_at_scope_request, build_deployments_export_template_at_subscription_scope_request, build_deployments_export_template_at_tenant_scope_request, build_deployments_export_template_request, build_deployments_get_at_management_group_scope_request, build_deployments_get_at_scope_request, build_deployments_get_at_subscription_scope_request, build_deployments_get_at_tenant_scope_request, build_deployments_get_request, build_deployments_list_at_management_group_scope_request, build_deployments_list_at_scope_request, build_deployments_list_at_subscription_scope_request, build_deployments_list_at_tenant_scope_request, build_deployments_list_by_resource_group_request, build_deployments_validate_at_management_group_scope_request, build_deployments_validate_at_scope_request, build_deployments_validate_at_subscription_scope_request, build_deployments_validate_at_tenant_scope_request, build_deployments_validate_request, build_deployments_what_if_at_subscription_scope_request_initial, build_deployments_what_if_request_initial, build_operations_list_request, build_providers_get_at_tenant_scope_request, build_providers_get_request, build_providers_list_at_tenant_scope_request, build_providers_list_request, build_providers_register_request, build_providers_unregister_request, build_resource_groups_check_existence_request, build_resource_groups_create_or_update_request, build_resource_groups_delete_request_initial, build_resource_groups_export_template_request_initial, build_resource_groups_get_request, build_resource_groups_list_request, build_resource_groups_update_request, build_resources_check_existence_by_id_request,
    build_resources_check_existence_request, build_resources_create_or_update_by_id_request_initial, build_resources_create_or_update_request_initial, build_resources_delete_by_id_request_initial, build_resources_delete_request_initial, build_resources_get_by_id_request, build_resources_get_request, build_resources_list_by_resource_group_request, build_resources_list_request, build_resources_move_resources_request_initial, build_resources_update_by_id_request_initial, build_resources_update_request_initial, build_resources_validate_move_resources_request_initial, build_tags_create_or_update_request, build_tags_create_or_update_value_request, build_tags_delete_request, build_tags_delete_value_request, build_tags_list_request)
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.OperationListResult"]:
"""Lists all of the available Microsoft.Resources REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_operations_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_operations_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/providers/Microsoft.Resources/operations"} # type: ignore
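    # A minimal usage sketch (not part of the generated code); it assumes an
    # already-configured ResourceManagementClient bound to the name `client`:
    #
    #     async for op in client.operations.list():
    #         print(op.name)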
class DeploymentsOperations: # pylint: disable=too-many-public-methods
"""DeploymentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_at_scope_initial( # pylint: disable=inconsistent-return-statements
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_at_scope_request_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_scope_initial.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete_at_scope( # pylint: disable=inconsistent-return-statements
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_at_scope_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
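    # A minimal usage sketch (not part of the generated code); it assumes a
    # configured ResourceManagementClient named `client` and an existing
    # deployment at a hypothetical subscription scope:
    #
    #     poller = await client.deployments.begin_delete_at_scope(
    #         scope="subscriptions/<subscription-id>",
    #         deployment_name="my-deployment")
    #     await poller.result()  # returns once the service reports 204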
@distributed_trace_async
async def check_existence_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
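    # A minimal usage sketch (not part of the generated code); the scope and
    # deployment name are hypothetical placeholders:
    #
    #     exists = await client.deployments.check_existence_at_scope(
    #         scope="subscriptions/<subscription-id>",
    #         deployment_name="my-deployment")
    #     # True when the service answers 204, False when it answers 404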
async def _create_or_update_at_scope_initial(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_at_scope_request_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_scope_initial.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_at_scope(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources at a given scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_at_scope_initial(
scope=scope,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
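    # A minimal usage sketch (not part of the generated code); the Deployment
    # body below is a hypothetical incremental-mode deployment built from the
    # models in this package, with the template JSON elided:
    #
    #     from azure.mgmt.resource.resources.v2019_08_01.models import (
    #         Deployment, DeploymentProperties, DeploymentMode)
    #     props = DeploymentProperties(mode=DeploymentMode.INCREMENTAL,
    #                                  template={...})
    #     poller = await client.deployments.begin_create_or_update_at_scope(
    #         scope="subscriptions/<subscription-id>",
    #         deployment_name="my-deployment",
    #         parameters=Deployment(properties=props))
    #     result = await poller.result()  # a DeploymentExtended on success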
@distributed_trace_async
async def get_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
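    # A minimal usage sketch (not part of the generated code):
    #
    #     deployment = await client.deployments.get_at_scope(
    #         scope="subscriptions/<subscription-id>",
    #         deployment_name="my-deployment")
    #     print(deployment.properties.provisioning_state)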
@distributed_trace_async
async def cancel_at_scope( # pylint: disable=inconsistent-return-statements
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
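    # A minimal usage sketch (not part of the generated code); per the docstring
    # above, this only succeeds while provisioningState is Accepted or Running:
    #
    #     await client.deployments.cancel_at_scope(
    #         scope="subscriptions/<subscription-id>",
    #         deployment_name="my-deployment")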
@distributed_trace_async
async def validate_at_scope(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
        Resource Manager.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
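    # A minimal usage sketch (not part of the generated code); `props` is the
    # hypothetical DeploymentProperties shown for begin_create_or_update_at_scope:
    #
    #     result = await client.deployments.validate_at_scope(
    #         scope="subscriptions/<subscription-id>",
    #         deployment_name="my-deployment",
    #         parameters=Deployment(properties=props))
    #     if result.error:
    #         print(result.error.message)  # details when validation fails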
@distributed_trace_async
async def export_template_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
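    # A minimal usage sketch (not part of the generated code):
    #
    #     exported = await client.deployments.export_template_at_scope(
    #         scope="subscriptions/<subscription-id>",
    #         deployment_name="my-deployment")
    #     template_json = exported.template  # template used for the deployment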
@distributed_trace
def list_at_scope(
self,
scope: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments at the given scope.
:param scope: The scope of a deployment.
:type scope: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_scope_request(
scope=scope,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_scope_request(
scope=scope,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/"} # type: ignore
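    # A minimal usage sketch (not part of the generated code); the $filter value
    # below keeps only successfully provisioned deployments:
    #
    #     async for d in client.deployments.list_at_scope(
    #             scope="subscriptions/<subscription-id>",
    #             filter="provisioningState eq 'Succeeded'",
    #             top=10):
    #         print(d.name)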
async def _delete_at_tenant_scope_initial( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_at_tenant_scope_request_initial(
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_tenant_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_tenant_scope_initial.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete_at_tenant_scope( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_at_tenant_scope_initial(
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def check_existence_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
async def _create_or_update_at_tenant_scope_initial(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_create_or_update_at_tenant_scope_request_initial(
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_tenant_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_tenant_scope_initial.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_at_tenant_scope(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources at tenant scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ScopedDeployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_at_tenant_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def get_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def cancel_at_tenant_scope( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
@distributed_trace_async
async def validate_at_tenant_scope(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
        Resource Manager.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ScopedDeployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_validate_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
@distributed_trace_async
async def export_template_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
@distributed_trace
def list_at_tenant_scope(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments at the tenant scope.
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_tenant_scope_request(
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_tenant_scope_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/"} # type: ignore
async def _delete_at_management_group_scope_initial( # pylint: disable=inconsistent-return-statements
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_at_management_group_scope_request_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_management_group_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_management_group_scope_initial.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete_at_management_group_scope( # pylint: disable=inconsistent-return-statements
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_at_management_group_scope_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def check_existence_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
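
        Example (a hypothetical sketch; assumes ``client`` is an authenticated
        ``ResourceManagementClient`` for this API version, built as in the
        ``begin_delete_at_management_group_scope`` example):

        .. code-block:: python

            exists = await client.deployments.check_existence_at_management_group_scope(
                group_id="my-management-group",
                deployment_name="my-deployment",
            )
            # True when the service responds 204, False when it responds 404.
            print("deployment exists:", exists)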
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
async def _create_or_update_at_management_group_scope_initial(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_create_or_update_at_management_group_scope_request_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_management_group_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_management_group_scope_initial.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources at management group scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ScopedDeployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
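
        Example (a hypothetical sketch; deploys a minimal empty template, assuming
        an authenticated ``client`` as in the earlier examples):

        .. code-block:: python

            from azure.mgmt.resource.resources.v2019_08_01.models import (
                DeploymentProperties,
                ScopedDeployment,
            )

            template = {
                "$schema": "https://schema.management.azure.com/schemas/2019-08-01/managementGroupDeploymentTemplate.json#",
                "contentVersion": "1.0.0.0",
                "resources": [],
            }
            poller = await client.deployments.begin_create_or_update_at_management_group_scope(
                group_id="my-management-group",
                deployment_name="my-deployment",
                parameters=ScopedDeployment(
                    location="eastus",
                    properties=DeploymentProperties(mode="Incremental", template=template),
                ),
            )
            deployment = await poller.result()  # DeploymentExtended
            print(deployment.properties.provisioning_state)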
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_at_management_group_scope_initial(
group_id=group_id,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def get_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
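
        Example (a hypothetical sketch; assumes an authenticated ``client``):

        .. code-block:: python

            deployment = await client.deployments.get_at_management_group_scope(
                group_id="my-management-group",
                deployment_name="my-deployment",
            )
            print(deployment.name, deployment.properties.provisioning_state)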
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def cancel_at_management_group_scope( # pylint: disable=inconsistent-return-statements
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
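
        Example (a hypothetical sketch; cancellation only succeeds while the
        deployment is still Accepted or Running):

        .. code-block:: python

            await client.deployments.cancel_at_management_group_scope(
                group_id="my-management-group",
                deployment_name="my-deployment",
            )
            # A 204 response means the provisioningState is now Canceled.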
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
@distributed_trace_async
async def validate_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
        Resource Manager.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ScopedDeployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
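
        Example (a hypothetical sketch; ``scoped_deployment`` is a
        ``ScopedDeployment`` built as in the
        ``begin_create_or_update_at_management_group_scope`` example):

        .. code-block:: python

            result = await client.deployments.validate_at_management_group_scope(
                group_id="my-management-group",
                deployment_name="my-deployment",
                parameters=scoped_deployment,
            )
            if result.error:
                print("validation failed:", result.error.message)
            else:
                print("template accepted")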
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_validate_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
@distributed_trace_async
async def export_template_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
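
        Example (a hypothetical sketch; writes the exported template to a local
        file):

        .. code-block:: python

            import json

            export = await client.deployments.export_template_at_management_group_scope(
                group_id="my-management-group",
                deployment_name="my-deployment",
            )
            with open("exported-template.json", "w") as handle:
                json.dump(export.template, handle, indent=2)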
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
@distributed_trace
def list_at_management_group_scope(
self,
group_id: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments for a management group.
:param group_id: The management group ID.
:type group_id: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
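
        Example (a hypothetical sketch; the pager fetches follow-up pages
        transparently as you iterate):

        .. code-block:: python

            async for deployment in client.deployments.list_at_management_group_scope(
                group_id="my-management-group",
                filter="provisioningState eq 'Succeeded'",
                top=10,
            ):
                print(deployment.name)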
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_management_group_scope_request(
group_id=group_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_management_group_scope_request(
group_id=group_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/"} # type: ignore
async def _delete_at_subscription_scope_initial( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete_at_subscription_scope( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. This is an asynchronous operation that
returns a status of 202 until the template deployment is successfully deleted. The Location
response header contains the URI that is used to obtain the status of the process. While the
process is running, a call to the URI in the Location header returns a status of 202. When the
process finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_at_subscription_scope_initial(
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def check_existence_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
async def _create_or_update_at_subscription_scope_initial(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources at subscription scope.
You can provide the template and parameters directly in the request or link to JSON files.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_at_subscription_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def get_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def cancel_at_subscription_scope( # pylint: disable=inconsistent-return-statements
self,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resources partially
deployed.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.cancel_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
@distributed_trace_async
async def validate_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
        Resource Manager.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
async def _what_if_at_subscription_scope_initial(
self,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> Optional["_models.WhatIfOperationResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.WhatIfOperationResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DeploymentWhatIf')
request = build_deployments_what_if_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._what_if_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if response.status_code == 202:
            response_headers['Location'] = self._deserialize('str', response.headers.get('Location'))
            response_headers['Retry-After'] = self._deserialize('str', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_what_if_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"} # type: ignore
@distributed_trace_async
async def begin_what_if_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> AsyncLROPoller["_models.WhatIfOperationResult"]:
"""Returns changes that will be made by the deployment if executed at the scope of the
subscription.
:param deployment_name: The name of the deployment.
:type deployment_name: str
        :param parameters: Parameters for the What-If operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentWhatIf
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.WhatIfOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
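
        Example (a hypothetical sketch; runs What-If for a minimal empty template
        so the result lists no resource changes):

        .. code-block:: python

            from azure.mgmt.resource.resources.v2019_08_01.models import (
                DeploymentWhatIf,
                DeploymentWhatIfProperties,
            )

            template = {
                "$schema": "https://schema.management.azure.com/schemas/2018-05-01/subscriptionDeploymentTemplate.json#",
                "contentVersion": "1.0.0.0",
                "resources": [],
            }
            poller = await client.deployments.begin_what_if_at_subscription_scope(
                deployment_name="my-deployment",
                parameters=DeploymentWhatIf(
                    location="eastus",
                    properties=DeploymentWhatIfProperties(mode="Incremental", template=template),
                ),
            )
            result = await poller.result()  # WhatIfOperationResult
            for change in result.changes or []:
                print(change.resource_id, change.change_type)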
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WhatIfOperationResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._what_if_at_subscription_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_what_if_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"} # type: ignore
@distributed_trace_async
async def export_template_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.export_template_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
@distributed_trace
def list_at_subscription_scope(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments for a subscription.
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_subscription_scope_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_subscription_scope_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/"} # type: ignore
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_delete_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a deployment from the deployment history.
A template deployment that is currently running cannot be deleted. Deleting a template
deployment removes the associated deployment operations. Deleting a template deployment does
not affect the state of the resource group. This is an asynchronous operation that returns a
status of 202 until the template deployment is successfully deleted. The Location response
header contains the URI that is used to obtain the status of the process. While the process is
running, a call to the URI in the Location header returns a status of 202. When the process
finishes, the URI in the Location header returns a status of 204 on success. If the
asynchronous request failed, the URI in the Location header returns an error-level status code.
:param resource_group_name: The name of the resource group with the deployment to delete. The
name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> bool:
"""Checks whether the deployment exists.
:param resource_group_name: The name of the resource group with the deployment to check. The
name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_check_existence_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
"""Deploys resources to a resource group.
You can provide the template and parameters directly in the request or link to JSON files.
:param resource_group_name: The name of the resource group to deploy the resources to. The name
is case insensitive. The resource group must already exist.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Additional parameters supplied to the operation.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DeploymentExtended or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended]
:raises: ~azure.core.exceptions.HttpResponseError
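
        Example (a hypothetical sketch; deploys a minimal empty template into an
        existing resource group, assuming an authenticated ``client``):

        .. code-block:: python

            from azure.mgmt.resource.resources.v2019_08_01.models import (
                Deployment,
                DeploymentProperties,
            )

            template = {
                "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
                "contentVersion": "1.0.0.0",
                "resources": [],
            }
            poller = await client.deployments.begin_create_or_update(
                resource_group_name="my-resource-group",
                deployment_name="my-deployment",
                parameters=Deployment(
                    properties=DeploymentProperties(mode="Incremental", template=template)
                ),
            )
            deployment = await poller.result()
            print(deployment.properties.provisioning_state)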
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
"""Gets a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExtended, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExtended
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExtended"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"} # type: ignore
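# Illustrative sketch (editorial addition): reading back a deployment. "client" is assumed to be
# an already-constructed async ResourceManagementClient; names are placeholders.
#
#     deployment = await client.deployments.get("example-rg", "example-deployment")
#     print(deployment.properties.provisioning_state, deployment.properties.timestamp)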
@distributed_trace_async
async def cancel( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> None:
"""Cancels a currently running template deployment.
You can cancel a deployment only if the provisioningState is Accepted or Running. After the
deployment is canceled, the provisioningState is set to Canceled. Canceling a template
deployment stops the currently running template deployment and leaves the resource group
partially deployed.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_cancel_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.cancel.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"} # type: ignore
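# Illustrative sketch (editorial addition): cancelling a running deployment. Per the docstring,
# this only succeeds while provisioningState is Accepted or Running; "client" and the names used
# are hypothetical placeholders.
#
#     await client.deployments.cancel("example-rg", "example-deployment")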
@distributed_trace_async
async def validate(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
"""Validates whether the specified template is syntactically correct and will be accepted by Azure
Resource Manager..
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.Deployment
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentValidateResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentValidateResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentValidateResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"} # type: ignore
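# Illustrative sketch (editorial addition): validating a template before deploying it. The models
# mirror the parameters documented above; "client" and template_dict are placeholders.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import Deployment, DeploymentProperties
#
#     result = await client.deployments.validate(
#         "example-rg", "example-deployment",
#         Deployment(properties=DeploymentProperties(mode="Incremental", template=template_dict)))
#     if result.error:
#         print("validation failed:", result.error.message)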
async def _what_if_initial(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> Optional["_models.WhatIfOperationResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.WhatIfOperationResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DeploymentWhatIf')
request = build_deployments_what_if_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._what_if_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('str', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_what_if_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"} # type: ignore
@distributed_trace_async
async def begin_what_if(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> AsyncLROPoller["_models.WhatIfOperationResult"]:
"""Returns changes that will be made by the deployment if executed at the scope of the resource
group.
:param resource_group_name: The name of the resource group the template will be deployed to.
The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param parameters: Parameters to validate.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentWhatIf
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either WhatIfOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.WhatIfOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WhatIfOperationResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._what_if_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_what_if.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"} # type: ignore
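# Illustrative sketch (editorial addition): previewing changes with what-if. DeploymentWhatIf and
# DeploymentWhatIfProperties are the request models referenced above; "client" and template_dict
# are placeholders, and the printed attributes are a plausible subset of WhatIfChange.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import (
#         DeploymentWhatIf, DeploymentWhatIfProperties)
#
#     poller = await client.deployments.begin_what_if(
#         "example-rg", "example-deployment",
#         DeploymentWhatIf(properties=DeploymentWhatIfProperties(mode="Incremental",
#                                                                template=template_dict)))
#     what_if = await poller.result()  # WhatIfOperationResult
#     for change in what_if.changes:
#         print(change.resource_id, change.change_type)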
@distributed_trace_async
async def export_template(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
"""Exports the template used for specified deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentExportResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentExportResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentExportResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployments_export_template_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.export_template.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"} # type: ignore
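# Illustrative sketch (editorial addition): retrieving the template a deployment was created
# from; "client" and names are placeholders.
#
#     exported = await client.deployments.export_template("example-rg", "example-deployment")
#     template_dict = exported.template  # DeploymentExportResult.template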
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
"""Get all the deployments for a resource group.
:param resource_group_name: The name of the resource group with the deployments to get. The
name is case insensitive.
:type resource_group_name: str
:param filter: The filter to apply on the operation. For example, you can use
$filter=provisioningState eq '{state}'. Default value is None.
:type filter: str
:param top: The number of results to get. If null is passed, returns all deployments. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DeploymentListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/"} # type: ignore
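# Illustrative sketch (editorial addition): paging through deployments with the async iterator
# returned above. The filter string follows the $filter form shown in the docstring; "client"
# and names are placeholders.
#
#     async for dep in client.deployments.list_by_resource_group(
#             "example-rg", filter="provisioningState eq 'Succeeded'", top=10):
#         print(dep.name)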
@distributed_trace_async
async def calculate_template_hash(
self,
template: Any,
**kwargs: Any
) -> "_models.TemplateHashResult":
"""Calculate the hash of the given template.
:param template: The template provided to calculate hash.
:type template: any
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TemplateHashResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TemplateHashResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TemplateHashResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(template, 'object')
request = build_deployments_calculate_template_hash_request(
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.calculate_template_hash.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('TemplateHashResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
calculate_template_hash.metadata = {'url': "/providers/Microsoft.Resources/calculateTemplateHash"} # type: ignore
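# Illustrative sketch (editorial addition): hashing an ARM template. The template dict is a
# minimal placeholder; "client" is assumed to be an async ResourceManagementClient.
#
#     template_dict = {
#         "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
#         "contentVersion": "1.0.0.0",
#         "resources": [],
#     }
#     hashed = await client.deployments.calculate_template_hash(template_dict)
#     print(hashed.template_hash)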
class ProvidersOperations:
"""ProvidersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def unregister(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
"""Unregisters a subscription from a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to unregister.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_providers_unregister_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.unregister.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unregister.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister"} # type: ignore
@distributed_trace_async
async def register(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
"""Registers a subscription with a resource provider.
:param resource_provider_namespace: The namespace of the resource provider to register.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_providers_register_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.register.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"} # type: ignore
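# Illustrative sketch (editorial addition): registering and later unregistering a resource
# provider namespace for the subscription; "client" is a placeholder and the namespace is only
# an example value.
#
#     provider = await client.providers.register("Microsoft.Compute")
#     print(provider.registration_state)
#     provider = await client.providers.unregister("Microsoft.Compute")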
@distributed_trace
def list(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ProviderListResult"]:
"""Gets all resource providers for a subscription.
:param top: The number of results to return. If null is passed, returns all providers. Default
value is None.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in the
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_providers_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
expand=expand,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_providers_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers"} # type: ignore
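# Illustrative sketch (editorial addition): enumerating resource providers, expanding aliases as
# described in the docstring; "client" is a placeholder.
#
#     async for provider in client.providers.list(expand="resourceTypes/aliases"):
#         print(provider.namespace, provider.registration_state)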
@distributed_trace
def list_at_tenant_scope(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ProviderListResult"]:
"""Gets all resource providers for the tenant.
:param top: The number of results to return. If null is passed, returns all providers. Default
value is None.
:type top: int
:param expand: The properties to include in the results. For example, use &$expand=metadata in
the query string to retrieve resource provider metadata. To include property aliases in the
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ProviderListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_providers_list_at_tenant_scope_request(
api_version=api_version,
top=top,
expand=expand,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_providers_list_at_tenant_scope_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
"""Gets the specified resource provider.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in the
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_providers_get_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"} # type: ignore
@distributed_trace_async
async def get_at_tenant_scope(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
"""Gets the specified resource provider at the tenant level.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param expand: The $expand query parameter. For example, to include property aliases in the
response, use $expand=resourceTypes/aliases. Default value is None.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Provider, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.Provider
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Provider"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_providers_get_at_tenant_scope_request(
resource_provider_namespace=resource_provider_namespace,
api_version=api_version,
expand=expand,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/{resourceProviderNamespace}"} # type: ignore
class ResourcesOperations: # pylint: disable=too-many-public-methods
"""ResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
expand: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceListResult"]:
"""Get all the resources for a resource group.
:param resource_group_name: The resource group with the resources to get.
:type resource_group_name: str
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`The properties you
can use for eq (equals) or ne (not equals) are: location, resourceType, name, resourceGroup,
identity, identity/principalId, plan, plan/publisher, plan/product, plan/name, plan/version,
and plan/promotionCode.:code:`<br>`:code:`<br>`For example, to filter by a resource type, use:
$filter=resourceType eq 'Microsoft.Network/virtualNetworks':code:`<br>`:code:`<br>`You can use
substringof(value, property) in the filter. The properties you can use for substring are: name
and resourceGroup.:code:`<br>`:code:`<br>`For example, to get all resources with 'demo'
anywhere in the name, use: $filter=substringof('demo', name):code:`<br>`:code:`<br>`You can
link more than one substringof together by adding and/or operators.:code:`<br>`:code:`<br>`You
can filter by tag names and values. For example, to filter for a tag name and value, use
$filter=tagName eq 'tag1' and tagValue eq 'Value1'. When you filter by a tag name and value,
the tags for each resource are not returned in the results.:code:`<br>`:code:`<br>`You can use
some properties together when filtering. The combinations you can use are: substringof and/or
resourceType, plan and plan/publisher and plan/name, identity and identity/principalId. Default
value is None.
:type filter: str
:param expand: Comma-separated list of additional properties to be included in the response.
Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
``$expand=createdTime,changedTime``. Default value is None.
:type expand: str
:param top: The number of results to return. If null is passed, returns all resources. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resources_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
expand=expand,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resources_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources"} # type: ignore
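# Illustrative sketch (editorial addition): listing resources of one type in a resource group,
# using the $filter grammar documented above; "client" and names are placeholders.
#
#     async for res in client.resources.list_by_resource_group(
#             "example-rg", filter="resourceType eq 'Microsoft.Network/virtualNetworks'"):
#         print(res.name, res.location)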
async def _move_resources_initial( # pylint: disable=inconsistent-return-statements
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourcesMoveInfo')
request = build_resources_move_resources_request_initial(
source_resource_group_name=source_resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._move_resources_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_move_resources_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"} # type: ignore
@distributed_trace_async
async def begin_move_resources( # pylint: disable=inconsistent-return-statements
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Moves resources from one resource group to another resource group.
The resources to move must be in the same source resource group. The target resource group may
be in a different subscription. When moving resources, both the source group and the target
group are locked for the duration of the operation. Write and delete operations are blocked on
the groups until the move completes.
:param source_resource_group_name: The name of the resource group containing the resources to
move.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourcesMoveInfo
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._move_resources_initial(
source_resource_group_name=source_resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_move_resources.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"} # type: ignore
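# Illustrative sketch (editorial addition): moving resources between resource groups. Note that
# ResourcesMoveInfo.target_resource_group takes the full resource-group ID and resources takes a
# list of resource IDs; every identifier below is a placeholder.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import ResourcesMoveInfo
#
#     move_info = ResourcesMoveInfo(
#         resources=["/subscriptions/<sub-id>/resourceGroups/src-rg/providers/"
#                    "Microsoft.Storage/storageAccounts/examplestorage"],
#         target_resource_group="/subscriptions/<sub-id>/resourceGroups/dest-rg")
#     poller = await client.resources.begin_move_resources("src-rg", move_info)
#     await poller.result()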
async def _validate_move_resources_initial( # pylint: disable=inconsistent-return-statements
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourcesMoveInfo')
request = build_resources_validate_move_resources_request_initial(
source_resource_group_name=source_resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._validate_move_resources_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_validate_move_resources_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources"} # type: ignore
@distributed_trace_async
async def begin_validate_move_resources( # pylint: disable=inconsistent-return-statements
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Validates whether resources can be moved from one resource group to another resource group.
This operation checks whether the specified resources can be moved to the target. The resources
to move must be in the same source resource group. The target resource group may be in a
different subscription. If validation succeeds, it returns HTTP response code 204 (no content).
If validation fails, it returns HTTP response code 409 (Conflict) with an error message.
Retrieve the URL in the Location header value to check the result of the long-running
operation.
:param source_resource_group_name: The name of the resource group containing the resources to
validate for move.
:type source_resource_group_name: str
:param parameters: Parameters for moving resources.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourcesMoveInfo
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._validate_move_resources_initial(
source_resource_group_name=source_resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_validate_move_resources.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources"} # type: ignore
@distributed_trace
def list(
self,
filter: Optional[str] = None,
expand: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceListResult"]:
"""Get all the resources in a subscription.
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`The properties you
can use for eq (equals) or ne (not equals) are: location, resourceType, name, resourceGroup,
identity, identity/principalId, plan, plan/publisher, plan/product, plan/name, plan/version,
and plan/promotionCode.:code:`<br>`:code:`<br>`For example, to filter by a resource type, use:
$filter=resourceType eq 'Microsoft.Network/virtualNetworks':code:`<br>`:code:`<br>`You can use
substringof(value, property) in the filter. The properties you can use for substring are: name
and resourceGroup.:code:`<br>`:code:`<br>`For example, to get all resources with 'demo'
anywhere in the name, use: $filter=substringof('demo', name):code:`<br>`:code:`<br>`You can
link more than one substringof together by adding and/or operators.:code:`<br>`:code:`<br>`You
can filter by tag names and values. For example, to filter for a tag name and value, use
$filter=tagName eq 'tag1' and tagValue eq 'Value1'. When you filter by a tag name and value,
the tags for each resource are not returned in the results.:code:`<br>`:code:`<br>`You can use
some properties together when filtering. The combinations you can use are: substringof and/or
resourceType, plan and plan/publisher and plan/name, identity and identity/principalId. Default
value is None.
:type filter: str
:param expand: Comma-separated list of additional properties to be included in the response.
Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
``$expand=createdTime,changedTime``. Default value is None.
:type expand: str
:param top: The number of results to return. If null is passed, returns all resources. Default
value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resources_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
expand=expand,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resources_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resources"} # type: ignore
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> bool:
"""Checks whether a resource exists.
:param resource_group_name: The name of the resource group containing the resource to check.
The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider of the resource to check.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource to check whether it exists.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_check_existence_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
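# Illustrative sketch (editorial addition): probing for a resource without retrieving it. Unlike
# most operations in this group, api_version is an explicit required parameter here; all values
# below are placeholders.
#
#     exists = await client.resources.check_existence(
#         resource_group_name="example-rg",
#         resource_provider_namespace="Microsoft.Storage",
#         parent_resource_path="",
#         resource_type="storageAccounts",
#         resource_name="examplestorage",
#         api_version="2019-06-01")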
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_delete_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a resource.
:param resource_group_name: The name of the resource group that contains the resource to
delete. The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource to delete.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
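# Illustrative sketch (editorial addition): deleting a resource by its identity parts, mirroring
# the positional parameters documented above; all values are placeholders.
#
#     poller = await client.resources.begin_delete(
#         "example-rg", "Microsoft.Storage", "", "storageAccounts", "examplestorage", "2019-06-01")
#     await poller.result()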
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_create_or_update_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
"""Creates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to create.
:type resource_type: str
:param resource_name: The name of the resource to create.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:param parameters: Parameters for creating or updating the resource.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
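# Usage sketch (assumption): creating a resource through the generic surface
# with a GenericResource body. `client` as above; all names are hypothetical.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import GenericResource
#
#     poller = await client.resources.begin_create_or_update(
#         resource_group_name="my-rg",
#         resource_provider_namespace="Microsoft.Network",
#         parent_resource_path="",
#         resource_type="publicIPAddresses",
#         resource_name="my-ip",
#         api_version="2019-09-01",
#         parameters=GenericResource(location="eastus"),
#     )
#     resource = await poller.result()  # a GenericResource on success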
async def _update_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_update_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
"""Updates a resource.
:param resource_group_name: The name of the resource group for the resource. The name is case
insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource to update.
:type resource_type: str
:param resource_name: The name of the resource to update.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:param parameters: Parameters for updating the resource.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> "_models.GenericResource":
"""Gets a resource.
:param resource_group_name: The name of the resource group containing the resource to get. The
name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource.
:type resource_type: str
:param resource_name: The name of the resource to get.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_get_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"} # type: ignore
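# Usage sketch (assumption): reading a resource back through the generic
# surface; placeholders as above.
#
#     resource = await client.resources.get(
#         resource_group_name="my-rg",
#         resource_provider_namespace="Microsoft.Network",
#         parent_resource_path="",
#         resource_type="publicIPAddresses",
#         resource_name="my-ip",
#         api_version="2019-09-01",
#     )
#     print(resource.id, resource.location)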
@distributed_trace_async
async def check_existence_by_id(
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> bool:
"""Checks by ID whether a resource exists.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_check_existence_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self.check_existence_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
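# Note on the return value above: the service answers 204 (No Content) when the
# resource exists and 404 when it does not; neither status raises here because
# both are in the accepted list, so `200 <= status_code <= 299` maps 204 to
# True and 404 to False.
# Usage sketch (assumption; the ID below is a hypothetical full ARM ID):
#
#     exists = await client.resources.check_existence_by_id(
#         resource_id=some_resource.id,  # e.g. from an earlier get() or list()
#         api_version="2019-06-01",
#     )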
async def _delete_by_id_initial( # pylint: disable=inconsistent-return-statements
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_delete_by_id_request_initial(
resource_id=resource_id,
api_version=api_version,
template_url=self._delete_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_by_id_initial.metadata = {'url': "/{resourceId}"} # type: ignore
@distributed_trace_async
async def begin_delete_by_id( # pylint: disable=inconsistent-return-statements
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_by_id_initial(
resource_id=resource_id,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
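# Usage sketch (assumption): deleting by full resource ID instead of by the
# individual name components; placeholders are hypothetical.
#
#     poller = await client.resources.begin_delete_by_id(
#         resource_id=some_resource.id,
#         api_version="2019-06-01",
#     )
#     await poller.result()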
async def _create_or_update_by_id_initial(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_create_or_update_by_id_request_initial(
resource_id=resource_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._create_or_update_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_by_id_initial.metadata = {'url': "/{resourceId}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
"""Create a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:param parameters: Create or update resource parameters.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_by_id_initial(
resource_id=resource_id,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
async def _update_by_id_initial(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_update_by_id_request_initial(
resource_id=resource_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._update_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_by_id_initial.metadata = {'url': "/{resourceId}"} # type: ignore
@distributed_trace_async
async def begin_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
"""Updates a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:param parameters: Update resource parameters.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GenericResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_by_id_initial(
resource_id=resource_id,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
@distributed_trace_async
async def get_by_id(
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> "_models.GenericResource":
"""Gets a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.GenericResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_get_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self.get_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': "/{resourceId}"} # type: ignore
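# Usage sketch (assumption): the by-ID counterpart of get(), convenient when a
# full ARM ID is already at hand.
#
#     resource = await client.resources.get_by_id(
#         resource_id=some_resource.id,  # hypothetical full ARM ID
#         api_version="2019-06-01",
#     )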
class ResourceGroupsOperations:
"""ResourceGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
**kwargs: Any
) -> bool:
"""Checks whether a resource group exists.
:param resource_group_name: The name of the resource group to check. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_resource_groups_check_existence_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
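# Usage sketch (assumption): as with check_existence_by_id, the service replies
# 204 when the group exists and 404 when it does not, which this method folds
# into a bool. `client` is a hypothetical ResourceManagementClient.
#
#     exists = await client.resource_groups.check_existence("my-rg")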
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
parameters: "_models.ResourceGroup",
**kwargs: Any
) -> "_models.ResourceGroup":
"""Creates or updates a resource group.
:param resource_group_name: The name of the resource group to create or update. Can include
alphanumeric, underscore, parentheses, hyphen, period (except at end), and Unicode characters
permitted by the naming rules.
:type resource_group_name: str
:param parameters: Parameters supplied to the create or update a resource group.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourceGroup')
request = build_resource_groups_create_or_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
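# Usage sketch (assumption): resource group creation is a plain request, not an
# LRO; only a location is required. Names are hypothetical.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import ResourceGroup
#
#     group = await client.resource_groups.create_or_update(
#         resource_group_name="my-rg",
#         parameters=ResourceGroup(location="eastus", tags={"env": "dev"}),
#     )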
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_resource_groups_delete_request_initial(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a resource group.
When you delete a resource group, all of its resources are also deleted. Deleting a resource
group deletes all of its template deployments and currently stored operations.
:param resource_group_name: The name of the resource group to delete. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
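# Usage sketch (assumption): deleting a group removes everything in it, hence
# the LRO; awaiting the poller blocks until the service finishes.
#
#     poller = await client.resource_groups.begin_delete("my-rg")
#     await poller.result()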
@distributed_trace_async
async def get(
self,
resource_group_name: str,
**kwargs: Any
) -> "_models.ResourceGroup":
"""Gets a resource group.
:param resource_group_name: The name of the resource group to get. The name is case
insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_resource_groups_get_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
parameters: "_models.ResourceGroupPatchable",
**kwargs: Any
) -> "_models.ResourceGroup":
"""Updates a resource group.
Resource groups can be updated through a simple PATCH operation to a group address. The format
of the request is the same as that for creating a resource group. If a field is unspecified,
the current value is retained.
:param resource_group_name: The name of the resource group to update. The name is case
insensitive.
:type resource_group_name: str
:param parameters: Parameters supplied to update a resource group.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroupPatchable
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ResourceGroup, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ResourceGroupPatchable')
request = build_resource_groups_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"} # type: ignore
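# Usage sketch (assumption): a PATCH that only touches the fields you set; here
# the tags are replaced while the other group properties are retained.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import ResourceGroupPatchable
#
#     group = await client.resource_groups.update(
#         resource_group_name="my-rg",
#         parameters=ResourceGroupPatchable(tags={"env": "prod"}),
#     )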
async def _export_template_initial(
self,
resource_group_name: str,
parameters: "_models.ExportTemplateRequest",
**kwargs: Any
) -> Optional["_models.ResourceGroupExportResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ResourceGroupExportResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ExportTemplateRequest')
request = build_resource_groups_export_template_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._export_template_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_template_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate"} # type: ignore
@distributed_trace_async
async def begin_export_template(
self,
resource_group_name: str,
parameters: "_models.ExportTemplateRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.ResourceGroupExportResult"]:
"""Captures the specified resource group as a template.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param parameters: Parameters for exporting the template.
:type parameters: ~azure.mgmt.resource.resources.v2019_08_01.models.ExportTemplateRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ResourceGroupExportResult or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroupExportResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroupExportResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._export_template_initial(
resource_group_name=resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export_template.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate"} # type: ignore
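# Usage sketch (assumption): exporting every resource in the group as an ARM
# template; ExportTemplateRequest.resources takes resource IDs, or ['*'] for all.
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import ExportTemplateRequest
#
#     poller = await client.resource_groups.begin_export_template(
#         resource_group_name="my-rg",
#         parameters=ExportTemplateRequest(resources=["*"]),
#     )
#     result = await poller.result()
#     template_body = result.template  # the exported template as a dict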
@distributed_trace
def list(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceGroupListResult"]:
"""Gets all the resource groups for a subscription.
:param filter: The filter to apply on the operation. You can filter by tag names and
values. For example, to filter for a tag name and value, use $filter=tagName eq 'tag1'
and tagValue eq 'Value1'. Default value is None.
:type filter: str
:param top: The number of results to return. If None is passed, returns all resource groups.
Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceGroupListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.ResourceGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resource_groups_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resource_groups_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups"} # type: ignore
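# Usage sketch (assumption): the AsyncItemPaged returned above is consumed with
# `async for`, which drives prepare_request/get_next page by page; a $filter
# narrows the listing by tag as the docstring describes.
#
#     async for group in client.resource_groups.list(
#         filter="tagName eq 'env' and tagValue eq 'dev'",
#     ):
#         print(group.name, group.location)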
class TagsOperations:
"""TagsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def delete_value( # pylint: disable=inconsistent-return-statements
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> None:
"""Deletes a tag value.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to delete.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_tags_delete_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_value.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"} # type: ignore
@distributed_trace_async
async def create_or_update_value(
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> "_models.TagValue":
"""Creates a tag value. The name of the tag must already exist.
:param tag_name: The name of the tag.
:type tag_name: str
:param tag_value: The value of the tag to create.
:type tag_value: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagValue, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TagValue
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagValue"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_tags_create_or_update_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.create_or_update_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagValue', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_value.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
tag_name: str,
**kwargs: Any
) -> "_models.TagDetails":
"""Creates a tag in the subscription.
The tag name can have a maximum of 512 characters and is case insensitive. Tag names created by
Azure have prefixes of microsoft, azure, or windows. You cannot create tags with one of these
prefixes.
:param tag_name: The name of the tag to create.
:type tag_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: TagDetails, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.TagDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagDetails"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_tags_create_or_update_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagDetails', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}"} # type: ignore
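# Usage sketch (assumption): registering a subscription-wide tag name first,
# then attaching a value to it with create_or_update_value.
#
#     details = await client.tags.create_or_update(tag_name="env")
#     value = await client.tags.create_or_update_value(
#         tag_name="env", tag_value="dev",
#     )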
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
tag_name: str,
**kwargs: Any
) -> None:
"""Deletes a tag from the subscription.
You must remove all values from a resource tag before you can delete it.
:param tag_name: The name of the tag.
:type tag_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_tags_delete_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}"} # type: ignore
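# Usage sketch (illustrative; `client` is the same assumed client, and every
# value must already be removed from the tag, as the docstring requires):
#
#     await client.tags.delete("environment")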
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.TagsListResult"]:
"""Gets the names and values of all resource tags that are defined in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either TagsListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.TagsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.TagsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_tags_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_tags_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TagsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames"} # type: ignore
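# Usage sketch (illustrative): the returned AsyncItemPaged transparently walks
# next_link pages and yields one TagDetails per tag:
#
#     async for tag in client.tags.list():
#         print(tag.tag_name)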
class DeploymentOperationsOperations:
"""DeploymentOperationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.resources.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_at_scope(
self,
scope: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_at_scope_request(
scope=scope,
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
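# Usage sketch (illustrative; the scope string and the deployment/operation
# identifiers are assumptions):
#
#     scope = "/subscriptions/<subscription-id>/resourceGroups/<rg-name>"
#     op = await client.deployment_operations.get_at_scope(scope, "myDeployment", "1")
#     print(op.operation_id, op.properties.provisioning_state)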
@distributed_trace
def list_at_scope(
self,
scope: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param scope: The scope of a deployment.
:type scope: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_scope_request(
scope=scope,
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"} # type: ignore
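# Usage sketch (illustrative): page through a deployment's operations at an
# arbitrary scope, optionally capping each page with `top`:
#
#     async for op in client.deployment_operations.list_at_scope(scope, "myDeployment", top=10):
#         print(op.operation_id)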
@distributed_trace_async
async def get_at_tenant_scope(
self,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_at_tenant_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
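# Usage sketch (illustrative; tenant-scope deployments hang directly off
# /providers/Microsoft.Resources, so no subscription or scope argument is needed):
#
#     op = await client.deployment_operations.get_at_tenant_scope("tenantDeploy", "1")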
@distributed_trace
def list_at_tenant_scope(
self,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_tenant_scope_request(
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/operations"} # type: ignore
@distributed_trace_async
async def get_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
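# Usage sketch (illustrative; the management group ID and names are assumptions):
#
#     op = await client.deployment_operations.get_at_management_group_scope(
#         "myManagementGroup", "mgDeploy", "1")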
@distributed_trace
def list_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param group_id: The management group ID.
:type group_id: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"} # type: ignore
@distributed_trace_async
async def get_at_subscription_scope(
self,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_at_subscription_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
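# Usage sketch (illustrative): the subscription ID is taken from the client's
# configuration, so only deployment and operation identifiers are passed:
#
#     op = await client.deployment_operations.get_at_subscription_scope("subDeploy", "1")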
@distributed_trace
def list_at_subscription_scope(
self,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
template_url=self.list_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
"""Gets a deployments operation.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param operation_id: The ID of the operation to get.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeploymentOperation, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
request = build_deployment_operations_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}"} # type: ignore
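# Usage sketch (illustrative; resource group, deployment, and operation names
# are assumptions):
#
#     op = await client.deployment_operations.get("myResourceGroup", "rgDeploy", "1")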
@distributed_trace
def list(
self,
resource_group_name: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
"""Gets all deployments operations for a deployment.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param top: The number of results to return. Default value is None.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator-like instance of either DeploymentOperationsListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.resources.v2019_08_01.models.DeploymentOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2019-08-01") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeploymentOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations"} # type: ignore
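# Usage sketch (illustrative): enumerate a resource-group deployment's
# operations and print any per-operation status message:
#
#     async for op in client.deployment_operations.list("myResourceGroup", "rgDeploy"):
#         if op.properties is not None:
#             print(op.operation_id, op.properties.status_message)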
avg_line_length: 44.411035
max_line_length: 4257
alphanum_fraction: 0.6624

content_no_comment:
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import (
    build_deployment_operations_get_at_management_group_scope_request, build_deployment_operations_get_at_scope_request,
    build_deployment_operations_get_at_subscription_scope_request, build_deployment_operations_get_at_tenant_scope_request,
    build_deployment_operations_get_request, build_deployment_operations_list_at_management_group_scope_request,
    build_deployment_operations_list_at_scope_request, build_deployment_operations_list_at_subscription_scope_request,
    build_deployment_operations_list_at_tenant_scope_request, build_deployment_operations_list_request,
    build_deployments_calculate_template_hash_request, build_deployments_cancel_at_management_group_scope_request,
    build_deployments_cancel_at_scope_request, build_deployments_cancel_at_subscription_scope_request,
    build_deployments_cancel_at_tenant_scope_request, build_deployments_cancel_request,
    build_deployments_check_existence_at_management_group_scope_request, build_deployments_check_existence_at_scope_request,
    build_deployments_check_existence_at_subscription_scope_request, build_deployments_check_existence_at_tenant_scope_request,
    build_deployments_check_existence_request, build_deployments_create_or_update_at_management_group_scope_request_initial,
    build_deployments_create_or_update_at_scope_request_initial, build_deployments_create_or_update_at_subscription_scope_request_initial,
    build_deployments_create_or_update_at_tenant_scope_request_initial, build_deployments_create_or_update_request_initial,
    build_deployments_delete_at_management_group_scope_request_initial, build_deployments_delete_at_scope_request_initial,
    build_deployments_delete_at_subscription_scope_request_initial, build_deployments_delete_at_tenant_scope_request_initial,
    build_deployments_delete_request_initial, build_deployments_export_template_at_management_group_scope_request,
    build_deployments_export_template_at_scope_request, build_deployments_export_template_at_subscription_scope_request,
    build_deployments_export_template_at_tenant_scope_request, build_deployments_export_template_request,
    build_deployments_get_at_management_group_scope_request, build_deployments_get_at_scope_request,
    build_deployments_get_at_subscription_scope_request, build_deployments_get_at_tenant_scope_request,
    build_deployments_get_request, build_deployments_list_at_management_group_scope_request,
    build_deployments_list_at_scope_request, build_deployments_list_at_subscription_scope_request,
    build_deployments_list_at_tenant_scope_request, build_deployments_list_by_resource_group_request,
    build_deployments_validate_at_management_group_scope_request, build_deployments_validate_at_scope_request,
    build_deployments_validate_at_subscription_scope_request, build_deployments_validate_at_tenant_scope_request,
    build_deployments_validate_request, build_deployments_what_if_at_subscription_scope_request_initial,
    build_deployments_what_if_request_initial, build_operations_list_request,
    build_providers_get_at_tenant_scope_request, build_providers_get_request,
    build_providers_list_at_tenant_scope_request, build_providers_list_request,
    build_providers_register_request, build_providers_unregister_request,
    build_resource_groups_check_existence_request, build_resource_groups_create_or_update_request,
    build_resource_groups_delete_request_initial, build_resource_groups_export_template_request_initial,
    build_resource_groups_get_request, build_resource_groups_list_request,
    build_resource_groups_update_request, build_resources_check_existence_by_id_request,
    build_resources_check_existence_request, build_resources_create_or_update_by_id_request_initial,
    build_resources_create_or_update_request_initial, build_resources_delete_by_id_request_initial,
    build_resources_delete_request_initial, build_resources_get_by_id_request,
    build_resources_get_request, build_resources_list_by_resource_group_request,
    build_resources_list_request, build_resources_move_resources_request_initial,
    build_resources_update_by_id_request_initial, build_resources_update_request_initial,
    build_resources_validate_move_resources_request_initial, build_tags_create_or_update_request,
    build_tags_create_or_update_value_request, build_tags_delete_request,
    build_tags_delete_value_request, build_tags_list_request,
)
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.OperationListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_operations_list_request(
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_operations_list_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/providers/Microsoft.Resources/operations"}
class DeploymentsOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_at_scope_initial(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_delete_at_scope_request_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_scope_initial.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_delete_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
api_version = kwargs.pop('api_version', "2019-08-01")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_at_scope_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
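# Usage sketch (illustrative) of the long-running-operation pattern above: the
# begin_* method issues the initial DELETE, then the returned poller drives
# AsyncARMPolling until the service reports completion:
#
#     poller = await client.deployments.begin_delete_at_scope(scope, "myDeployment")
#     await poller.result()  # resolves to None once deletion has finished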
@distributed_trace_async
async def check_existence_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> bool:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_check_existence_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
async def _create_or_update_at_scope_initial(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_at_scope_request_initial(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_scope_initial.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_create_or_update_at_scope(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_at_scope_initial(
scope=scope,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
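# Usage sketch (illustrative; the model imports, the enum member casing, and
# the inline template dict are assumptions for the example):
#
#     from azure.mgmt.resource.resources.v2019_08_01.models import (
#         Deployment, DeploymentMode, DeploymentProperties)
#     props = DeploymentProperties(mode=DeploymentMode.INCREMENTAL, template={})
#     poller = await client.deployments.begin_create_or_update_at_scope(
#         scope, "myDeployment", Deployment(properties=props))
#     extended = await poller.result()  # a DeploymentExtended on success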
@distributed_trace_async
async def get_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_get_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def cancel_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_cancel_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"}
@distributed_trace_async
async def validate_at_scope(
self,
scope: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"}
@distributed_trace_async
async def export_template_at_scope(
self,
scope: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_export_template_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"}
@distributed_trace
def list_at_scope(
self,
scope: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_scope_request(
scope=scope,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_scope_request(
scope=scope,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/"}
async def _delete_at_tenant_scope_initial(
self,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_delete_at_tenant_scope_request_initial(
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_tenant_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_tenant_scope_initial.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_delete_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
api_version = kwargs.pop('api_version', "2019-08-01")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_at_tenant_scope_initial(
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def check_existence_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> bool:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_check_existence_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"}
async def _create_or_update_at_tenant_scope_initial(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_create_or_update_at_tenant_scope_request_initial(
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_tenant_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_tenant_scope_initial.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_create_or_update_at_tenant_scope(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_at_tenant_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def get_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_get_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def cancel_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_cancel_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"}
@distributed_trace_async
async def validate_at_tenant_scope(
self,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_validate_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/validate"}
@distributed_trace_async
async def export_template_at_tenant_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_export_template_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"}
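# Lists deployments at tenant scope as an AsyncItemPaged. The three local
# closures implement the paging protocol: prepare_request builds either the
# first request (with filter/top) or a follow-up GET against the service's
# next link, extract_data pulls the page items plus the next_link out of each
# DeploymentListResult, and get_next fetches one page through the pipeline.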
@distributed_trace
def list_at_tenant_scope(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_tenant_scope_request(
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_tenant_scope_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/"}
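# Long-running operations follow a two-part pattern throughout this class: a
# private _*_initial coroutine issues the first HTTP call, and the public
# begin_* wrapper turns that raw response into an AsyncLROPoller that drives
# ARM polling to completion (or resumes from a continuation_token).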
async def _delete_at_management_group_scope_initial(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_delete_at_management_group_scope_request_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self._delete_at_management_group_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_management_group_scope_initial.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_delete_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
api_version = kwargs.pop('api_version', "2019-08-01")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_at_management_group_scope_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
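# Existence checks use a HEAD-style contract: the service answers 204 when the
# deployment exists and 404 when it does not, so the boolean result is derived
# purely from the status code (204 falls in the 2xx range, 404 does not).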
@distributed_trace_async
async def check_existence_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> bool:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_check_existence_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.check_existence_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
async def _create_or_update_at_management_group_scope_initial(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_create_or_update_at_management_group_scope_request_initial(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_management_group_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_management_group_scope_initial.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_create_or_update_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_at_management_group_scope_initial(
group_id=group_id,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def get_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.get_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def cancel_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_cancel_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.cancel_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"}
@distributed_trace_async
async def validate_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
parameters: "_models.ScopedDeployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ScopedDeployment')
request = build_deployments_validate_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"}
@distributed_trace_async
async def export_template_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_export_template_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
template_url=self.export_template_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"}
@distributed_trace
def list_at_management_group_scope(
self,
group_id: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_management_group_scope_request(
group_id=group_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_management_group_scope_request(
group_id=group_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/"}
async def _delete_at_subscription_scope_initial(
self,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_delete_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_delete_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
api_version = kwargs.pop('api_version', "2019-08-01")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_at_subscription_scope_initial(
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def check_existence_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> bool:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_check_existence_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
async def _create_or_update_at_subscription_scope_initial(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_create_or_update_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_at_subscription_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def get_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_get_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def cancel_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_cancel_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.cancel_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"}
@distributed_trace_async
async def validate_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"}
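# What-if is also a long-running operation, but its initial call can complete
# synchronously: a 200 already carries the WhatIfOperationResult, while a 202
# means the evaluation is still running and the Location / Retry-After headers
# tell the poller where and when to look for the final state.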
async def _what_if_at_subscription_scope_initial(
self,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> Optional["_models.WhatIfOperationResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'DeploymentWhatIf')
request = build_deployments_what_if_at_subscription_scope_request_initial(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._what_if_at_subscription_scope_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if response.status_code == 202:
response_headers['Location'] = self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After'] = self._deserialize('str', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_what_if_at_subscription_scope_initial.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"}
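# Unlike the other pollers in this class, what-if polling is configured with
# lro_options={'final-state-via': 'location'}, because the final result is
# fetched from the Location header rather than from the original resource URL.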
@distributed_trace_async
async def begin_what_if_at_subscription_scope(
self,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> AsyncLROPoller["_models.WhatIfOperationResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._what_if_at_subscription_scope_initial(
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_what_if_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"}
@distributed_trace_async
async def export_template_at_subscription_scope(
self,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_export_template_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.export_template_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"}
@distributed_trace
def list_at_subscription_scope(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_at_subscription_scope_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_at_subscription_scope_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/"}
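# The remaining deployment operations target resource-group scope; they mirror
# the tenant/management-group/subscription variants above but route every
# request with both resource_group_name and the configured subscription_id.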
async def _delete_initial(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_delete_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
api_version = kwargs.pop('api_version', "2019-08-01")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
api_version=api_version,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> bool:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_check_existence_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"}
async def _create_or_update_initial(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_create_or_update_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> AsyncLROPoller["_models.DeploymentExtended"]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"}
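# A minimal usage sketch for the method above (the credential, subscription
# id, resource group, deployment name and template body below are placeholders
# for illustration, not values defined in this module):
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.resource.resources.aio import ResourceManagementClient
#
#     async def deploy():
#         async with ResourceManagementClient(
#             DefaultAzureCredential(), "<subscription-id>"
#         ) as client:
#             poller = await client.deployments.begin_create_or_update(
#                 "my-rg",
#                 "my-deployment",
#                 {"properties": {
#                     "mode": "Incremental",
#                     "template": {},   # ARM template body goes here
#                     "parameters": {},
#                 }},
#             )
#             result = await poller.result()  # a DeploymentExtended model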
@distributed_trace_async
async def get(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExtended":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExtended', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}"}
@distributed_trace_async
async def cancel(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_cancel_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.cancel.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
cancel.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel"}
@distributed_trace_async
async def validate(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.Deployment",
**kwargs: Any
) -> "_models.DeploymentValidateResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'Deployment')
request = build_deployments_validate_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.validate.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 400]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if response.status_code == 400:
deserialized = self._deserialize('DeploymentValidateResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
validate.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate"}
async def _what_if_initial(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> Optional["_models.WhatIfOperationResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'DeploymentWhatIf')
request = build_deployments_what_if_request_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._what_if_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if response.status_code == 202:
response_headers['Location'] = self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After'] = self._deserialize('str', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_what_if_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"}
@distributed_trace_async
async def begin_what_if(
self,
resource_group_name: str,
deployment_name: str,
parameters: "_models.DeploymentWhatIf",
**kwargs: Any
) -> AsyncLROPoller["_models.WhatIfOperationResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._what_if_initial(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x, y, z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('WhatIfOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_what_if.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/whatIf"}
@distributed_trace_async
async def export_template(
self,
resource_group_name: str,
deployment_name: str,
**kwargs: Any
) -> "_models.DeploymentExportResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployments_export_template_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.export_template.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_template.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate"}
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployments_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployments_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/"}
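# Calculates the hash of the given template. This is the one tenant-level POST
# in this class: the template is serialized as a plain JSON object and the
# service answers with a TemplateHashResult containing the computed hash.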
@distributed_trace_async
async def calculate_template_hash(
self,
template: Any,
**kwargs: Any
) -> "_models.TemplateHashResult":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(template, 'object')
request = build_deployments_calculate_template_hash_request(
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.calculate_template_hash.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('TemplateHashResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
calculate_template_hash.metadata = {'url': "/providers/Microsoft.Resources/calculateTemplateHash"}
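# Async operations for resource providers. As with the other operation groups
# in this file, this class is not meant to be instantiated directly; a client
# instance creates it and attaches it as its `providers` attribute.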
class ProvidersOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def unregister(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_providers_unregister_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.unregister.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unregister.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister"}
@distributed_trace_async
async def register(
self,
resource_provider_namespace: str,
**kwargs: Any
) -> "_models.Provider":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_providers_register_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.register.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"}
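    # Lists all resource providers for the subscription as an async paged iterable;
    # top limits the number of results and expand requests additional provider
    # properties in the response. Paging follows ProviderListResult.next_link.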
@distributed_trace
def list(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ProviderListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_providers_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
expand=expand,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_providers_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers"}
@distributed_trace
def list_at_tenant_scope(
self,
top: Optional[int] = None,
expand: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.ProviderListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_providers_list_at_tenant_scope_request(
api_version=api_version,
top=top,
expand=expand,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_providers_list_at_tenant_scope_request(
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProviderListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers"}
@distributed_trace_async
async def get(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_providers_get_request(
resource_provider_namespace=resource_provider_namespace,
subscription_id=self._config.subscription_id,
api_version=api_version,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"}
@distributed_trace_async
async def get_at_tenant_scope(
self,
resource_provider_namespace: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.Provider":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_providers_get_at_tenant_scope_request(
resource_provider_namespace=resource_provider_namespace,
api_version=api_version,
expand=expand,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Provider', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/{resourceProviderNamespace}"}
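# Operations on generic Azure resources: listing, existence checks, CRUD addressed
# either by resource group/provider/type/name or by full resource ID, and cross-group
# moves. Long-running operations are exposed as begin_* methods returning AsyncLROPoller.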
class ResourcesOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
filter: Optional[str] = None,
expand: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resources_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
expand=expand,
top=top,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resources_list_by_resource_group_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources"}
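    # Moving resources between groups is a long-running operation: _move_resources_initial
    # below issues the moveResources request (202/204 are accepted) and
    # begin_move_resources wraps it in an AsyncLROPoller so callers can await completion.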
async def _move_resources_initial(
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ResourcesMoveInfo')
request = build_resources_move_resources_request_initial(
source_resource_group_name=source_resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._move_resources_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_move_resources_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"}
@distributed_trace_async
async def begin_move_resources(
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> AsyncLROPoller[None]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._move_resources_initial(
source_resource_group_name=source_resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_move_resources.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources"}
async def _validate_move_resources_initial(
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ResourcesMoveInfo')
request = build_resources_validate_move_resources_request_initial(
source_resource_group_name=source_resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._validate_move_resources_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_validate_move_resources_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources"}
@distributed_trace_async
async def begin_validate_move_resources(
self,
source_resource_group_name: str,
parameters: "_models.ResourcesMoveInfo",
**kwargs: Any
) -> AsyncLROPoller[None]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._validate_move_resources_initial(
source_resource_group_name=source_resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_validate_move_resources.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources"}
@distributed_trace
def list(
self,
filter: Optional[str] = None,
expand: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resources_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
expand=expand,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resources_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resources"}
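    # Checks whether a resource exists: the service answers with 204 or 404, and the
    # boolean result is derived from the final status code (204 -> True, 404 -> False).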
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> bool:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_check_existence_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"}
async def _delete_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_delete_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"}
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"}
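    # Create-or-update is split into _create_or_update_initial, which sends the serialized
    # GenericResource (200/201/202 accepted), and begin_create_or_update, which returns an
    # AsyncLROPoller resolving to the resulting GenericResource.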
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_create_or_update_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"}
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"}
async def _update_initial(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_update_request_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"}
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"}
@distributed_trace_async
async def get(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
api_version: str,
**kwargs: Any
) -> "_models.GenericResource":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_get_request(
resource_group_name=resource_group_name,
resource_provider_namespace=resource_provider_namespace,
parent_resource_path=parent_resource_path,
resource_type=resource_type,
resource_name=resource_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}"}
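    # Same existence check as above, but addressed by full resource ID together with an
    # explicit api_version; 204 is treated as "exists" and 404 as "does not exist".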
@distributed_trace_async
async def check_existence_by_id(
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> bool:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_check_existence_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self.check_existence_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_by_id.metadata = {'url': "/{resourceId}"}
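    # Delete-by-ID follows the usual LRO split: _delete_by_id_initial issues the request
    # (200/202/204 accepted) and begin_delete_by_id polls it to completion.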
async def _delete_by_id_initial(
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_delete_by_id_request_initial(
resource_id=resource_id,
api_version=api_version,
template_url=self._delete_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_by_id_initial.metadata = {'url': "/{resourceId}"}
@distributed_trace_async
async def begin_delete_by_id(
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_by_id_initial(
resource_id=resource_id,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_by_id.metadata = {'url': "/{resourceId}"}
async def _create_or_update_by_id_initial(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_create_or_update_by_id_request_initial(
resource_id=resource_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._create_or_update_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_by_id_initial.metadata = {'url': "/{resourceId}"}
@distributed_trace_async
async def begin_create_or_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_by_id_initial(
resource_id=resource_id,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_by_id.metadata = {'url': "/{resourceId}"}
async def _update_by_id_initial(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> Optional["_models.GenericResource"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'GenericResource')
request = build_resources_update_by_id_request_initial(
resource_id=resource_id,
content_type=content_type,
api_version=api_version,
json=_json,
template_url=self._update_by_id_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_by_id_initial.metadata = {'url': "/{resourceId}"}
@distributed_trace_async
async def begin_update_by_id(
self,
resource_id: str,
api_version: str,
parameters: "_models.GenericResource",
**kwargs: Any
) -> AsyncLROPoller["_models.GenericResource"]:
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._update_by_id_initial(
resource_id=resource_id,
api_version=api_version,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_by_id.metadata = {'url': "/{resourceId}"}
@distributed_trace_async
async def get_by_id(
self,
resource_id: str,
api_version: str,
**kwargs: Any
) -> "_models.GenericResource":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_resources_get_by_id_request(
resource_id=resource_id,
api_version=api_version,
template_url=self.get_by_id.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': "/{resourceId}"}
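# Operations on resource groups: existence checks, create/update/delete, template
# export and paged listing for the configured subscription.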
class ResourceGroupsOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def check_existence(
self,
resource_group_name: str,
**kwargs: Any
) -> bool:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_resource_groups_check_existence_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.check_existence.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
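    # Creates or updates a resource group from the supplied ResourceGroup model; both
    # 200 and 201 responses are deserialized back into a ResourceGroup.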
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
parameters: "_models.ResourceGroup",
**kwargs: Any
) -> "_models.ResourceGroup":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ResourceGroup')
request = build_resource_groups_create_or_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
async def _delete_initial(
self,
resource_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_resource_groups_delete_request_initial(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
api_version = kwargs.pop('api_version', "2019-08-01")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
@distributed_trace_async
async def get(
self,
resource_group_name: str,
**kwargs: Any
) -> "_models.ResourceGroup":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_resource_groups_get_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
@distributed_trace_async
async def update(
self,
resource_group_name: str,
parameters: "_models.ResourceGroupPatchable",
**kwargs: Any
) -> "_models.ResourceGroup":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ResourceGroupPatchable')
request = build_resource_groups_update_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ResourceGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}"}
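    # Exporting a resource group template is a long-running operation:
    # _export_template_initial sends the ExportTemplateRequest (200/202 accepted) and
    # begin_export_template polls it with final-state-via "location" before
    # deserializing the ResourceGroupExportResult.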
async def _export_template_initial(
self,
resource_group_name: str,
parameters: "_models.ExportTemplateRequest",
**kwargs: Any
) -> Optional["_models.ResourceGroupExportResult"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
_json = self._serialize.body(parameters, 'ExportTemplateRequest')
request = build_resource_groups_export_template_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._export_template_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_template_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate"}
@distributed_trace_async
async def begin_export_template(
self,
resource_group_name: str,
parameters: "_models.ExportTemplateRequest",
**kwargs: Any
) -> AsyncLROPoller["_models.ResourceGroupExportResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
content_type = kwargs.pop('content_type', "application/json")
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._export_template_initial(
resource_group_name=resource_group_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ResourceGroupExportResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export_template.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate"}
@distributed_trace
def list(
self,
filter: Optional[str] = None,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.ResourceGroupListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_resource_groups_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
filter=filter,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_resource_groups_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ResourceGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups"}
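# Operations on subscription-level tag names and tag values: create, delete and
# paged listing of tags under /subscriptions/{subscriptionId}/tagNames.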
class TagsOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def delete_value(
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_tags_delete_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_value.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"}
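    # Creates or updates a single value under the given tag name; both 200 and 201
    # responses are deserialized into a TagValue.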
@distributed_trace_async
async def create_or_update_value(
self,
tag_name: str,
tag_value: str,
**kwargs: Any
) -> "_models.TagValue":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_tags_create_or_update_value_request(
tag_name=tag_name,
tag_value=tag_value,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.create_or_update_value.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagValue', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagValue', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_value.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}"}
@distributed_trace_async
async def create_or_update(
self,
tag_name: str,
**kwargs: Any
) -> "_models.TagDetails":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_tags_create_or_update_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('TagDetails', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('TagDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}"}
@distributed_trace_async
async def delete(
self,
tag_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_tags_delete_request(
tag_name=tag_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames/{tagName}"}
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.TagsListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_tags_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_tags_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("TagsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/tagNames"}
class DeploymentOperationsOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get_at_scope(
self,
scope: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployment_operations_get_at_scope_request(
scope=scope,
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"}
@distributed_trace
def list_at_scope(
self,
scope: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_scope_request(
scope=scope,
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_scope_request(
scope=scope,
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_scope.metadata = {'url': "/{scope}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"}
@distributed_trace_async
async def get_at_tenant_scope(
self,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployment_operations_get_at_tenant_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"}
@distributed_trace
def list_at_tenant_scope(
self,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_tenant_scope_request(
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_tenant_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_tenant_scope_request(
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_tenant_scope.metadata = {'url': "/providers/Microsoft.Resources/deployments/{deploymentName}/operations"}
@distributed_trace_async
async def get_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployment_operations_get_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
operation_id=operation_id,
api_version=api_version,
template_url=self.get_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"}
@distributed_trace
def list_at_management_group_scope(
self,
group_id: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
api_version=api_version,
top=top,
template_url=self.list_at_management_group_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_management_group_scope_request(
group_id=group_id,
deployment_name=deployment_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_management_group_scope.metadata = {'url': "/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"}
@distributed_trace_async
async def get_at_subscription_scope(
self,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployment_operations_get_at_subscription_scope_request(
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}"}
@distributed_trace
def list_at_subscription_scope(
self,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
template_url=self.list_at_subscription_scope.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_at_subscription_scope_request(
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_at_subscription_scope.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations"}
@distributed_trace_async
async def get(
self,
resource_group_name: str,
deployment_name: str,
operation_id: str,
**kwargs: Any
) -> "_models.DeploymentOperation":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2019-08-01")
request = build_deployment_operations_get_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DeploymentOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}"}
@distributed_trace
def list(
self,
resource_group_name: str,
deployment_name: str,
top: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.DeploymentOperationsListResult"]:
api_version = kwargs.pop('api_version', "2019-08-01")
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_deployment_operations_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
top=top,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_deployment_operations_list_request(
resource_group_name=resource_group_name,
deployment_name=deployment_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DeploymentOperationsListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations"}
| true
| true
|
790bb2ff511a693f4e1285c5398343c2b12ed192
| 2,608
|
py
|
Python
|
geometry_tools.py
|
helkebir/Reachable-Set-Inner-Approximation
|
4e05780b692214c26c76692f65f61d2f7f506e79
|
[
"MIT"
] | null | null | null |
geometry_tools.py
|
helkebir/Reachable-Set-Inner-Approximation
|
4e05780b692214c26c76692f65f61d2f7f506e79
|
[
"MIT"
] | null | null | null |
geometry_tools.py
|
helkebir/Reachable-Set-Inner-Approximation
|
4e05780b692214c26c76692f65f61d2f7f506e79
|
[
"MIT"
] | null | null | null |
import numpy as np
from shapely import geometry
def shrink(coords: np.ndarray, dist: float) -> tuple:
"""Shrinks a 2D polygon by a given distance.
The coordinates of the polygon are expected as an N x 2-matrix,
and a positive distance results in inward shrinking.
    A degenerate placeholder ([0], [0]) is returned if the shrinking
    operation eliminates the polygon entirely.
Args:
coords: A matrix of coordinates.
dist: The distance to shrink by.
Returns:
A tuple containing the x, y coordinates of the original set, as
well as the x and y coordinates of the shrunken set, in that
order.
"""
my_polygon = geometry.Polygon(coords)
xy = my_polygon.exterior.xy
my_polygon_shrunken = my_polygon.buffer(-dist)
try:
xys = my_polygon_shrunken.exterior.xy
except AttributeError:
        xys = ([0], [0])  # degenerate placeholder: the polygon vanished
return (*xy, *xys)
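# Usage sketch (illustrative): eroding the unit square by 0.1 yields the
# inner square with corners (0.1, 0.1)-(0.9, 0.9):
#     square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]])
#     x, y, xs, ys = shrink(square, 0.1)
#     # min(xs), max(xs) -> 0.1, 0.9 (likewise for ys)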
def hausdorff(A: np.ndarray, B: np.ndarray) -> float:
"""Computes the Hausdorff distance between two 2D polygons.
Args:
A: A matrix defining the first polygon.
B: A matrix defining the second polygon.
Returns:
A float representing the Hausdorff distance.
"""
return geometry.Polygon(A).hausdorff_distance(geometry.Polygon(B))
def read_polygon(file: str) -> np.ndarray:
"""Reads a polygon from a table.
Args:
file: Path to a file containing a plain text, tab-separated
table with scalars.
Returns:
A matrix containing the data in the file.
"""
return np.genfromtxt(file)
if __name__ == "__main__":
import matplotlib as mpl
import matplotlib.pyplot as plt
# Distance to shrink by
dh = 0.01
x, y, xs, ys = shrink(read_polygon('example.txt'), dh)
ax = plt.subplot()
ax.grid(which='major', alpha=0.5, color='k')
ax.grid(which='minor', alpha=0.3, color='k', linestyle=':')
ax.minorticks_on()
ax.set_axisbelow(True)
ax.fill(x, y, color='b', facecolor='lightskyblue',
edgecolor='dodgerblue', label='Original', alpha=0.75)
ax.fill(xs, ys, facecolor='mediumseagreen', edgecolor='forestgreen',
label='Shrunk', alpha=0.75)
ax.set_aspect('equal')
ax.legend()
golden = 0.01017601435813135
assert(np.isclose(
hausdorff(np.vstack([x, y]).T, np.vstack([xs, ys]).T),
golden
))
print("SUCCESS")
print(f'Area original: {geometry.Polygon(np.vstack([x, y]).T).area:.6f}')
print(f'Area shrunk: {geometry.Polygon(np.vstack([xs, ys]).T).area:.6f}')
plt.show()
| 28.977778
| 77
| 0.63842
|
import numpy as np
from shapely import geometry
def shrink(coords: np.ndarray, dist: float) -> tuple:
my_polygon = geometry.Polygon(coords)
xy = my_polygon.exterior.xy
my_polygon_shrunken = my_polygon.buffer(-dist)
try:
xys = my_polygon_shrunken.exterior.xy
except AttributeError:
xys = ([0], [0])
return (*xy, *xys)
def hausdorff(A: np.ndarray, B: np.ndarray) -> float:
return geometry.Polygon(A).hausdorff_distance(geometry.Polygon(B))
def read_polygon(file: str) -> np.ndarray:
return np.genfromtxt(file)
if __name__ == "__main__":
import matplotlib as mpl
import matplotlib.pyplot as plt
dh = 0.01
x, y, xs, ys = shrink(read_polygon('example.txt'), dh)
ax = plt.subplot()
ax.grid(which='major', alpha=0.5, color='k')
ax.grid(which='minor', alpha=0.3, color='k', linestyle=':')
ax.minorticks_on()
ax.set_axisbelow(True)
ax.fill(x, y, color='b', facecolor='lightskyblue',
edgecolor='dodgerblue', label='Original', alpha=0.75)
ax.fill(xs, ys, facecolor='mediumseagreen', edgecolor='forestgreen',
label='Shrunk', alpha=0.75)
ax.set_aspect('equal')
ax.legend()
golden = 0.01017601435813135
assert(np.isclose(
hausdorff(np.vstack([x, y]).T, np.vstack([xs, ys]).T),
golden
))
print("SUCCESS")
print(f'Area original: {geometry.Polygon(np.vstack([x, y]).T).area:.6f}')
print(f'Area shrunk: {geometry.Polygon(np.vstack([xs, ys]).T).area:.6f}')
plt.show()
| true
| true
|
790bb3805d70650b4582bb054398b5842ab0fffc
| 17,938
|
py
|
Python
|
tests/nlu/base/test_training_data.py
|
vishnuvrpriya/rasa
|
60f6a86dfbdafcd62360a7e4a90be01cd437c4ea
|
[
"Apache-2.0"
] | 1
|
2019-11-03T02:21:17.000Z
|
2019-11-03T02:21:17.000Z
|
tests/nlu/base/test_training_data.py
|
vishnuvrpriya/rasa
|
60f6a86dfbdafcd62360a7e4a90be01cd437c4ea
|
[
"Apache-2.0"
] | 6
|
2020-01-28T23:04:20.000Z
|
2022-02-10T00:43:04.000Z
|
tests/nlu/base/test_training_data.py
|
vishnuvrpriya/rasa
|
60f6a86dfbdafcd62360a7e4a90be01cd437c4ea
|
[
"Apache-2.0"
] | 1
|
2021-06-08T17:24:15.000Z
|
2021-06-08T17:24:15.000Z
|
# -*- coding: utf-8 -*-
import pytest
import tempfile
from jsonschema import ValidationError
from rasa.nlu import training_data
from rasa.nlu.convert import convert_training_data
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.training_data import TrainingData
from rasa.nlu.training_data.formats import MarkdownReader
from rasa.nlu.training_data.formats.rasa import validate_rasa_nlu_data
from rasa.nlu.training_data.loading import guess_format, UNK, load_data
from rasa.nlu.training_data.util import get_file_format
import rasa.utils.io as io_utils
def test_example_training_data_is_valid():
demo_json = "data/examples/rasa/demo-rasa.json"
data = io_utils.read_json_file(demo_json)
validate_rasa_nlu_data(data)
@pytest.mark.parametrize(
"invalid_data",
[
{"wrong_top_level": []},
["this is not a toplevel dict"],
{
"rasa_nlu_data": {
"common_examples": [{"intent": "some example without text"}]
}
},
{
"rasa_nlu_data": {
"common_examples": [
{
"text": "mytext",
"entities": [{"start": "INVALID", "end": 0, "entity": "x"}],
}
]
}
},
],
)
def test_validation_is_throwing_exceptions(invalid_data):
with pytest.raises(ValidationError):
validate_rasa_nlu_data(invalid_data)
def test_luis_data():
td = training_data.load_data("data/examples/luis/demo-restaurants.json")
assert not td.is_empty()
assert len(td.entity_examples) == 8
assert len(td.intent_examples) == 28
assert len(td.training_examples) == 28
assert td.entity_synonyms == {}
assert td.intents == {"affirm", "goodbye", "greet", "inform"}
assert td.entities == {"location", "cuisine"}
def test_wit_data():
td = training_data.load_data("data/examples/wit/demo-flights.json")
assert not td.is_empty()
assert len(td.entity_examples) == 4
assert len(td.intent_examples) == 1
assert len(td.training_examples) == 4
assert td.entity_synonyms == {}
assert td.intents == {"flight_booking"}
assert td.entities == {"location", "datetime"}
def test_dialogflow_data():
td = training_data.load_data("data/examples/dialogflow/")
assert not td.is_empty()
assert len(td.entity_examples) == 5
assert len(td.intent_examples) == 24
assert len(td.training_examples) == 24
assert len(td.lookup_tables) == 2
assert td.intents == {"affirm", "goodbye", "hi", "inform"}
assert td.entities == {"cuisine", "location"}
non_trivial_synonyms = {k: v for k, v in td.entity_synonyms.items() if k != v}
assert non_trivial_synonyms == {
"mexico": "mexican",
"china": "chinese",
"india": "indian",
}
    # Lookup-table order varies across machines, hence the order-insensitive set comparisons.
assert {td.lookup_tables[0]["name"], td.lookup_tables[1]["name"]} == {
"location",
"cuisine",
}
assert {
len(td.lookup_tables[0]["elements"]),
len(td.lookup_tables[1]["elements"]),
} == {4, 6}
def test_lookup_table_json():
lookup_fname = "data/test/lookup_tables/plates.txt"
td_lookup = training_data.load_data("data/test/lookup_tables/lookup_table.json")
assert not td_lookup.is_empty()
assert td_lookup.lookup_tables[0]["name"] == "plates"
assert td_lookup.lookup_tables[0]["elements"] == lookup_fname
assert td_lookup.lookup_tables[1]["name"] == "drinks"
assert td_lookup.lookup_tables[1]["elements"] == [
"mojito",
"lemonade",
"sweet berry wine",
"tea",
"club mate",
]
def test_lookup_table_md():
lookup_fname = "data/test/lookup_tables/plates.txt"
td_lookup = training_data.load_data("data/test/lookup_tables/lookup_table.md")
assert not td_lookup.is_empty()
assert td_lookup.lookup_tables[0]["name"] == "plates"
assert td_lookup.lookup_tables[0]["elements"] == lookup_fname
assert td_lookup.lookup_tables[1]["name"] == "drinks"
assert td_lookup.lookup_tables[1]["elements"] == [
"mojito",
"lemonade",
"sweet berry wine",
"tea",
"club mate",
]
@pytest.mark.parametrize(
"files",
[
[
"data/examples/rasa/demo-rasa.json",
"data/examples/rasa/demo-rasa-responses.md",
],
[
"data/examples/rasa/demo-rasa.md",
"data/examples/rasa/demo-rasa-responses.md",
],
],
)
def test_demo_data(files):
from rasa.importers.utils import training_data_from_paths
td = training_data_from_paths(files, language="en")
assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye", "chitchat"}
assert td.entities == {"location", "cuisine"}
assert td.responses == {"I am Mr. Bot", "It's sunny where I live"}
assert len(td.training_examples) == 46
assert len(td.intent_examples) == 46
assert len(td.response_examples) == 4
assert len(td.entity_examples) == 11
assert len(td.nlg_stories) == 2
assert td.entity_synonyms == {
"Chines": "chinese",
"Chinese": "chinese",
"chines": "chinese",
"vegg": "vegetarian",
"veggie": "vegetarian",
}
assert td.regex_features == [
{"name": "greet", "pattern": r"hey[^\s]*"},
{"name": "zipcode", "pattern": r"[0-9]{5}"},
]
@pytest.mark.parametrize(
"filepaths",
[["data/examples/rasa/demo-rasa.md", "data/examples/rasa/demo-rasa-responses.md"]],
)
def test_train_test_split(filepaths):
from rasa.importers.utils import training_data_from_paths
td = training_data_from_paths(filepaths, language="en")
assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye", "chitchat"}
assert td.entities == {"location", "cuisine"}
assert len(td.training_examples) == 46
assert len(td.intent_examples) == 46
td_train, td_test = td.train_test_split(train_frac=0.8)
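    # Rasa splits per intent (stratified), so with train_frac=0.8 the
    # 46 examples land as a 35/11 split after per-intent rounding.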
assert len(td_train.training_examples) == 35
assert len(td_test.training_examples) == 11
@pytest.mark.parametrize(
"files",
[
("data/examples/rasa/demo-rasa.json", "data/test/multiple_files_json"),
("data/examples/rasa/demo-rasa.md", "data/test/multiple_files_markdown"),
],
)
def test_data_merging(files):
td_reference = training_data.load_data(files[0])
td = training_data.load_data(files[1])
assert len(td.entity_examples) == len(td_reference.entity_examples)
assert len(td.intent_examples) == len(td_reference.intent_examples)
assert len(td.training_examples) == len(td_reference.training_examples)
assert td.intents == td_reference.intents
assert td.entities == td_reference.entities
assert td.entity_synonyms == td_reference.entity_synonyms
assert td.regex_features == td_reference.regex_features
def test_markdown_single_sections():
td_regex_only = training_data.load_data(
"data/test/markdown_single_sections/regex_only.md"
)
assert td_regex_only.regex_features == [{"name": "greet", "pattern": r"hey[^\s]*"}]
td_syn_only = training_data.load_data(
"data/test/markdown_single_sections/synonyms_only.md"
)
assert td_syn_only.entity_synonyms == {"Chines": "chinese", "Chinese": "chinese"}
def test_repeated_entities():
data = """
{
"rasa_nlu_data": {
"common_examples" : [
{
"text": "book a table today from 3 to 6 for 3 people",
"intent": "unk",
"entities": [
{
"entity": "description",
"start": 35,
"end": 36,
"value": "3"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
tokens = WhitespaceTokenizer().tokenize(example.text)
start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens)
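    # The char span [35, 36) picks out the *second* "3" in the text; under
    # whitespace tokenization that is token 9, giving token span [9, 10).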
assert start == 9
assert end == 10
def test_multiword_entities():
data = """
{
"rasa_nlu_data": {
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "New York City"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
tokens = WhitespaceTokenizer().tokenize(example.text)
start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens)
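    # The char span [19, 32) covers "New York City", i.e. tokens 4-6 under
    # whitespace tokenization, hence the exclusive token span [4, 7).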
assert start == 4
assert end == 7
def test_nonascii_entities():
data = """
{
"luis_schema_version": "2.0",
"utterances" : [
{
"text": "I am looking for a ßäæ ?€ö) item",
"intent": "unk",
"entities": [
{
"entity": "description",
"startPos": 19,
"endPos": 26
}
]
}
]
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
entity = entities[0]
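    # LUIS `endPos` is inclusive, so the converter emits the exclusive
    # `end` as endPos + 1 = 27, spanning exactly "ßäæ ?€ö)".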
assert entity["value"] == "ßäæ ?€ö)"
assert entity["start"] == 19
assert entity["end"] == 27
assert entity["entity"] == "description"
def test_entities_synonyms():
data = """
{
"rasa_nlu_data": {
"entity_synonyms": [
{
"value": "nyc",
"synonyms": ["New York City", "nyc", "the big apple"]
}
],
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "NYC"
}
]
},
{
"text": "show me flights to nyc",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 22,
"value": "nyc"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert td.entity_synonyms["New York City"] == "nyc"
def cmp_message_list(firsts, seconds):
assert len(firsts) == len(seconds), "Message lists have unequal length"
def cmp_dict_list(firsts, seconds):
if len(firsts) != len(seconds):
return False
for a in firsts:
for idx, b in enumerate(seconds):
if hash(a) == hash(b):
del seconds[idx]
break
        else:  # for-else: no matching message was found for `a` in `seconds`
others = ", ".join([e.text for e in seconds])
assert False, "Failed to find message {} in {}".format(a.text, others)
return not seconds
@pytest.mark.parametrize(
"data_file,gold_standard_file,output_format,language",
[
(
"data/examples/wit/demo-flights.json",
"data/test/wit_converted_to_rasa.json",
"json",
None,
),
(
"data/examples/luis/demo-restaurants.json",
"data/test/luis_converted_to_rasa.json",
"json",
None,
),
(
"data/examples/dialogflow/",
"data/test/dialogflow_en_converted_to_rasa.json",
"json",
"en",
),
(
"data/examples/dialogflow/",
"data/test/dialogflow_es_converted_to_rasa.json",
"json",
"es",
),
(
"data/examples/rasa/demo-rasa.md",
"data/test/md_converted_to_json.json",
"json",
None,
),
(
"data/examples/rasa/demo-rasa.json",
"data/test/json_converted_to_md.md",
"md",
None,
),
(
"data/test/training_data_containing_special_chars.json",
"data/test/json_with_special_chars_convered_to_md.md",
"md",
None,
),
],
)
def test_training_data_conversion(
tmpdir, data_file, gold_standard_file, output_format, language
):
out_path = tmpdir.join("rasa_nlu_data.json")
convert_training_data(data_file, out_path.strpath, output_format, language)
td = training_data.load_data(out_path.strpath, language)
assert td.entity_examples != []
assert td.intent_examples != []
gold_standard = training_data.load_data(gold_standard_file, language)
cmp_message_list(td.entity_examples, gold_standard.entity_examples)
cmp_message_list(td.intent_examples, gold_standard.intent_examples)
assert td.entity_synonyms == gold_standard.entity_synonyms
    # Convert the converted file back to the original format
    # and run the same checks on the round-trip result.
rto_path = tmpdir.join("data_in_original_format.txt")
convert_training_data(out_path.strpath, rto_path.strpath, "json", language)
rto = training_data.load_data(rto_path.strpath, language)
cmp_message_list(gold_standard.entity_examples, rto.entity_examples)
cmp_message_list(gold_standard.intent_examples, rto.intent_examples)
assert gold_standard.entity_synonyms == rto.entity_synonyms
    # If the above assert fails, this can be used to dump the data
    # to file and diff it using git:
    # with io.open(gold_standard_file) as f:
    #     f.write(td.as_json(indent=2))
def test_url_data_format():
data = """
{
"rasa_nlu_data": {
"entity_synonyms": [
{
"value": "nyc",
"synonyms": ["New York City", "nyc", "the big apple"]
}
],
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "NYC"
}
]
}
]
}
}"""
fname = io_utils.create_temporary_file(
data.encode("utf-8"), suffix="_tmp_training_data.json", mode="w+b"
)
data = io_utils.read_json_file(fname)
assert data is not None
validate_rasa_nlu_data(data)
def test_markdown_entity_regex():
r = MarkdownReader()
md = """
## intent:restaurant_search
- i'm looking for a place to eat
- i'm looking for a place in the [north](loc-direction) of town
- show me [chines](cuisine:chinese) restaurants
- show me [chines](22_ab-34*3.A:43er*+?df) restaurants
"""
result = r.reads(md)
assert len(result.training_examples) == 4
first = result.training_examples[0]
assert first.data == {"intent": "restaurant_search"}
assert first.text == "i'm looking for a place to eat"
second = result.training_examples[1]
assert second.data == {
"intent": "restaurant_search",
"entities": [
{"start": 31, "end": 36, "value": "north", "entity": "loc-direction"}
],
}
assert second.text == "i'm looking for a place in the north of town"
third = result.training_examples[2]
assert third.data == {
"intent": "restaurant_search",
"entities": [{"start": 8, "end": 14, "value": "chinese", "entity": "cuisine"}],
}
assert third.text == "show me chines restaurants"
fourth = result.training_examples[3]
assert fourth.data == {
"intent": "restaurant_search",
"entities": [
{"start": 8, "end": 14, "value": "43er*+?df", "entity": "22_ab-34*3.A"}
],
}
assert fourth.text == "show me chines restaurants"
def test_get_file_format():
fformat = get_file_format("data/examples/luis/demo-restaurants.json")
assert fformat == "json"
fformat = get_file_format("data/examples")
assert fformat == "json"
fformat = get_file_format("examples/restaurantbot/data/nlu.md")
assert fformat == "md"
with pytest.raises(AttributeError):
get_file_format("path-does-not-exists")
with pytest.raises(AttributeError):
get_file_format(None)
def test_guess_format_from_non_existing_file_path():
assert guess_format("not existing path") == UNK
def test_load_data_from_non_existing_file():
with pytest.raises(ValueError):
load_data("some path")
def test_is_empty():
assert TrainingData().is_empty()
def test_markdown_empty_section():
data = training_data.load_data(
"data/test/markdown_single_sections/empty_section.md"
)
assert data.regex_features == [{"name": "greet", "pattern": r"hey[^\s]*"}]
assert not data.entity_synonyms
assert len(data.lookup_tables) == 1
assert data.lookup_tables[0]["name"] == "chinese"
assert "Chinese" in data.lookup_tables[0]["elements"]
assert "Chines" in data.lookup_tables[0]["elements"]
def test_markdown_not_existing_section():
with pytest.raises(ValueError):
training_data.load_data(
"data/test/markdown_single_sections/not_existing_section.md"
)
| 30.249578
| 88
| 0.606088
|
import pytest
import tempfile
from jsonschema import ValidationError
from rasa.nlu import training_data
from rasa.nlu.convert import convert_training_data
from rasa.nlu.extractors.mitie_entity_extractor import MitieEntityExtractor
from rasa.nlu.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from rasa.nlu.training_data import TrainingData
from rasa.nlu.training_data.formats import MarkdownReader
from rasa.nlu.training_data.formats.rasa import validate_rasa_nlu_data
from rasa.nlu.training_data.loading import guess_format, UNK, load_data
from rasa.nlu.training_data.util import get_file_format
import rasa.utils.io as io_utils
def test_example_training_data_is_valid():
demo_json = "data/examples/rasa/demo-rasa.json"
data = io_utils.read_json_file(demo_json)
validate_rasa_nlu_data(data)
@pytest.mark.parametrize(
"invalid_data",
[
{"wrong_top_level": []},
["this is not a toplevel dict"],
{
"rasa_nlu_data": {
"common_examples": [{"intent": "some example without text"}]
}
},
{
"rasa_nlu_data": {
"common_examples": [
{
"text": "mytext",
"entities": [{"start": "INVALID", "end": 0, "entity": "x"}],
}
]
}
},
],
)
def test_validation_is_throwing_exceptions(invalid_data):
with pytest.raises(ValidationError):
validate_rasa_nlu_data(invalid_data)
def test_luis_data():
td = training_data.load_data("data/examples/luis/demo-restaurants.json")
assert not td.is_empty()
assert len(td.entity_examples) == 8
assert len(td.intent_examples) == 28
assert len(td.training_examples) == 28
assert td.entity_synonyms == {}
assert td.intents == {"affirm", "goodbye", "greet", "inform"}
assert td.entities == {"location", "cuisine"}
def test_wit_data():
td = training_data.load_data("data/examples/wit/demo-flights.json")
assert not td.is_empty()
assert len(td.entity_examples) == 4
assert len(td.intent_examples) == 1
assert len(td.training_examples) == 4
assert td.entity_synonyms == {}
assert td.intents == {"flight_booking"}
assert td.entities == {"location", "datetime"}
def test_dialogflow_data():
td = training_data.load_data("data/examples/dialogflow/")
assert not td.is_empty()
assert len(td.entity_examples) == 5
assert len(td.intent_examples) == 24
assert len(td.training_examples) == 24
assert len(td.lookup_tables) == 2
assert td.intents == {"affirm", "goodbye", "hi", "inform"}
assert td.entities == {"cuisine", "location"}
non_trivial_synonyms = {k: v for k, v in td.entity_synonyms.items() if k != v}
assert non_trivial_synonyms == {
"mexico": "mexican",
"china": "chinese",
"india": "indian",
}
assert {td.lookup_tables[0]["name"], td.lookup_tables[1]["name"]} == {
"location",
"cuisine",
}
assert {
len(td.lookup_tables[0]["elements"]),
len(td.lookup_tables[1]["elements"]),
} == {4, 6}
def test_lookup_table_json():
lookup_fname = "data/test/lookup_tables/plates.txt"
td_lookup = training_data.load_data("data/test/lookup_tables/lookup_table.json")
assert not td_lookup.is_empty()
assert td_lookup.lookup_tables[0]["name"] == "plates"
assert td_lookup.lookup_tables[0]["elements"] == lookup_fname
assert td_lookup.lookup_tables[1]["name"] == "drinks"
assert td_lookup.lookup_tables[1]["elements"] == [
"mojito",
"lemonade",
"sweet berry wine",
"tea",
"club mate",
]
def test_lookup_table_md():
lookup_fname = "data/test/lookup_tables/plates.txt"
td_lookup = training_data.load_data("data/test/lookup_tables/lookup_table.md")
assert not td_lookup.is_empty()
assert td_lookup.lookup_tables[0]["name"] == "plates"
assert td_lookup.lookup_tables[0]["elements"] == lookup_fname
assert td_lookup.lookup_tables[1]["name"] == "drinks"
assert td_lookup.lookup_tables[1]["elements"] == [
"mojito",
"lemonade",
"sweet berry wine",
"tea",
"club mate",
]
@pytest.mark.parametrize(
"files",
[
[
"data/examples/rasa/demo-rasa.json",
"data/examples/rasa/demo-rasa-responses.md",
],
[
"data/examples/rasa/demo-rasa.md",
"data/examples/rasa/demo-rasa-responses.md",
],
],
)
def test_demo_data(files):
from rasa.importers.utils import training_data_from_paths
td = training_data_from_paths(files, language="en")
assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye", "chitchat"}
assert td.entities == {"location", "cuisine"}
assert td.responses == {"I am Mr. Bot", "It's sunny where I live"}
assert len(td.training_examples) == 46
assert len(td.intent_examples) == 46
assert len(td.response_examples) == 4
assert len(td.entity_examples) == 11
assert len(td.nlg_stories) == 2
assert td.entity_synonyms == {
"Chines": "chinese",
"Chinese": "chinese",
"chines": "chinese",
"vegg": "vegetarian",
"veggie": "vegetarian",
}
assert td.regex_features == [
{"name": "greet", "pattern": r"hey[^\s]*"},
{"name": "zipcode", "pattern": r"[0-9]{5}"},
]
@pytest.mark.parametrize(
"filepaths",
[["data/examples/rasa/demo-rasa.md", "data/examples/rasa/demo-rasa-responses.md"]],
)
def test_train_test_split(filepaths):
from rasa.importers.utils import training_data_from_paths
td = training_data_from_paths(filepaths, language="en")
assert td.intents == {"affirm", "greet", "restaurant_search", "goodbye", "chitchat"}
assert td.entities == {"location", "cuisine"}
assert len(td.training_examples) == 46
assert len(td.intent_examples) == 46
td_train, td_test = td.train_test_split(train_frac=0.8)
assert len(td_train.training_examples) == 35
assert len(td_test.training_examples) == 11
@pytest.mark.parametrize(
"files",
[
("data/examples/rasa/demo-rasa.json", "data/test/multiple_files_json"),
("data/examples/rasa/demo-rasa.md", "data/test/multiple_files_markdown"),
],
)
def test_data_merging(files):
td_reference = training_data.load_data(files[0])
td = training_data.load_data(files[1])
assert len(td.entity_examples) == len(td_reference.entity_examples)
assert len(td.intent_examples) == len(td_reference.intent_examples)
assert len(td.training_examples) == len(td_reference.training_examples)
assert td.intents == td_reference.intents
assert td.entities == td_reference.entities
assert td.entity_synonyms == td_reference.entity_synonyms
assert td.regex_features == td_reference.regex_features
def test_markdown_single_sections():
td_regex_only = training_data.load_data(
"data/test/markdown_single_sections/regex_only.md"
)
assert td_regex_only.regex_features == [{"name": "greet", "pattern": r"hey[^\s]*"}]
td_syn_only = training_data.load_data(
"data/test/markdown_single_sections/synonyms_only.md"
)
assert td_syn_only.entity_synonyms == {"Chines": "chinese", "Chinese": "chinese"}
def test_repeated_entities():
data = """
{
"rasa_nlu_data": {
"common_examples" : [
{
"text": "book a table today from 3 to 6 for 3 people",
"intent": "unk",
"entities": [
{
"entity": "description",
"start": 35,
"end": 36,
"value": "3"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
tokens = WhitespaceTokenizer().tokenize(example.text)
start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens)
assert start == 9
assert end == 10
def test_multiword_entities():
data = """
{
"rasa_nlu_data": {
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "New York City"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
tokens = WhitespaceTokenizer().tokenize(example.text)
start, end = MitieEntityExtractor.find_entity(entities[0], example.text, tokens)
assert start == 4
assert end == 7
def test_nonascii_entities():
data = """
{
"luis_schema_version": "2.0",
"utterances" : [
{
"text": "I am looking for a ßäæ ?€ö) item",
"intent": "unk",
"entities": [
{
"entity": "description",
"startPos": 19,
"endPos": 26
}
]
}
]
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert len(td.entity_examples) == 1
example = td.entity_examples[0]
entities = example.get("entities")
assert len(entities) == 1
entity = entities[0]
assert entity["value"] == "ßäæ ?€ö)"
assert entity["start"] == 19
assert entity["end"] == 27
assert entity["entity"] == "description"
def test_entities_synonyms():
data = """
{
"rasa_nlu_data": {
"entity_synonyms": [
{
"value": "nyc",
"synonyms": ["New York City", "nyc", "the big apple"]
}
],
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "NYC"
}
]
},
{
"text": "show me flights to nyc",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 22,
"value": "nyc"
}
]
}
]
}
}"""
with tempfile.NamedTemporaryFile(suffix="_tmp_training_data.json") as f:
f.write(data.encode("utf-8"))
f.flush()
td = training_data.load_data(f.name)
assert td.entity_synonyms["New York City"] == "nyc"
def cmp_message_list(firsts, seconds):
assert len(firsts) == len(seconds), "Message lists have unequal length"
def cmp_dict_list(firsts, seconds):
if len(firsts) != len(seconds):
return False
for a in firsts:
for idx, b in enumerate(seconds):
if hash(a) == hash(b):
del seconds[idx]
break
else:
others = ", ".join([e.text for e in seconds])
assert False, "Failed to find message {} in {}".format(a.text, others)
return not seconds
@pytest.mark.parametrize(
"data_file,gold_standard_file,output_format,language",
[
(
"data/examples/wit/demo-flights.json",
"data/test/wit_converted_to_rasa.json",
"json",
None,
),
(
"data/examples/luis/demo-restaurants.json",
"data/test/luis_converted_to_rasa.json",
"json",
None,
),
(
"data/examples/dialogflow/",
"data/test/dialogflow_en_converted_to_rasa.json",
"json",
"en",
),
(
"data/examples/dialogflow/",
"data/test/dialogflow_es_converted_to_rasa.json",
"json",
"es",
),
(
"data/examples/rasa/demo-rasa.md",
"data/test/md_converted_to_json.json",
"json",
None,
),
(
"data/examples/rasa/demo-rasa.json",
"data/test/json_converted_to_md.md",
"md",
None,
),
(
"data/test/training_data_containing_special_chars.json",
"data/test/json_with_special_chars_convered_to_md.md",
"md",
None,
),
],
)
def test_training_data_conversion(
tmpdir, data_file, gold_standard_file, output_format, language
):
out_path = tmpdir.join("rasa_nlu_data.json")
convert_training_data(data_file, out_path.strpath, output_format, language)
td = training_data.load_data(out_path.strpath, language)
assert td.entity_examples != []
assert td.intent_examples != []
gold_standard = training_data.load_data(gold_standard_file, language)
cmp_message_list(td.entity_examples, gold_standard.entity_examples)
cmp_message_list(td.intent_examples, gold_standard.intent_examples)
assert td.entity_synonyms == gold_standard.entity_synonyms
    # Convert the converted file back to the original format
    # and run the same checks on the round-trip result.
rto_path = tmpdir.join("data_in_original_format.txt")
convert_training_data(out_path.strpath, rto_path.strpath, "json", language)
rto = training_data.load_data(rto_path.strpath, language)
cmp_message_list(gold_standard.entity_examples, rto.entity_examples)
cmp_message_list(gold_standard.intent_examples, rto.intent_examples)
assert gold_standard.entity_synonyms == rto.entity_synonyms
    # If the above assert fails, this can be used to dump the data
    # to file and diff it using git:
    # with io.open(gold_standard_file) as f:
    #     f.write(td.as_json(indent=2))
def test_url_data_format():
data = """
{
"rasa_nlu_data": {
"entity_synonyms": [
{
"value": "nyc",
"synonyms": ["New York City", "nyc", "the big apple"]
}
],
"common_examples" : [
{
"text": "show me flights to New York City",
"intent": "unk",
"entities": [
{
"entity": "destination",
"start": 19,
"end": 32,
"value": "NYC"
}
]
}
]
}
}"""
fname = io_utils.create_temporary_file(
data.encode("utf-8"), suffix="_tmp_training_data.json", mode="w+b"
)
data = io_utils.read_json_file(fname)
assert data is not None
validate_rasa_nlu_data(data)
def test_markdown_entity_regex():
r = MarkdownReader()
md = """
## intent:restaurant_search
- i'm looking for a place to eat
- i'm looking for a place in the [north](loc-direction) of town
- show me [chines](cuisine:chinese) restaurants
- show me [chines](22_ab-34*3.A:43er*+?df) restaurants
"""
result = r.reads(md)
assert len(result.training_examples) == 4
first = result.training_examples[0]
assert first.data == {"intent": "restaurant_search"}
assert first.text == "i'm looking for a place to eat"
second = result.training_examples[1]
assert second.data == {
"intent": "restaurant_search",
"entities": [
{"start": 31, "end": 36, "value": "north", "entity": "loc-direction"}
],
}
assert second.text == "i'm looking for a place in the north of town"
third = result.training_examples[2]
assert third.data == {
"intent": "restaurant_search",
"entities": [{"start": 8, "end": 14, "value": "chinese", "entity": "cuisine"}],
}
assert third.text == "show me chines restaurants"
fourth = result.training_examples[3]
assert fourth.data == {
"intent": "restaurant_search",
"entities": [
{"start": 8, "end": 14, "value": "43er*+?df", "entity": "22_ab-34*3.A"}
],
}
assert fourth.text == "show me chines restaurants"
def test_get_file_format():
fformat = get_file_format("data/examples/luis/demo-restaurants.json")
assert fformat == "json"
fformat = get_file_format("data/examples")
assert fformat == "json"
fformat = get_file_format("examples/restaurantbot/data/nlu.md")
assert fformat == "md"
with pytest.raises(AttributeError):
get_file_format("path-does-not-exists")
with pytest.raises(AttributeError):
get_file_format(None)
def test_guess_format_from_non_existing_file_path():
assert guess_format("not existing path") == UNK
def test_load_data_from_non_existing_file():
with pytest.raises(ValueError):
load_data("some path")
def test_is_empty():
assert TrainingData().is_empty()
def test_markdown_empty_section():
data = training_data.load_data(
"data/test/markdown_single_sections/empty_section.md"
)
assert data.regex_features == [{"name": "greet", "pattern": r"hey[^\s]*"}]
assert not data.entity_synonyms
assert len(data.lookup_tables) == 1
assert data.lookup_tables[0]["name"] == "chinese"
assert "Chinese" in data.lookup_tables[0]["elements"]
assert "Chines" in data.lookup_tables[0]["elements"]
def test_markdown_not_existing_section():
with pytest.raises(ValueError):
training_data.load_data(
"data/test/markdown_single_sections/not_existing_section.md"
)
| true
| true
|
790bb397ed174d5bc527ff81c8425cde79b6a1f3
| 1,163
|
py
|
Python
|
my_blog/users/tests/test_forms.py
|
Tanishk-Sharma/my_blog
|
c6b24897b4d3745426749f5e6599e41f3f479d38
|
[
"MIT"
] | null | null | null |
my_blog/users/tests/test_forms.py
|
Tanishk-Sharma/my_blog
|
c6b24897b4d3745426749f5e6599e41f3f479d38
|
[
"MIT"
] | null | null | null |
my_blog/users/tests/test_forms.py
|
Tanishk-Sharma/my_blog
|
c6b24897b4d3745426749f5e6599e41f3f479d38
|
[
"MIT"
] | null | null | null |
"""
Module for all Form Tests.
"""
import pytest
from django.utils.translation import gettext_lazy as _
from my_blog.users.forms import UserCreationForm
from my_blog.users.models import User
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
"""
Test class for all tests related to the UserCreationForm
"""
def test_username_validation_error_msg(self, user: User):
"""
        Tests that the UserCreationForm's unique-username validator works
        correctly by checking that:
        1) A new user with an existing username cannot be added.
        2) Only one error is raised by the UserCreationForm.
        3) The expected error message is raised.
"""
# The user already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": user.username,
"password1": user.password,
"password2": user.password,
}
)
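        # password1 == password2 (the stored hash), so only the
        # username validator can fail here.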
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
assert form.errors["username"][0] == _("This username has already been taken.")
| 29.075
| 87
| 0.628547
|
import pytest
from django.utils.translation import gettext_lazy as _
from my_blog.users.forms import UserCreationForm
from my_blog.users.models import User
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_username_validation_error_msg(self, user: User):
form = UserCreationForm(
{
"username": user.username,
"password1": user.password,
"password2": user.password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
assert form.errors["username"][0] == _("This username has already been taken.")
| true
| true
|
790bb3da0331fc7632f49bd52fac6e12ea0f1c75
| 8,087
|
py
|
Python
|
loss.py
|
miroozyx/Magin-Based-loss
|
fedb43af495d60079fe87ecee8b4ad1c59e17cdc
|
[
"Apache-2.0"
] | 4
|
2020-09-03T16:16:09.000Z
|
2021-06-20T22:08:17.000Z
|
loss.py
|
miroozyx/Margin-Based-Loss
|
fedb43af495d60079fe87ecee8b4ad1c59e17cdc
|
[
"Apache-2.0"
] | null | null | null |
loss.py
|
miroozyx/Margin-Based-Loss
|
fedb43af495d60079fe87ecee8b4ad1c59e17cdc
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import pairwise_distance
def dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1):
"""
Distance weighted sampling.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
# Arguments:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. The multiplier used to enlarge the number of negative and positive samples.
Returns:
a_indices: indices of anchors.
anchors: sampled anchor embeddings.
positives: sampled positive embeddings.
negatives: sampled negative embeddings.
"""
if not isinstance(neg_multiplier, int):
raise ValueError("`neg_multiplier` must be an integer.")
n = tf.size(labels)
if not isinstance(embeddings, tf.Tensor):
embeddings = tf.convert_to_tensor(embeddings)
d = embeddings.shape[1].value
distances = pairwise_distance(embeddings, squared=False)
    # cut off to avoid high gradient variance.
distances = tf.maximum(distances, high_var_threshold)
    # subtract the max log-weight for numerical stability
log_weights = (2 - d) * tf.log(distances + 1e-16) - 0.5 * (d - 3) * tf.log(1 + 1e-16 - 0.25 * (distances**2))
weights = tf.exp(log_weights - tf.reduce_max(log_weights))
# sample only negative examples by setting weights of the same class examples to 0.
lshape = tf.shape(labels)
assert lshape.shape == 1
labels = tf.reshape(labels, [lshape[0], 1])
adjacency = tf.equal(labels, tf.transpose(labels))
adjacency_not = tf.logical_not(adjacency)
mask = tf.cast(adjacency_not, tf.float32)
# number of negative/positive samples to sampling per sample.
# For imbalanced data, this sampling method can be a sample weighted method.
adjacency_ex = tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))
m = tf.reduce_sum(adjacency_ex, axis=1)
if tf.reduce_min(m) == 0:
m = tf.diag(tf.cast(tf.equal(m,0), tf.int32))
adjacency_ex += m
k = tf.maximum(tf.reduce_max(m),1) * neg_multiplier
pos_weights = tf.cast(adjacency_ex, tf.float32)
weights = weights * mask * tf.cast(distances < nonzero_loss_threshold, tf.float32)
weights = weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)
# anchors indices
a_indices = tf.reshape(tf.range(n), (-1,1))
a_indices = tf.tile(a_indices, [1, k])
a_indices = tf.reshape(a_indices, (-1,))
# negative sampling
def neg_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32)
n_indices = tf.reshape(n_indices, (-1,))
    # positive sampling
def pos_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(pos_weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32)
p_indices = tf.reshape(p_indices, (-1,))
anchors = tf.gather(embeddings, a_indices, name='gather_anchors')
positives = tf.gather(embeddings, p_indices, name='gather_pos')
negatives = tf.gather(embeddings, n_indices, name='gather_neg')
return a_indices, anchors, positives, negatives
def margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
"""
Computes the margin base loss.
# References
- [sampling matters in deep embedding learning]
(https://arxiv.org/abs/1706.07567)
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
beta_in: float,int or 1-D, float `Tensor` with shape [labels_size] of multi-class boundary parameters.
margin: Float, margin term in the loss function.
nu: float. Regularization parameter for beta.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. The multiplier used to enlarge the number of negative and positive samples.
Returns:
margin_based_Loss: tf.float32 scalar
"""
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
if isinstance(beta_in, (float,int)):
beta = beta_in
beta_reg_loss = 0.0
else:
if isinstance(beta_in, tf.Tensor):
assert tf.shape(beta_in).shape == 1
k = tf.size(a_indices) / tf.size(labels)
k = tf.cast(k, tf.int32)
beta = tf.reshape(beta_in, (-1, 1))
beta = tf.tile(beta, [1, k])
beta = tf.reshape(beta, (-1,))
beta_reg_loss = tf.reduce_sum(beta) * nu
else:
raise ValueError("`beta_in` must be one of [float, int, tf.Tensor].")
d_ap = tf.sqrt(tf.reduce_sum(tf.square(positives - anchors), axis=1) + 1e-16)
d_an = tf.sqrt(tf.reduce_sum(tf.square(negatives - anchors), axis=1) + 1e-16)
pos_loss = tf.maximum(margin + d_ap - beta, 0)
neg_loss = tf.maximum(margin + beta - d_an, 0)
pair_cnt = tf.cast(tf.size(a_indices), tf.float32)
# normalize based on the number of pairs
loss = (tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss) + beta_reg_loss) / pair_cnt
return loss
def distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
"""distance weighted sampling + triplet loss
Args:
labels: 1-D. tf.int32 `Tensor` with shape [batch_size] of multi-class integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should be l2 normalized.
margin: Float, margin term in the loss function.
squared: Boolean, whether or not to square the triplet distances.
high_var_threshold: float. cutoff for high gradient variance.
nonzero_loss_threshold: float. cutoff for non-zero loss zone.
        neg_multiplier: int, default=1. The multiplier used to enlarge the number of negative and positive samples.
Returns:
triplet_loss: tf.float32 scalar
"""
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
d_ap = tf.reduce_sum(tf.square(positives - anchors), axis=1)
d_an = tf.reduce_sum(tf.square(negatives - anchors), axis=1)
if not squared:
        d_ap = tf.sqrt(d_ap + 1e-16)
        d_an = tf.sqrt(d_an + 1e-16)
loss = tf.maximum(d_ap - d_an + margin, 0)
loss = tf.reduce_mean(loss)
return loss
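A minimal usage sketch (an editorial addition, not part of the original file): wiring margin_based_loss into a TF 1.x graph, which is what the tf.contrib import above implies. The toy dense layer, placeholder shapes, and hyperparameters are illustrative assumptions; the one hard requirement from the docstrings is that embeddings are l2-normalized.
import tensorflow as tf
labels_ph = tf.placeholder(tf.int32, shape=[None])         # integer class labels
inputs_ph = tf.placeholder(tf.float32, shape=[None, 128])  # raw feature vectors
emb = tf.layers.dense(inputs_ph, 64)                       # toy embedding network (assumed)
emb = tf.nn.l2_normalize(emb, axis=1)                      # the loss expects l2-normalized embeddings
loss = margin_based_loss(labels_ph, emb, beta_in=1.2, margin=0.2)
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)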
| 45.948864
| 128
| 0.632497
|
import tensorflow as tf
from tensorflow.contrib.losses.python.metric_learning.metric_loss_ops import pairwise_distance
def dist_weighted_sampling(labels, embeddings, high_var_threshold=0.5, nonzero_loss_threshold=1.4, neg_multiplier=1):
if not isinstance(neg_multiplier, int):
raise ValueError("`neg_multiplier` must be an integer.")
n = tf.size(labels)
if not isinstance(embeddings, tf.Tensor):
embeddings = tf.convert_to_tensor(embeddings)
d = embeddings.shape[1].value
distances = pairwise_distance(embeddings, squared=False)
distances = tf.maximum(distances, high_var_threshold)
log_weights = (2 - d) * tf.log(distances + 1e-16) - 0.5 * (d - 3) * tf.log(1 + 1e-16 - 0.25 * (distances**2))
weights = tf.exp(log_weights - tf.reduce_max(log_weights))
lshape = tf.shape(labels)
assert lshape.shape == 1
labels = tf.reshape(labels, [lshape[0], 1])
adjacency = tf.equal(labels, tf.transpose(labels))
adjacency_not = tf.logical_not(adjacency)
mask = tf.cast(adjacency_not, tf.float32)
adjacency_ex = tf.cast(adjacency, tf.int32) - tf.diag(tf.ones(n, dtype=tf.int32))
m = tf.reduce_sum(adjacency_ex, axis=1)
if tf.reduce_min(m) == 0:
m = tf.diag(tf.cast(tf.equal(m,0), tf.int32))
adjacency_ex += m
k = tf.maximum(tf.reduce_max(m),1) * neg_multiplier
pos_weights = tf.cast(adjacency_ex, tf.float32)
weights = weights * mask * tf.cast(distances < nonzero_loss_threshold, tf.float32)
weights = weights / (tf.reduce_sum(weights, axis=1, keepdims=True) + 1e-16)
a_indices = tf.reshape(tf.range(n), (-1,1))
a_indices = tf.tile(a_indices, [1, k])
a_indices = tf.reshape(a_indices, (-1,))
def neg_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
n_indices = tf.map_fn(neg_sampling, tf.range(n), dtype=tf.int32)
n_indices = tf.reshape(n_indices, (-1,))
def pos_sampling(i):
s = tf.squeeze(tf.multinomial(tf.log(tf.expand_dims(pos_weights[i] + 1e-16, axis=0)), k, output_dtype=tf.int32), axis=0)
return s
p_indices = tf.map_fn(pos_sampling, tf.range(n), dtype=tf.int32)
p_indices = tf.reshape(p_indices, (-1,))
anchors = tf.gather(embeddings, a_indices, name='gather_anchors')
positives = tf.gather(embeddings, p_indices, name='gather_pos')
negatives = tf.gather(embeddings, n_indices, name='gather_neg')
return a_indices, anchors, positives, negatives
def margin_based_loss(labels, embeddings, beta_in=1.0, margin=0.2, nu=0.0, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
if isinstance(beta_in, (float,int)):
beta = beta_in
beta_reg_loss = 0.0
else:
if isinstance(beta_in, tf.Tensor):
assert tf.shape(beta_in).shape == 1
k = tf.size(a_indices) / tf.size(labels)
k = tf.cast(k, tf.int32)
beta = tf.reshape(beta_in, (-1, 1))
beta = tf.tile(beta, [1, k])
beta = tf.reshape(beta, (-1,))
beta_reg_loss = tf.reduce_sum(beta) * nu
else:
raise ValueError("`beta_in` must be one of [float, int, tf.Tensor].")
d_ap = tf.sqrt(tf.reduce_sum(tf.square(positives - anchors), axis=1) + 1e-16)
d_an = tf.sqrt(tf.reduce_sum(tf.square(negatives - anchors), axis=1) + 1e-16)
pos_loss = tf.maximum(margin + d_ap - beta, 0)
neg_loss = tf.maximum(margin + beta - d_an, 0)
pair_cnt = tf.cast(tf.size(a_indices), tf.float32)
loss = (tf.reduce_sum(pos_loss) + tf.reduce_sum(neg_loss) + beta_reg_loss) / pair_cnt
return loss
def distance_weighted_triplet_loss(labels, embeddings, margin=1.0, squared=False, high_var_threshold=0.5,
nonzero_loss_threshold=1.4, neg_multiplier=1):
a_indices, anchors, positives, negatives = dist_weighted_sampling(labels,
embeddings,
high_var_threshold=high_var_threshold,
nonzero_loss_threshold=nonzero_loss_threshold,
neg_multiplier=neg_multiplier)
d_ap = tf.reduce_sum(tf.square(positives - anchors), axis=1)
d_an = tf.reduce_sum(tf.square(negatives - anchors), axis=1)
if not squared:
        d_ap = tf.sqrt(d_ap + 1e-16)
        d_an = tf.sqrt(d_an + 1e-16)
loss = tf.maximum(d_ap - d_an + margin, 0)
loss = tf.reduce_mean(loss)
return loss
| true
| true
|
790bb4670d9c988c89613051847c4e05a6a4ff6e
| 1,448
|
py
|
Python
|
category_encoders/__init__.py
|
RoyalTS/category_encoders
|
a810a4b7abfce9fc4eb7fc401e3d37f2c1c6e402
|
[
"BSD-3-Clause"
] | 1
|
2021-07-09T08:14:31.000Z
|
2021-07-09T08:14:31.000Z
|
category_encoders/__init__.py
|
RoyalTS/category_encoders
|
a810a4b7abfce9fc4eb7fc401e3d37f2c1c6e402
|
[
"BSD-3-Clause"
] | null | null | null |
category_encoders/__init__.py
|
RoyalTS/category_encoders
|
a810a4b7abfce9fc4eb7fc401e3d37f2c1c6e402
|
[
"BSD-3-Clause"
] | null | null | null |
"""
.. module:: category_encoders
:synopsis:
:platform:
"""
from category_encoders.backward_difference import BackwardDifferenceEncoder
from category_encoders.binary import BinaryEncoder
from category_encoders.count import CountEncoder
from category_encoders.hashing import HashingEncoder
from category_encoders.helmert import HelmertEncoder
from category_encoders.one_hot import OneHotEncoder
from category_encoders.ordinal import OrdinalEncoder
from category_encoders.sum_coding import SumEncoder
from category_encoders.polynomial import PolynomialEncoder
from category_encoders.basen import BaseNEncoder
from category_encoders.leave_one_out import LeaveOneOutEncoder
from category_encoders.target_encoder import TargetEncoder
from category_encoders.woe import WOEEncoder
from category_encoders.m_estimate import MEstimateEncoder
from category_encoders.james_stein import JamesSteinEncoder
from category_encoders.cat_boost import CatBoostEncoder
from category_encoders.glmm import GLMMEncoder
__version__ = '2.2.2'
__author__ = 'willmcginnis'
__all__ = [
'BackwardDifferenceEncoder',
'BinaryEncoder',
'CountEncoder',
'HashingEncoder',
'HelmertEncoder',
'OneHotEncoder',
'OrdinalEncoder',
'SumEncoder',
'PolynomialEncoder',
'BaseNEncoder',
'LeaveOneOutEncoder',
'TargetEncoder',
'WOEEncoder',
'MEstimateEncoder',
'JamesSteinEncoder',
'CatBoostEncoder',
'GLMMEncoder'
]
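A brief usage sketch (editorial addition): every encoder exported above follows the scikit-learn fit/transform convention. The column name and data below are illustrative.
import pandas as pd
from category_encoders import TargetEncoder
X = pd.DataFrame({"color": ["red", "blue", "red", "green"]})
y = pd.Series([1, 0, 1, 0])
enc = TargetEncoder(cols=["color"])  # encode 'color' with target statistics
X_encoded = enc.fit_transform(X, y)  # DataFrame with 'color' replaced by numeric values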
| 28.96
| 75
| 0.812155
|
from category_encoders.backward_difference import BackwardDifferenceEncoder
from category_encoders.binary import BinaryEncoder
from category_encoders.count import CountEncoder
from category_encoders.hashing import HashingEncoder
from category_encoders.helmert import HelmertEncoder
from category_encoders.one_hot import OneHotEncoder
from category_encoders.ordinal import OrdinalEncoder
from category_encoders.sum_coding import SumEncoder
from category_encoders.polynomial import PolynomialEncoder
from category_encoders.basen import BaseNEncoder
from category_encoders.leave_one_out import LeaveOneOutEncoder
from category_encoders.target_encoder import TargetEncoder
from category_encoders.woe import WOEEncoder
from category_encoders.m_estimate import MEstimateEncoder
from category_encoders.james_stein import JamesSteinEncoder
from category_encoders.cat_boost import CatBoostEncoder
from category_encoders.glmm import GLMMEncoder
__version__ = '2.2.2'
__author__ = 'willmcginnis'
__all__ = [
'BackwardDifferenceEncoder',
'BinaryEncoder',
'CountEncoder',
'HashingEncoder',
'HelmertEncoder',
'OneHotEncoder',
'OrdinalEncoder',
'SumEncoder',
'PolynomialEncoder',
'BaseNEncoder',
'LeaveOneOutEncoder',
'TargetEncoder',
'WOEEncoder',
'MEstimateEncoder',
'JamesSteinEncoder',
'CatBoostEncoder',
'GLMMEncoder'
]
| true
| true
|
790bb4e3fe5e16fb5a9d5d7f20428ad6ca73a505
| 167
|
py
|
Python
|
app/app/urls.py
|
AveraqeDev/django-react
|
2b081f7018be4e193f47d6267c96a1b7cfc816cc
|
[
"MIT"
] | null | null | null |
app/app/urls.py
|
AveraqeDev/django-react
|
2b081f7018be4e193f47d6267c96a1b7cfc816cc
|
[
"MIT"
] | 6
|
2021-03-18T22:00:46.000Z
|
2021-09-22T18:06:26.000Z
|
app/app/urls.py
|
AveraqeDev/django-react
|
2b081f7018be4e193f47d6267c96a1b7cfc816cc
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('frontend.urls')),
path('admin/', admin.site.urls),
]
| 20.875
| 39
| 0.688623
|
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('frontend.urls')),
path('admin/', admin.site.urls),
]
| true
| true
|
790bb81cbf85ab84246154564af2a426da40ed3c
| 1,447
|
py
|
Python
|
gui/addmealpopup.py
|
Penaz91/fjournal
|
0cf1634f67308f3491241d1bb250772ce4def2a0
|
[
"MIT"
] | null | null | null |
gui/addmealpopup.py
|
Penaz91/fjournal
|
0cf1634f67308f3491241d1bb250772ce4def2a0
|
[
"MIT"
] | null | null | null |
gui/addmealpopup.py
|
Penaz91/fjournal
|
0cf1634f67308f3491241d1bb250772ce4def2a0
|
[
"MIT"
] | null | null | null |
"""
This file is part of the FJournal Project.
Copyright © 2019-2020, Daniele Penazzo. All Rights Reserved.
The use of this code is governed by the MIT license attached.
See the LICENSE file for the full license.
Created on: 2020-07-10
Author: Penaz
"""
from tkinter import ttk
import tkinter as tk
from models import Meal
class AddMealPopup(ttk.Frame):
"""
Defines a popup for adding meals
"""
def __init__(self, master=None, session=None):
"""
Constructor of the class
"""
super().__init__(master)
self.master = master
self.grid(row=0, column=0)
self.session = session
self.mealname = tk.StringVar()
self.create_widgets()
def create_widgets(self):
"""
Creates the widgets for the popup
"""
self.meallbl = ttk.Label(self, text="Meal Name")
self.meallbl.grid(row=0, column=0)
self.mealinput = ttk.Entry(self, textvariable=self.mealname)
self.mealinput.grid(row=0, column=1)
self.addbtn = ttk.Button(self,
text="Confirm",
command=self.add_meal)
self.addbtn.grid(row=1, column=0, columnspan=2)
def add_meal(self):
"""
        Adds the meal to the database and closes the popup
"""
meal = Meal(name=self.mealname.get())
self.session.add(meal)
self.session.commit()
self.master.destroy()
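A hypothetical launch snippet (editorial addition): the popup needs a Tk container as `master` and a SQLAlchemy-style `session` exposing add() and commit(); how the session is built is not shown in this file, so it is assumed here.
import tkinter as tk
root = tk.Tk()
popup = tk.Toplevel(root)                    # window that add_meal() will destroy
AddMealPopup(master=popup, session=session)  # `session` is an already-open DB session (assumed)
root.mainloop()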
| 27.301887
| 68
| 0.595715
|
from tkinter import ttk
import tkinter as tk
from models import Meal
class AddMealPopup(ttk.Frame):
def __init__(self, master=None, session=None):
super().__init__(master)
self.master = master
self.grid(row=0, column=0)
self.session = session
self.mealname = tk.StringVar()
self.create_widgets()
def create_widgets(self):
self.meallbl = ttk.Label(self, text="Meal Name")
self.meallbl.grid(row=0, column=0)
self.mealinput = ttk.Entry(self, textvariable=self.mealname)
self.mealinput.grid(row=0, column=1)
self.addbtn = ttk.Button(self,
text="Confirm",
command=self.add_meal)
self.addbtn.grid(row=1, column=0, columnspan=2)
def add_meal(self):
meal = Meal(name=self.mealname.get())
self.session.add(meal)
self.session.commit()
self.master.destroy()
| true
| true
|
790bb8453995886052183835f511324b191aca37
| 1,827
|
py
|
Python
|
python-package/setup.py
|
ccgcyber/xlearn
|
ce92933de81b4372fbe54a597583c40ebb946c40
|
[
"Apache-2.0"
] | null | null | null |
python-package/setup.py
|
ccgcyber/xlearn
|
ce92933de81b4372fbe54a597583c40ebb946c40
|
[
"Apache-2.0"
] | null | null | null |
python-package/setup.py
|
ccgcyber/xlearn
|
ce92933de81b4372fbe54a597583c40ebb946c40
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 by contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""Setup xlearn package."""
from __future__ import absolute_import
import sys
import os
from setuptools import setup, find_packages
sys.path.insert(0, '.')
CURRENT_DIR = os.path.dirname(__file__)
libpath_py = os.path.join(CURRENT_DIR, 'xlearn/libpath.py')
libpath = {'__file__': libpath_py}
exec(compile(open(libpath_py, "rb").read(), libpath_py, 'exec'), libpath, libpath)
LIB_PATH = [os.path.relpath(libfile, CURRENT_DIR) for libfile in libpath['find_lib_path']()]
print("Install libxlearn_api from: %s" % LIB_PATH)
setup(name='xlearn',
version=open(os.path.join(CURRENT_DIR, 'xlearn/VERSION')).read().strip(),
description="xLearn Python Package",
maintainer='Chao Ma',
maintainer_email='mctt90@gmail.com',
zip_safe=False,
packages=find_packages(),
      # include additional files specified in MANIFEST.in during install
include_package_data=True,
install_requires=[
"numpy",
"scipy"
],
data_files=[('xlearn', LIB_PATH)],
license='Apache-2.0',
classifiers=['License :: OSI Approved :: Apache Software License'],
url='https://github.com/aksnzhy/xlearn')
| 37.285714
| 92
| 0.708265
|
from __future__ import absolute_import
import sys
import os
from setuptools import setup, find_packages
sys.path.insert(0, '.')
CURRENT_DIR = os.path.dirname(__file__)
libpath_py = os.path.join(CURRENT_DIR, 'xlearn/libpath.py')
libpath = {'__file__': libpath_py}
exec(compile(open(libpath_py, "rb").read(), libpath_py, 'exec'), libpath, libpath)
LIB_PATH = [os.path.relpath(libfile, CURRENT_DIR) for libfile in libpath['find_lib_path']()]
print("Install libxlearn_api from: %s" % LIB_PATH)
setup(name='xlearn',
version=open(os.path.join(CURRENT_DIR, 'xlearn/VERSION')).read().strip(),
description="xLearn Python Package",
maintainer='Chao Ma',
maintainer_email='mctt90@gmail.com',
zip_safe=False,
packages=find_packages(),
include_package_data=True,
install_requires=[
"numpy",
"scipy"
],
data_files=[('xlearn', LIB_PATH)],
license='Apache-2.0',
classifiers=['License :: OSI Approved :: Apache Software License'],
url='https://github.com/aksnzhy/xlearn')
| true
| true
|
790bb8862fefd39331fd276c49da96cfbe269f62
| 2,032
|
py
|
Python
|
src/tso/tsocli/tests/test_cli.py
|
elijah-ward/TSO
|
610565a32284cab23e9262c3431ce6d34116bfcf
|
[
"MIT"
] | 4
|
2018-11-05T21:36:08.000Z
|
2019-04-15T13:05:39.000Z
|
src/tso/tsocli/tests/test_cli.py
|
elijah-ward/TSO
|
610565a32284cab23e9262c3431ce6d34116bfcf
|
[
"MIT"
] | 2
|
2019-02-23T07:13:40.000Z
|
2019-04-07T17:50:44.000Z
|
src/tso/tsocli/tests/test_cli.py
|
elijah-ward/TSO
|
610565a32284cab23e9262c3431ce6d34116bfcf
|
[
"MIT"
] | 2
|
2020-12-09T07:03:09.000Z
|
2021-07-17T02:32:46.000Z
|
"""
CLI tests
"""
from tso.tsocli import __main__ as tsocli
import pytest
from unittest.mock import patch, MagicMock, mock_open
mock_configuration = "{}"
class TestCli:
def test_cli_should_exit_with_no_args(self):
with pytest.raises(SystemExit) as pytest_wrapped_e:
tsocli.main([])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
def test_cli_should_exit_with_only_one_arg(self):
with pytest.raises(SystemExit) as pytest_wrapped_e_pseudo_name:
tsocli.main(['s'])
with pytest.raises(SystemExit) as pytest_wrapped_e_full_name:
tsocli.main(['schedule'])
# Both Exceptions should be the same
assert pytest_wrapped_e_pseudo_name.type == pytest_wrapped_e_full_name.type
assert pytest_wrapped_e_pseudo_name.value.code == pytest_wrapped_e_full_name.value.code
# The exceptions should be a System Exit
assert pytest_wrapped_e_pseudo_name.type == SystemExit
assert pytest_wrapped_e_pseudo_name.value.code == 1
    @patch('configuration.configuration_parser.parse', return_value=mock_configuration)
@patch('tso.tsocli.command.cli_pipeline')
def test_cli_should_call_pipeline_when_successful(self, mock_pipeline, mock_config_parser):
tsocli.main([
'schedule',
'--start-date-time',
'2019-03-01 19:00',
'--end-date-time',
'2019-03-12 19:00',
'--export-to-file',
'--export-to-browser'
])
assert mock_pipeline.called
    @patch('configuration.configuration_parser.parse', return_value=mock_configuration)
@patch('tso.tsocli.command.cli_pipeline')
def test_cli_should_have_default_date_time_values(self, mock_pipeline, mock_config_parser):
tsocli.main([
'schedule',
'--export-to-file'
])
assert mock_pipeline.call_args.start_date_time
assert mock_pipeline.call_args.end_date_time
| 30.787879
| 95
| 0.683071
|
from tso.tsocli import __main__ as tsocli
import pytest
from unittest.mock import patch, MagicMock, mock_open
mock_configuration = "{}"
class TestCli:
def test_cli_should_exit_with_no_args(self):
with pytest.raises(SystemExit) as pytest_wrapped_e:
tsocli.main([])
assert pytest_wrapped_e.type == SystemExit
assert pytest_wrapped_e.value.code == 1
def test_cli_should_exit_with_only_one_arg(self):
with pytest.raises(SystemExit) as pytest_wrapped_e_pseudo_name:
tsocli.main(['s'])
with pytest.raises(SystemExit) as pytest_wrapped_e_full_name:
tsocli.main(['schedule'])
assert pytest_wrapped_e_pseudo_name.type == pytest_wrapped_e_full_name.type
assert pytest_wrapped_e_pseudo_name.value.code == pytest_wrapped_e_full_name.value.code
assert pytest_wrapped_e_pseudo_name.type == SystemExit
assert pytest_wrapped_e_pseudo_name.value.code == 1
    @patch('configuration.configuration_parser.parse', return_value=mock_configuration)
@patch('tso.tsocli.command.cli_pipeline')
def test_cli_should_call_pipeline_when_successful(self, mock_pipeline, mock_config_parser):
tsocli.main([
'schedule',
'--start-date-time',
'2019-03-01 19:00',
'--end-date-time',
'2019-03-12 19:00',
'--export-to-file',
'--export-to-browser'
])
assert mock_pipeline.called
    @patch('configuration.configuration_parser.parse', return_value=mock_configuration)
@patch('tso.tsocli.command.cli_pipeline')
def test_cli_should_have_default_date_time_values(self, mock_pipeline, mock_config_parser):
tsocli.main([
'schedule',
'--export-to-file'
])
assert mock_pipeline.call_args.start_date_time
assert mock_pipeline.call_args.end_date_time
| true
| true
|
790bb8fbde9d9b9885de31de29198c0e07b9c0c6
| 3,552
|
py
|
Python
|
dask/bag/random.py
|
sdementen/dask
|
781b3eb5626f3cc74c7b4c69187f5cd941513a39
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T02:44:21.000Z
|
2019-01-31T02:44:21.000Z
|
dask/bag/random.py
|
sdementen/dask
|
781b3eb5626f3cc74c7b4c69187f5cd941513a39
|
[
"BSD-3-Clause"
] | 37
|
2020-10-20T08:30:53.000Z
|
2020-12-22T13:15:45.000Z
|
dask/bag/random.py
|
sdementen/dask
|
781b3eb5626f3cc74c7b4c69187f5cd941513a39
|
[
"BSD-3-Clause"
] | 1
|
2019-01-31T02:44:12.000Z
|
2019-01-31T02:44:12.000Z
|
import heapq
import math
import random as rnd
from functools import partial
from .core import Bag
def sample(population, k):
"""Chooses k unique random elements from a bag.
Returns a new bag containing elements from the population while
leaving the original population unchanged.
Parameters
----------
population: Bag
Elements to sample.
    k: integer
        Number of elements to sample.
Examples
--------
>>> import dask.bag as db # doctest: +SKIP
... from dask.bag import random
...
... b = db.from_sequence(range(5), npartitions=2)
... list(random.sample(b, 3).compute())
    [1, 3, 4]
"""
return _sample(population=population, k=k, replace=False)
def choices(population, k=1):
"""
    Return a k-sized list of elements chosen with replacement.
Parameters
----------
population: Bag
Elements to sample.
k: integer, optional
Number of elements to sample.
Examples
--------
>>> import dask.bag as db # doctest: +SKIP
... from dask.bag import random
...
... b = db.from_sequence(range(5), npartitions=2)
... list(random.choices(b, 3).compute())
    [1, 1, 4]
"""
return _sample(population=population, k=k, replace=True)
def _sample(population, k, replace=False):
return population.reduction(
partial(_sample_map_partitions, k=k, replace=replace),
partial(_sample_reduce, k=k, replace=replace),
out_type=Bag,
)
def _sample_map_partitions(population, k, replace):
"""
Map function used on the sample and choices functions.
Parameters
----------
population : list
List of elements to sample.
    k : int
        Number of elements to sample.
    replace : bool
        Whether to sample with replacement.
    Returns
    -------
    sampled: list
        List of sampled elements from the partition.
    lx: int
        Number of elements in the partition.
"""
lx = len(population)
real_k = k if k <= lx else lx
sample_func = rnd.choices if replace else rnd.sample
# because otherwise it raises IndexError:
sampled = [] if real_k == 0 else sample_func(population=population, k=real_k)
return sampled, lx
def _sample_reduce(reduce_iter, k, replace):
"""
Reduce function used on the sample and choice functions.
Parameters
----------
reduce_iter : iterable
        Each element is a tuple generated by the _sample_map_partitions function.
    Returns
    -------
    A sequence of k uniformly distributed samples drawn from the whole population.
"""
ns_ks = []
s = []
n = 0
# unfolding reduce outputs
for i in reduce_iter:
(s_i, n_i) = i
s.extend(s_i)
n += n_i
k_i = len(s_i)
ns_ks.append((n_i, k_i))
if k < 0 or (k > n and not replace):
raise ValueError("Sample larger than population or is negative")
# creating the probability array
p = []
for n_i, k_i in ns_ks:
if k_i > 0:
p_i = n_i / (k_i * n)
p += [p_i] * k_i
sample_func = rnd.choices if replace else _weighted_sampling_without_replacement
return sample_func(population=s, weights=p, k=k)
def _weighted_sampling_without_replacement(population, weights, k):
"""
Source:
Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis
"""
elt = [(math.log(rnd.random()) / weights[i], i) for i in range(len(weights))]
return [population[x[1]] for x in heapq.nlargest(k, elt)]
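A small end-to-end sketch (editorial addition) of the two public helpers; the printed values are illustrative since the draws are random.
import dask.bag as db
b = db.from_sequence(range(10), npartitions=3)
print(sample(b, 4).compute())     # 4 unique elements, e.g. (2, 5, 7, 9)
print(choices(b, k=4).compute())  # 4 draws with replacement, repeats possible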
| 26.117647
| 89
| 0.623592
|
import heapq
import math
import random as rnd
from functools import partial
from .core import Bag
def sample(population, k):
return _sample(population=population, k=k, replace=False)
def choices(population, k=1):
return _sample(population=population, k=k, replace=True)
def _sample(population, k, replace=False):
return population.reduction(
partial(_sample_map_partitions, k=k, replace=replace),
partial(_sample_reduce, k=k, replace=replace),
out_type=Bag,
)
def _sample_map_partitions(population, k, replace):
lx = len(population)
real_k = k if k <= lx else lx
sample_func = rnd.choices if replace else rnd.sample
sampled = [] if real_k == 0 else sample_func(population=population, k=real_k)
return sampled, lx
def _sample_reduce(reduce_iter, k, replace):
ns_ks = []
s = []
n = 0
for i in reduce_iter:
(s_i, n_i) = i
s.extend(s_i)
n += n_i
k_i = len(s_i)
ns_ks.append((n_i, k_i))
if k < 0 or (k > n and not replace):
raise ValueError("Sample larger than population or is negative")
p = []
for n_i, k_i in ns_ks:
if k_i > 0:
p_i = n_i / (k_i * n)
p += [p_i] * k_i
sample_func = rnd.choices if replace else _weighted_sampling_without_replacement
return sample_func(population=s, weights=p, k=k)
def _weighted_sampling_without_replacement(population, weights, k):
elt = [(math.log(rnd.random()) / weights[i], i) for i in range(len(weights))]
return [population[x[1]] for x in heapq.nlargest(k, elt)]
| true
| true
|
790bb919c8c0cb69f353e3f17445f29461bc75d4
| 17,771
|
py
|
Python
|
twitter_countryGeo/twitter-geo/etool/queue.py
|
nwself/geocoding
|
0919dc2dc209a01a05930bfe21783fc324a584a0
|
[
"MIT"
] | 3
|
2018-03-13T00:51:24.000Z
|
2020-04-01T16:40:01.000Z
|
twitter_countryGeo/twitter-geo/etool/queue.py
|
nwself/geocoding
|
0919dc2dc209a01a05930bfe21783fc324a584a0
|
[
"MIT"
] | 2
|
2020-05-14T01:28:02.000Z
|
2020-09-24T21:56:38.000Z
|
twitter_countryGeo/twitter-geo/etool/queue.py
|
nwself/geocoding
|
0919dc2dc209a01a05930bfe21783fc324a584a0
|
[
"MIT"
] | 4
|
2018-03-13T00:03:48.000Z
|
2020-05-13T18:00:16.000Z
|
#!/usr/bin/env python
import sys
import json
import re
import logging
import os
import os.path
import codecs
import time
import conf
import logs
import kqueue
log = logging.getLogger(__name__)
# constant to select bind() for attaching the socket
BIND = 1
# constant to select connect() for attaching the socket
CONNECT = 2
SERVICE = ""
INITTED = False
KCONNECTION = None
def init(args=None):
# init logger
# load/get the config
# eventually this needs a search path for the config
# should be env(QFU_CONFIG);./queue.conf;/etc/embers/queue.conf;tcp://localhost:3473
# use 3473 as the global control channel
global SERVICE, INITTED
cf = None
conf.init(args)
if args and args.service:
SERVICE = args.service
else:
SERVICE = os.environ.get('UPSTART_JOB', "")
INITTED = True
def connect(force_new=False):
global KCONNECTION
if force_new:
return kqueue.connect()
else:
if not KCONNECTION:
KCONNECTION = kqueue.connect()
return KCONNECTION
class JsonMarshal(object):
def __init__(self, encoding='utf8', **kw):
# raises an error if you get a bogus encoding
codecs.lookup(encoding)
self.encoding = encoding
self.remove_newline = kw.get('remove_newline', False)
def encode(self, obj):
msg = json.dumps(obj, encoding=self.encoding, ensure_ascii=False)
# U+0085(Next Line), U+2028(Line Separator), U+2029(Paragraph Separator)
if self.remove_newline:
msg = re.sub(ur'[\u0085\u2028\u2029\n\r\f\v]+', ur'\\n', msg)
#msg = re.sub(ur'[\u0085\u2028\u2029\n\r\f\v]+|\\n|\\r|\\f|\\v', '\\n', msg)
#msg = msg.replace("…", '')
if isinstance(msg, str):
msg = unicode(msg)
return msg
def decode(self, data):
return json.loads(data, encoding=self.encoding)
#def send(self, socket, data, flags=0):
# socket.send_unicode(data, encoding=self.encoding, flags=flags)
#def recv(self, socket, flags=0):
# b = socket.recv(flags=flags)
# return unicode(b, encoding=self.encoding, errors='replace')
class UnicodeMarshal(JsonMarshal):
def __init__(self, **kw):
super(UnicodeMarshal, self).__init__(**kw)
def encode(self, obj):
return unicode(obj)
def decode(self, data):
# exception if this is not decodeable (str, stream etc.)
return unicode(data)
# send and recv are handled in JsonMarshall
class RawMarshal(object):
def encode(self, obj):
return obj
def decode(self, obj):
return obj
#def send(self, socket, data, flags=0):
# if isinstance(data, unicode):
# socket.send_unicode(data, flags)
# else:
# socket.send(data, flags=flags)
#def recv(self, socket, flags=0):
# return socket.recv(flags=flags)
class StreamCaptureProbe(object):
def __init__(self, encoding='utf8', stream=sys.stdout):
self._s = codecs.getwriter(encoding)(stream)
self._s.flush() # make sure its good
def __call__(self, action, message):
if action == Queue.SENT:
self._s.write(message)
self._s.write('\n')
self._s.flush()
class QueueStatsProbe(object):
def __init__(self, interval_min=5):
self.interval = datetime.timedelta(minutes=interval_min)
self.start = datetime.datetime.now()
self.sent_bytes = 0
self.sent_msg = 0
self.recv_bytes = 0
self.recv_msg = 0
def __call__(self, action, message):
if action == Queue.SENT:
self.sent_bytes += len(message)
self.sent_msg += 1
if action == Queue.RECEIVED:
self.recv_bytes += len(message)
self.recv_msg += 1
    # TODO - if delta is past the reporting period, emit the stats
class Queue(object):
"""Docstring for Queue """
SENT = 1
RECEIVED = 2
def __init__(self, ename, mode, qname="", no_ack=True, capture=False,
remove_newline=False, marshal=None, force_new_connection=False):
"""@todo: to be defined
:param ename: @todo
:param mode: @todo
:param qname: @todo
:param no_ack: @todo
:param capture: @todo
:param remove_newline: @todo
"""
if not INITTED:
log.warn("QUEUE INIT Not called, calling")
init()
self._ename = ename
self._mode = mode
self._qname = qname
self._no_ack = no_ack
self._probes = [] # probes for tracing events
self._last_poll = None
self._marshal = marshal or JsonMarshal()
self.connection = connect(force_new_connection)
if not isinstance(self._ename, list):
self._ename = [self._ename]
exclusive = (SERVICE == "")
self._exchanges = [kqueue.Exchange(e[0], type="fanout", durable=False) for e in self._ename]
self._queues = [kqueue.Queue(e[1], ex, exclusive=exclusive)
for e, ex in zip(self._ename, self._exchanges)]
self._name = [e[0] for e in self._ename]
def open(self):
"""@todo: Docstring for open
:returns: @todo
"""
if not INITTED:
init()
if "r" in self._mode:
self._queue = kqueue.KReadQueue(self.connection,
self._queues,
no_ack=self._no_ack,
queue_declare=True)
elif "w" in self._mode:
self._queue = kqueue.KWriteQueue(self.connection,
self._queues[0],
exchange_declare=True)
def read(self):
"""Reads one message from the queue
:returns: @todo
"""
if self._last_poll is not None:
msg = self._last_poll
self._last_poll = None
else:
msg = self._queue.get(block=True)
msg = msg.payload
self.notify(Queue.RECEIVED, msg)
msg = self._marshal.decode(msg)
return msg
def read_without_polling(self):
"""Reads socket without first polling it, guaranteed block if no data
exists.
:returns: @todo
"""
return self.read()
def poll(self, timeout=None, flags=0):
if self._last_poll is not None:
return True
else:
try:
msg = self._queue.get(block=True, timeout=timeout)
except kqueue.Empty:
msg = None
self._last_poll = msg
return self._last_poll is not None
def write(self, data):
"""@todo: Docstring for write
:param data: @todo
:returns: @todo
"""
data = self._marshal.encode(data)
self._queue.put(data)
self.notify(Queue.SENT, data)
def get_name(self):
if not self._name:
return None
elif isinstance(self._name, basestring):
return self._name
else:
return ",".join(self._name)
# be an iterator
# http://docs.python.org/library/stdtypes.html#iterator-types
def __iter__(self):
return self
def next(self):
return self.read()
# support contextmanager
# see http://docs.python.org/library/stdtypes.html#context-manager-types
# with queue.open(...) as q: ...
def __enter__(self):
return self
def __exit__(self, ex_type, ex_val, ex_trace):
self.close()
# tell any open control channels we are exiting
return False
def close(self):
"""@todo: Docstring for close
:returns: @todo
"""
pass
# probes for tracing messages
# this is how you can do dumps of messages as they are read/written
# and stuff like collecting metrics on messages
def add_probe(self, probe):
assert hasattr(probe, '__call__'), "Object must be callable."
self._probes.append(probe)
def notify(self, action, msg):
for p in self._probes:
try:
p(action, json.dumps(msg))
except KeyboardInterrupt:
raise
except:
log.exception('Failed to notify probe.')
class StreamQueue(object):
"""
An object to make a stream (typically stdin or stdout)
conform to the Queue interface so we can write code that treats
them interchangeably.
"""
def __init__(self, stream,
mode='r',
name=None,
encoding='utf8',
marshal=JsonMarshal(),
end_of_record='\n',
**ignore):
        assert stream, "Need a stream to read or write to."
assert marshal, "Need a message marshaller to encode and decode messages."
self._marshal = marshal
self.end_of_record = end_of_record
if encoding:
if mode == 'w':
self._stream = codecs.getwriter(encoding)(stream, 'replace')
else: # default read
self._stream = codecs.getreader(encoding)(stream, 'replace')
else: # accept what they give you
self._stream = stream
if not name:
self._name = None
else:
self._name = name
def get_name(self):
if not self._name:
return None
elif isinstance(self._name, basestring):
return self._name
else:
l = len(self._name)
if l == 1:
return self._name[0]
elif l > 1:
sout = self._name[0]
for i in range(1, l):
sout = sout + "," + self._name[i]
return sout
else:
return None
def poll(self, timeout=None, flags=0): # zmq.POLLIN):
raise NotImplementedError
def read(self, flags=0):
"""Read the next item from the stream.
This deals with blank lines and EOF by passing
on the values from the stream's read(). Blanks lines
are a string with a newline (and maybe other whitespace)
and EOF is returned as ''. I.e. not s.read() => EOF.
"""
msg = self._stream.readline()
if msg.strip(): # skip empty lines
return self._marshal.decode(msg)
else: # pass it on - blank line is '\n', EOF is ''
return msg
def write(self, obj, flags=0):
if not obj:
return
msg = self._marshal.encode(obj).strip()
self._stream.write(msg)
self._stream.write(self.end_of_record)
def __iter__(self):
self._iter = self._stream.__iter__()
return self
def next(self):
if self._iter:
msg = self._iter.next()
if msg.strip(): # skip blank lines
return self._marshal.decode(msg)
else:
return msg
else:
raise Exception('No iterator initialized')
def close(self): # No action necessary. Stubbed so this class can follow the usage patterns of other I/O classes
return
def __enter__(self):
self._ctx = self._stream.__enter__()
return self._ctx
def __exit__(self, ex_type, ex_val, ex_trace):
if self._ctx:
return self._ctx.__exit__()
else:
return False
def resolve_address(qname, qtype="r", attach=None):
"""
Resolve qname into a queue specification,
either from embers.conf or by treating it as a
fully qualified name if it is not in the conf.
Minimal check on form of fully qualified name.
The attach parameter overrides the default attachment type
(BIND or CONNECT) for queues doing special connections.
"""
#(host, port) = conf.get_queue_info(qname)
if qtype in ("w", ): # (zmq.PUB, zmq.REP):
result = (qname, "")
elif qtype in ("r", ):
result = (qname, SERVICE)
else:
assert False, "Invalid type, Queue no longer supports zmq"
return result
def get_conf_entry(qname):
"""
Return the entire JSON expression for a given qname.
"""
return conf.get_conf_entry(qname)
def open(name, mode='r', capture=False, service=None, exclusive=None, **kw):
"""
Open a queue with file-like semantics. E.g.:
q = open('sample-1', 'w') - publish
q = open('sample-1', 'r') - subscribe
options:
name - a queue name, either a full ZMQ-style URL or a name found in queue.conf
    mode - the queue open mode. One of r (SUB), w (PUB), r+ (REP), w+ (REQ).
marshal - class to use to marshal messages, default JsonMarshal
capture - capture and log messages as they are sent. Can be True, or a stream, or a Capture instance.
"""
# this is somewhat goofy, but once you have
# a metaphor you might as well run it into the ground
assert mode in {"r", "w"}, 'Mode %s is not a valid mode. Use one of r, w'
typ = mode
service = service or SERVICE
# special case '-' -> use stdin or stdout
if isinstance(name, list) and '-' in name or name == '-':
if mode in ('w', ):
s = sys.stdout
name = 'stdout'
else:
s = sys.stdin
name = 'stdin'
log.info('Reading from stdin' if name == 'stdin' else 'Writing to stdout')
return StreamQueue(s, name=name, mode=mode, **kw)
# normal queue case
if typ in ("w", ):
if not name:
name = conf.get_default_queue_names(service, 'out')
log.info('Writing to %s' % name)
else:
if not name:
name = conf.get_default_queue_names(service, 'in')
log.info('Reading from %s' % name)
if isinstance(name, basestring):
addr = [resolve_address(name,
qtype=typ,
attach=kw.get('attach', None))]
else:
addr = [resolve_address(n,
qtype=typ,
attach=kw.get('attach', None))
for n in name]
if "qname" in kw:
qname = kw["qname"]
addr = [(e[0], qname) for e in addr]
result = Queue(addr, typ, **kw)
assert addr, "Could not resolve an address from %s." % (name,)
result.open()
if capture:
result.add_probe(StreamCaptureProbe())
return result
def main():
"""
A little utility to handle reading and writing streams
to and from a queue.
--pub <queue> : publish what's read from stdin to <queue>
--sub <queue> : read from <queue> and write the messages to stdout
--cat : when used with --pub, write all published messages to stdout
--clean : check in incoming and outgoing messages.
Verify the message is correct JSON and add
an embersId if needed.
--log_file : Path to write the log file to
--log_level : Logging level
Other standard EMBERS options (e.g. --verbose).
"""
import args
import message
global log
ap = args.get_parser()
ap.add_argument('--clean', action="store_true",
help='Verify message format and add standard fields such as embersId.')
ap.add_argument('--addfeed', action="store_true", help='Add feed and feedPath fields to published message.')
ap.add_argument('--cat', action="store_true", help='Write all published messages to stdout.')
ap.add_argument('--rm', nargs="+", help="delete queue")
arg = ap.parse_args()
log = logs.getLogger(log_name=arg.log_file)
logs.init(arg, l=arg.log_level, logfile=arg.log_file)
init(arg)
if arg.rm and not arg.sub:
for queue in arg.rm:
print "Deleting", queue,
queue = kqueue.Queue(queue)
queue.maybe_bind(connect())
queue.delete()
print "."
return
try:
# need to use the raw/utf handler unless we are doing clean
marshal = UnicodeMarshal()
if arg.clean or arg.addfeed:
marshal = JsonMarshal()
if arg.sub is None and os.environ.get('UPSTART_JOB') is None:
arg.sub = '-' # stdin
subq = open(arg.sub, 'r') #, marshal=marshal, ssh_key=arg.ssh_key, ssh_conn=arg.tunnel)
if arg.pub is None and os.environ.get('UPSTART_JOB') is None:
arg.pub = '-' # stdout
pubq = open(arg.pub, 'w', capture=arg.cat, marshal=marshal)
except Exception as e:
log.exception("Exception opening queues: %s" % e)
# "Human-readable" queue name can be retrieved as
#
# sname = subq.get_name()
# pname = pubq.get_name()
rc = 0
try:
it = subq.__iter__()
while True:
m = ''
try:
m = it.next()
if arg.clean:
m = message.clean(m)
if m:
if arg.addfeed:
m = message.add_embers_ids(m, feed=pubq.get_name(), feedPath=pubq.get_name())
pubq.write(m)
except StopIteration:
break
except KeyboardInterrupt:
break
except Exception as e:
rc += 1
if m:
log.exception('Could not process message %s: %s' % (m, e))
else:
log.exception('Unknown processing error %s' % e)
except KeyboardInterrupt:
pass
except Exception as e:
rc = 1
log.exception('Top level exception %s' % e)
return rc
if __name__ == '__main__':
sys.exit(main())
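A short producer/consumer sketch (editorial addition) using this module's file-like API; the queue names are hypothetical and would have to resolve via queue.conf. Note this etool module shadows the stdlib queue, hence the alias.
import queue as equeue  # this etool module, not the stdlib
sub = equeue.open('raw-tweets', 'r')   # subscribe to an input queue (name assumed)
pub = equeue.open('geo-tweets', 'w')   # publish to an output queue (name assumed)
for msg in sub:                        # Queue objects are iterable
    msg['processed'] = True
    pub.write(msg)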
| 30.018581
| 117
| 0.565978
|
import sys
import json
import re
import logging
import os
import os.path
import codecs
import time
import conf
import logs
import kqueue
log = logging.getLogger(__name__)
BIND = 1
CONNECT = 2
SERVICE = ""
INITTED = False
KCONNECTION = None
def init(args=None):
global SERVICE, INITTED
cf = None
conf.init(args)
if args and args.service:
SERVICE = args.service
else:
SERVICE = os.environ.get('UPSTART_JOB', "")
INITTED = True
def connect(force_new=False):
global KCONNECTION
if force_new:
return kqueue.connect()
else:
if not KCONNECTION:
KCONNECTION = kqueue.connect()
return KCONNECTION
class JsonMarshal(object):
def __init__(self, encoding='utf8', **kw):
codecs.lookup(encoding)
self.encoding = encoding
self.remove_newline = kw.get('remove_newline', False)
def encode(self, obj):
msg = json.dumps(obj, encoding=self.encoding, ensure_ascii=False)
if self.remove_newline:
msg = re.sub(ur'[\u0085\u2028\u2029\n\r\f\v]+', ur'\\n', msg)
if isinstance(msg, str):
msg = unicode(msg)
return msg
def decode(self, data):
return json.loads(data, encoding=self.encoding)
class UnicodeMarshal(JsonMarshal):
def __init__(self, **kw):
super(UnicodeMarshal, self).__init__(**kw)
def encode(self, obj):
return unicode(obj)
def decode(self, data):
return unicode(data)
class RawMarshal(object):
def encode(self, obj):
return obj
def decode(self, obj):
return obj
class StreamCaptureProbe(object):
def __init__(self, encoding='utf8', stream=sys.stdout):
self._s = codecs.getwriter(encoding)(stream)
self._s.flush()
def __call__(self, action, message):
if action == Queue.SENT:
self._s.write(message)
self._s.write('\n')
self._s.flush()
class QueueStatsProbe(object):
def __init__(self, interval_min=5):
self.interval = datetime.timedelta(minutes=interval_min)
self.start = datetime.datetime.now()
self.sent_bytes = 0
self.sent_msg = 0
self.recv_bytes = 0
self.recv_msg = 0
def __call__(self, action, message):
if action == Queue.SENT:
self.sent_bytes += len(message)
self.sent_msg += 1
if action == Queue.RECEIVED:
self.recv_bytes += len(message)
self.recv_msg += 1
class Queue(object):
"""Docstring for Queue """
SENT = 1
RECEIVED = 2
def __init__(self, ename, mode, qname="", no_ack=True, capture=False,
remove_newline=False, marshal=None, force_new_connection=False):
"""@todo: to be defined
:param ename: @todo
:param mode: @todo
:param qname: @todo
:param no_ack: @todo
:param capture: @todo
:param remove_newline: @todo
"""
if not INITTED:
log.warn("QUEUE INIT Not called, calling")
init()
self._ename = ename
self._mode = mode
self._qname = qname
self._no_ack = no_ack
self._probes = []
self._last_poll = None
self._marshal = marshal or JsonMarshal()
self.connection = connect(force_new_connection)
if not isinstance(self._ename, list):
self._ename = [self._ename]
exclusive = (SERVICE == "")
self._exchanges = [kqueue.Exchange(e[0], type="fanout", durable=False) for e in self._ename]
self._queues = [kqueue.Queue(e[1], ex, exclusive=exclusive)
for e, ex in zip(self._ename, self._exchanges)]
self._name = [e[0] for e in self._ename]
def open(self):
"""@todo: Docstring for open
:returns: @todo
"""
if not INITTED:
init()
if "r" in self._mode:
self._queue = kqueue.KReadQueue(self.connection,
self._queues,
no_ack=self._no_ack,
queue_declare=True)
elif "w" in self._mode:
self._queue = kqueue.KWriteQueue(self.connection,
self._queues[0],
exchange_declare=True)
def read(self):
"""Reads one message from the queue
:returns: @todo
"""
if self._last_poll is not None:
msg = self._last_poll
self._last_poll = None
else:
msg = self._queue.get(block=True)
msg = msg.payload
self.notify(Queue.RECEIVED, msg)
msg = self._marshal.decode(msg)
return msg
def read_without_polling(self):
"""Reads socket without first polling it, guaranteed block if no data
exists.
:returns: @todo
"""
return self.read()
def poll(self, timeout=None, flags=0):
if self._last_poll is not None:
return True
else:
try:
msg = self._queue.get(block=True, timeout=timeout)
except kqueue.Empty:
msg = None
self._last_poll = msg
return self._last_poll is not None
def write(self, data):
"""@todo: Docstring for write
:param data: @todo
:returns: @todo
"""
data = self._marshal.encode(data)
self._queue.put(data)
self.notify(Queue.SENT, data)
def get_name(self):
if not self._name:
return None
elif isinstance(self._name, basestring):
return self._name
else:
return ",".join(self._name)
    def __iter__(self):
return self
def next(self):
return self.read()
    def __enter__(self):
return self
def __exit__(self, ex_type, ex_val, ex_trace):
self.close()
return False
def close(self):
"""@todo: Docstring for close
:returns: @todo
"""
pass
def add_probe(self, probe):
assert hasattr(probe, '__call__'), "Object must be callable."
self._probes.append(probe)
def notify(self, action, msg):
for p in self._probes:
try:
p(action, json.dumps(msg))
except KeyboardInterrupt:
raise
except:
log.exception('Failed to notify probe.')
class StreamQueue(object):
"""
An object to make a stream (typically stdin or stdout)
conform to the Queue interface so we can write code that treats
them interchangeably.
"""
def __init__(self, stream,
mode='r',
name=None,
encoding='utf8',
marshal=JsonMarshal(),
end_of_record='\n',
**ignore):
        assert stream, "Need a stream to read or write to."
assert marshal, "Need a message marshaller to encode and decode messages."
self._marshal = marshal
self.end_of_record = end_of_record
if encoding:
if mode == 'w':
self._stream = codecs.getwriter(encoding)(stream, 'replace')
else:
self._stream = codecs.getreader(encoding)(stream, 'replace')
else:
self._stream = stream
if not name:
self._name = None
else:
self._name = name
def get_name(self):
if not self._name:
return None
elif isinstance(self._name, basestring):
return self._name
else:
l = len(self._name)
if l == 1:
return self._name[0]
elif l > 1:
sout = self._name[0]
for i in range(1, l):
sout = sout + "," + self._name[i]
return sout
else:
return None
def poll(self, timeout=None, flags=0):
raise NotImplementedError
def read(self, flags=0):
"""Read the next item from the stream.
This deals with blank lines and EOF by passing
on the values from the stream's read(). Blanks lines
are a string with a newline (and maybe other whitespace)
and EOF is returned as ''. I.e. not s.read() => EOF.
"""
msg = self._stream.readline()
if msg.strip(): # skip empty lines
return self._marshal.decode(msg)
else: # pass it on - blank line is '\n', EOF is ''
return msg
def write(self, obj, flags=0):
if not obj:
return
msg = self._marshal.encode(obj).strip()
self._stream.write(msg)
self._stream.write(self.end_of_record)
def __iter__(self):
self._iter = self._stream.__iter__()
return self
def next(self):
if self._iter:
msg = self._iter.next()
if msg.strip(): # skip blank lines
return self._marshal.decode(msg)
else:
return msg
else:
raise Exception('No iterator initialized')
def close(self): # No action necessary. Stubbed so this class can follow the usage patterns of other I/O classes
return
def __enter__(self):
self._ctx = self._stream.__enter__()
return self._ctx
def __exit__(self, ex_type, ex_val, ex_trace):
if self._ctx:
return self._ctx.__exit__()
else:
return False
def resolve_address(qname, qtype="r", attach=None):
"""
Resolve qname into a queue specification,
either from embers.conf or by treating it as a
fully qualified name if it is not in the conf.
Minimal check on form of fully qualified name.
The attach parameter overrides the default attachment type
(BIND or CONNECT) for queues doing special connections.
"""
#(host, port) = conf.get_queue_info(qname)
if qtype in ("w", ): # (zmq.PUB, zmq.REP):
result = (qname, "")
elif qtype in ("r", ):
result = (qname, SERVICE)
else:
assert False, "Invalid type, Queue no longer supports zmq"
return result
def get_conf_entry(qname):
"""
Return the entire JSON expression for a given qname.
"""
return conf.get_conf_entry(qname)
def open(name, mode='r', capture=False, service=None, exclusive=None, **kw):
"""
Open a queue with file-like semantics. E.g.:
q = open('sample-1', 'w') - publish
q = open('sample-1', 'r') - subscribe
options:
name - a queue name, either a full ZMQ-style URL or a name found in queue.conf
    mode - the queue open mode. One of r (SUB), w (PUB), r+ (REP), w+ (REQ).
marshal - class to use to marshal messages, default JsonMarshal
capture - capture and log messages as they are sent. Can be True, or a stream, or a Capture instance.
"""
# this is somewhat goofy, but once you have
# a metaphor you might as well run it into the ground
assert mode in {"r", "w"}, 'Mode %s is not a valid mode. Use one of r, w'
typ = mode
service = service or SERVICE
# special case '-' -> use stdin or stdout
if isinstance(name, list) and '-' in name or name == '-':
if mode in ('w', ):
s = sys.stdout
name = 'stdout'
else:
s = sys.stdin
name = 'stdin'
log.info('Reading from stdin' if name == 'stdin' else 'Writing to stdout')
return StreamQueue(s, name=name, mode=mode, **kw)
# normal queue case
if typ in ("w", ):
if not name:
name = conf.get_default_queue_names(service, 'out')
log.info('Writing to %s' % name)
else:
if not name:
name = conf.get_default_queue_names(service, 'in')
log.info('Reading from %s' % name)
if isinstance(name, basestring):
addr = [resolve_address(name,
qtype=typ,
attach=kw.get('attach', None))]
else:
addr = [resolve_address(n,
qtype=typ,
attach=kw.get('attach', None))
for n in name]
if "qname" in kw:
qname = kw["qname"]
addr = [(e[0], qname) for e in addr]
result = Queue(addr, typ, **kw)
assert addr, "Could not resolve an address from %s." % (name,)
result.open()
if capture:
result.add_probe(StreamCaptureProbe())
return result
def main():
"""
A little utility to handle reading and writing streams
to and from a queue.
--pub <queue> : publish what's read from stdin to <queue>
--sub <queue> : read from <queue> and write the messages to stdout
--cat : when used with --pub, write all published messages to stdout
--clean : check in incoming and outgoing messages.
Verify the message is correct JSON and add
an embersId if needed.
--log_file : Path to write the log file to
--log_level : Logging level
Other standard EMBERS options (e.g. --verbose).
"""
import args
import message
global log
ap = args.get_parser()
ap.add_argument('--clean', action="store_true",
help='Verify message format and add standard fields such as embersId.')
ap.add_argument('--addfeed', action="store_true", help='Add feed and feedPath fields to published message.')
ap.add_argument('--cat', action="store_true", help='Write all published messages to stdout.')
ap.add_argument('--rm', nargs="+", help="delete queue")
arg = ap.parse_args()
log = logs.getLogger(log_name=arg.log_file)
logs.init(arg, l=arg.log_level, logfile=arg.log_file)
init(arg)
if arg.rm and not arg.sub:
for queue in arg.rm:
print "Deleting", queue,
queue = kqueue.Queue(queue)
queue.maybe_bind(connect())
queue.delete()
print "."
return
try:
marshal = UnicodeMarshal()
if arg.clean or arg.addfeed:
marshal = JsonMarshal()
if arg.sub is None and os.environ.get('UPSTART_JOB') is None:
arg.sub = '-'
subq = open(arg.sub, 'r')
if arg.pub is None and os.environ.get('UPSTART_JOB') is None:
arg.pub = '-'
pubq = open(arg.pub, 'w', capture=arg.cat, marshal=marshal)
except Exception as e:
log.exception("Exception opening queues: %s" % e)
rc = 0
try:
it = subq.__iter__()
while True:
m = ''
try:
m = it.next()
if arg.clean:
m = message.clean(m)
if m:
if arg.addfeed:
m = message.add_embers_ids(m, feed=pubq.get_name(), feedPath=pubq.get_name())
pubq.write(m)
except StopIteration:
break
except KeyboardInterrupt:
break
except Exception as e:
rc += 1
if m:
log.exception('Could not process message %s: %s' % (m, e))
else:
log.exception('Unknown processing error %s' % e)
except KeyboardInterrupt:
pass
except Exception as e:
rc = 1
log.exception('Top level exception %s' % e)
return rc
if __name__ == '__main__':
sys.exit(main())
| false
| true
|
790bb98d4b406927f9ab352919465ce9328484e3
| 13,536
|
py
|
Python
|
benchmarks/ltl_timed_transition_system/token_ring/f3/token_ring_0024.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/ltl_timed_transition_system/token_ring/f3/token_ring_0024.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/ltl_timed_transition_system/token_ring/f3/token_ring_0024.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from collections import Iterable
from itertools import combinations
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
num_procs = 24
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type):
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
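# --- illustrative check (assumption): the helpers above derive -, <, >=, >
# and -> from MathSAT's +, *, <=, not and or. The same identities hold over
# plain Python numbers and booleans:
def _py_minus(a, b): return a + b * (-1)       # a - b  ==  a + (-1)*b
def _py_geq(a, b):   return b <= a             # a >= b ==  b <= a
def _py_lt(a, b):    return not _py_geq(a, b)  # a < b  ==  not (a >= b)
def _py_gt(a, b):    return not (a <= b)       # a > b  ==  not (a <= b)
def _py_impl(p, q):  return (not p) or q       # p -> q ==  (not p) or q

assert _py_minus(7, 3) == 4 and _py_lt(2, 5) and _py_impl(False, True)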
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
delta, x_delta = decl_consts(menv, delta_name, real_type)
transm_time, x_transm_time = decl_consts(menv, "tot_transm_time",
real_type)
curr2next = {delta: x_delta, transm_time: x_transm_time}
mgr = TokenManager("mgr", menv, enc, delta)
stations = [Station("st{}".format(i), menv, enc, mgr, delta)
for i in range(num_procs)]
for s, x_s in mgr.symb2next.items():
curr2next[s] = x_s
for comp in stations:
for s, x_s in comp.symb2next.items():
assert s not in curr2next.keys()
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
# init: tot_transm_time = 0
init = msat_make_equal(menv, transm_time, zero)
# invar: delta >= 0
init = msat_make_and(menv, init, msat_make_geq(menv, delta, zero))
trans = msat_make_geq(menv, x_delta, zero)
# only 1 station moves
for s0, s1 in combinations(stations, 2):
trans = msat_make_and(menv, trans,
msat_make_or(menv, s0.stutter, s1.stutter))
# sync stations and mgr
st_acquire = stations[0].acquire
for st in stations[1:]:
st_acquire = msat_make_or(menv, st_acquire, st.acquire)
trans = msat_make_and(menv, trans,
msat_make_iff(menv, mgr.acquire, st_acquire))
st_release = stations[0].release
for st in stations[1:]:
st_release = msat_make_or(menv, st_release, st.release)
trans = msat_make_and(menv, trans,
msat_make_iff(menv, mgr.release, st_release))
# (mgr.counting & mgr.idle') -> total_transm_time' = total_transm_time + mgr.c
lhs = msat_make_and(menv, mgr.counting, mgr.x_idle)
rhs = msat_make_equal(menv, x_transm_time,
msat_make_plus(menv, transm_time, mgr.c))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# !(mgr.counting & mgr.idle') -> total_transm_time' = total_transm_time
lhs = msat_make_not(menv, lhs)
rhs = msat_make_equal(menv, x_transm_time, transm_time)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
init = msat_make_and(menv, init, mgr.init)
trans = msat_make_and(menv, trans, mgr.trans)
for s in stations:
init = msat_make_and(menv, init, s.init)
trans = msat_make_and(menv, trans, s.trans)
# (G F (mgr.counting & mgr.idle')) -> G F total_transm_time < 10
lhs = enc.make_G(enc.make_F(msat_make_and(menv, mgr.counting,
enc.make_X(mgr.idle))))
rhs = msat_make_lt(menv, transm_time, msat_make_number(menv, "10"))
rhs = enc.make_G(enc.make_F(rhs))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
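# --- illustrative note (assumption): the property assembled above, written in
# ordinary LTL syntax over the 24-station model:
#     (G F (mgr.counting & X mgr.idle))  ->  (G F tot_transm_time < 10)
# i.e. if the manager finishes a counting phase infinitely often, then the
# accumulated transmission time is below 10 at infinitely many instants.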
class Module:
"""Synchronous component"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(self.menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(self._symb(c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(self.menv, b_vars[idx][0]),
msat_make_not(self.menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(self.menv, pred, it[0])
x_pred = msat_make_and(self.menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
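# --- worked example (assumption): a plain-Python mirror of _enum's bit
# encoding. For enum_size = 3, num_bits = ceil(log2(3)) = 2 and value k is
# encoded by the bits of format(k, '02b'), read least-significant bit first:
#   0 -> '00' -> (!b0, !b1)    1 -> '01' -> (b0, !b1)    2 -> '10' -> (!b0, b1)
from math import ceil, log   # already imported at module top; repeated for clarity
_num_bits = ceil(log(3, 2))
assert _num_bits == 2
assert [format(v, '0{}b'.format(_num_bits)) for v in range(3)] == ['00', '01', '10']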
class TokenManager(Module):
"""TokenManager module"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder, delta):
super().__init__(name, menv, enc)
real_type = msat_get_rational_type(menv)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt_symbs, evts, x_evts = self._enum("evt", 3)
c, x_c = self._symb("c", real_type)
timeout, x_timeout = self._symb("timeout", real_type)
self.timeout = timeout
self.x_timeout = x_timeout
self.c = c
self.idle = loc
self.counting = msat_make_not(menv, loc)
self.x_idle = x_loc
self.x_counting = msat_make_not(menv, x_loc)
self.acquire = evts[0]
self.release = evts[1]
self.stutter = evts[2]
self.symb2next = {loc: x_loc, c: x_c, timeout: x_timeout}
for s, x_s in evt_symbs:
assert s not in self.symb2next
self.symb2next[s] = x_s
zero = msat_make_number(menv, "0")
# bound evt
bound_evt = evts[0]
x_bound_evt = x_evts[0]
for evt, x_evt in zip(evts[1:], x_evts[1:]):
bound_evt = msat_make_or(menv, bound_evt, evt)
x_bound_evt = msat_make_or(menv, x_bound_evt, x_evt)
self.init = bound_evt
self.trans = x_bound_evt
# idle & c = 0 & timeout = 0
self.init = msat_make_and(
menv,
msat_make_and(menv, self.init, self.idle),
msat_make_and(menv,
msat_make_equal(menv, c, zero),
msat_make_equal(menv, timeout, zero)))
# invar: counting -> c <= timeout
rhs = msat_make_leq(menv, c, timeout)
self.init = msat_make_and(menv, self.init,
msat_make_impl(menv, self.counting, rhs))
rhs = msat_make_leq(menv, x_c, x_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, self.x_counting, rhs))
# (delta > 0 | stutter) -> c' = c + delta & l' = l & timeout' = timeout
lhs = msat_make_or(menv, self.stutter,
msat_make_gt(menv, delta, zero))
rhs = msat_make_and(
menv,
msat_make_and(menv, msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))),
msat_make_equal(menv, x_timeout, timeout))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
msat_make_or(menv, self.acquire, self.release))
# (idle) -> (acquire & counting' & c' = 0)
lhs = msat_make_and(menv, disc_t, self.idle)
rhs = msat_make_and(menv, self.acquire,
msat_make_and(menv, self.x_counting,
msat_make_equal(menv, x_c, zero)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (counting) -> (release & idle' & c' = 0 & timeout' = 0)
lhs = msat_make_and(menv, disc_t, self.counting)
rhs = msat_make_and(
menv,
msat_make_and(menv, self.x_idle, self.release),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_timeout, zero)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
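# --- illustrative sketch (assumption): a concrete simulation of the manager's
# two-location timed automaton encoded above. Time/stutter steps add delta to
# the clock c and keep the location; the discrete edges reset c (and timeout
# on release) as in the trans relation. The station sets timeout on acquire.
def _mgr_step_sketch(state, delta=0.0, event=None):
    loc, c, timeout = state
    if event is None or delta > 0:                 # time elapses / stutter
        return (loc, c + delta, timeout)
    if loc == 'idle' and event == 'acquire':       # idle -> counting, c := 0
        return ('counting', 0.0, timeout)
    if loc == 'counting' and event == 'release':   # counting -> idle, reset
        return ('idle', 0.0, 0.0)
    raise ValueError('edge not enabled')

_s = ('idle', 0.0, 0.0)
_s = _mgr_step_sketch(_s, event='acquire')   # -> ('counting', 0.0, 0.0)
_s = _mgr_step_sketch(_s, delta=2.5)         # -> ('counting', 2.5, 0.0)
_s = _mgr_step_sketch(_s, event='release')   # -> ('idle', 0.0, 0.0)
assert _s == ('idle', 0.0, 0.0)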
class Station(Module):
"""Station module"""
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder, mgr, delta):
super().__init__(name, menv, enc)
real_type = msat_get_rational_type(menv)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt_symbs, evts, x_evts = self._enum("evt", 3)
req_time, x_req_time = self._symb("req_time", real_type)
self.idle = loc
self.transm = msat_make_not(menv, loc)
self.x_idle = x_loc
self.x_transm = msat_make_not(menv, x_loc)
self.acquire = evts[0]
self.release = evts[1]
self.stutter = evts[2]
self.symb2next = {loc: x_loc, req_time: x_req_time}
for s, x_s in evt_symbs:
assert s not in self.symb2next
self.symb2next[s] = x_s
zero = msat_make_number(menv, "0")
# bound evt
bound_evt = evts[0]
x_bound_evt = x_evts[0]
for evt, x_evt in zip(evts[1:], x_evts[1:]):
bound_evt = msat_make_or(menv, bound_evt, evt)
x_bound_evt = msat_make_or(menv, x_bound_evt, x_evt)
self.init = bound_evt
self.trans = x_bound_evt
# idle
self.init = msat_make_and(menv, self.init, self.idle)
# invar: req_time > 0
self.init = msat_make_and(menv, self.init,
msat_make_gt(menv, req_time, zero))
self.trans = msat_make_and(menv, self.trans,
msat_make_gt(menv, x_req_time, zero))
# (delta > 0 | stutter) -> l' = l & req_time' = req_time
lhs = msat_make_or(menv, self.stutter,
msat_make_gt(menv, delta, zero))
rhs = msat_make_and(
menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_req_time, req_time))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
msat_make_or(menv, self.acquire, self.release))
# (idle) -> (acquire & transm' & mgr.timeout' = req_time & req_time' = req_time)
lhs = msat_make_and(menv, disc_t, self.idle)
rhs = msat_make_and(
menv,
msat_make_and(menv, self.acquire, self.x_transm),
msat_make_and(menv,
msat_make_equal(menv, mgr.x_timeout, req_time),
msat_make_equal(menv, x_req_time, req_time)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (transm) -> (release & mgr.c > 0 & idle')
lhs = msat_make_and(menv, disc_t, self.transm)
rhs = msat_make_and(
menv, self.release,
msat_make_and(menv, msat_make_gt(menv, mgr.c, zero), self.x_idle))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
| 39.578947
| 88
| 0.591534
|
from collections import Iterable
from itertools import combinations
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next
num_procs = 24
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type):
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
real_type = msat_get_rational_type(menv)
delta, x_delta = decl_consts(menv, delta_name, real_type)
transm_time, x_transm_time = decl_consts(menv, "tot_transm_time",
real_type)
curr2next = {delta: x_delta, transm_time: x_transm_time}
mgr = TokenManager("mgr", menv, enc, delta)
stations = [Station("st{}".format(i), menv, enc, mgr, delta)
for i in range(num_procs)]
for s, x_s in mgr.symb2next.items():
curr2next[s] = x_s
for comp in stations:
for s, x_s in comp.symb2next.items():
assert s not in curr2next.keys()
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_equal(menv, transm_time, zero)
init = msat_make_and(menv, init, msat_make_geq(menv, delta, zero))
trans = msat_make_geq(menv, x_delta, zero)
for s0, s1 in combinations(stations, 2):
trans = msat_make_and(menv, trans,
msat_make_or(menv, s0.stutter, s1.stutter))
st_acquire = stations[0].acquire
for st in stations[1:]:
st_acquire = msat_make_or(menv, st_acquire, st.acquire)
trans = msat_make_and(menv, trans,
msat_make_iff(menv, mgr.acquire, st_acquire))
st_release = stations[0].release
for st in stations[1:]:
st_release = msat_make_or(menv, st_release, st.release)
trans = msat_make_and(menv, trans,
msat_make_iff(menv, mgr.release, st_release))
lhs = msat_make_and(menv, mgr.counting, mgr.x_idle)
rhs = msat_make_equal(menv, x_transm_time,
msat_make_plus(menv, transm_time, mgr.c))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_not(menv, lhs)
rhs = msat_make_equal(menv, x_transm_time, transm_time)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
init = msat_make_and(menv, init, mgr.init)
trans = msat_make_and(menv, trans, mgr.trans)
for s in stations:
init = msat_make_and(menv, init, s.init)
trans = msat_make_and(menv, trans, s.trans)
lhs = enc.make_G(enc.make_F(msat_make_and(menv, mgr.counting,
enc.make_X(mgr.idle))))
rhs = msat_make_lt(menv, transm_time, msat_make_number(menv, "10"))
rhs = enc.make_G(enc.make_F(rhs))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(self.menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(self._symb(c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(self.menv, b_vars[idx][0]),
msat_make_not(self.menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(self.menv, pred, it[0])
x_pred = msat_make_and(self.menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
class TokenManager(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder, delta):
super().__init__(name, menv, enc)
real_type = msat_get_rational_type(menv)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt_symbs, evts, x_evts = self._enum("evt", 3)
c, x_c = self._symb("c", real_type)
timeout, x_timeout = self._symb("timeout", real_type)
self.timeout = timeout
self.x_timeout = x_timeout
self.c = c
self.idle = loc
self.counting = msat_make_not(menv, loc)
self.x_idle = x_loc
self.x_counting = msat_make_not(menv, x_loc)
self.acquire = evts[0]
self.release = evts[1]
self.stutter = evts[2]
self.symb2next = {loc: x_loc, c: x_c, timeout: x_timeout}
for s, x_s in evt_symbs:
assert s not in self.symb2next
self.symb2next[s] = x_s
zero = msat_make_number(menv, "0")
# bound evt
bound_evt = evts[0]
x_bound_evt = x_evts[0]
for evt, x_evt in zip(evts[1:], x_evts[1:]):
bound_evt = msat_make_or(menv, bound_evt, evt)
x_bound_evt = msat_make_or(menv, x_bound_evt, x_evt)
self.init = bound_evt
self.trans = x_bound_evt
# idle & c = 0 & timeout = 0
self.init = msat_make_and(
menv,
msat_make_and(menv, self.init, self.idle),
msat_make_and(menv,
msat_make_equal(menv, c, zero),
msat_make_equal(menv, timeout, zero)))
# invar: counting -> c <= timeout
rhs = msat_make_leq(menv, c, timeout)
self.init = msat_make_and(menv, self.init,
msat_make_impl(menv, self.counting, rhs))
rhs = msat_make_leq(menv, x_c, x_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, self.x_counting, rhs))
# (delta > 0 | stutter) -> c' = c + delta & l' = l & timeout' = timeout
lhs = msat_make_or(menv, self.stutter,
msat_make_gt(menv, delta, zero))
rhs = msat_make_and(
menv,
msat_make_and(menv, msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))),
msat_make_equal(menv, x_timeout, timeout))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
msat_make_or(menv, self.acquire, self.release))
lhs = msat_make_and(menv, disc_t, self.idle)
rhs = msat_make_and(menv, self.acquire,
msat_make_and(menv, self.x_counting,
msat_make_equal(menv, x_c, zero)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t, self.counting)
rhs = msat_make_and(
menv,
msat_make_and(menv, self.x_idle, self.release),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_timeout, zero)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Station(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder, mgr, delta):
super().__init__(name, menv, enc)
real_type = msat_get_rational_type(menv)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt_symbs, evts, x_evts = self._enum("evt", 3)
req_time, x_req_time = self._symb("req_time", real_type)
self.idle = loc
self.transm = msat_make_not(menv, loc)
self.x_idle = x_loc
self.x_transm = msat_make_not(menv, x_loc)
self.acquire = evts[0]
self.release = evts[1]
self.stutter = evts[2]
self.symb2next = {loc: x_loc, req_time: x_req_time}
for s, x_s in evt_symbs:
assert s not in self.symb2next
self.symb2next[s] = x_s
zero = msat_make_number(menv, "0")
# bound evt
bound_evt = evts[0]
x_bound_evt = x_evts[0]
for evt, x_evt in zip(evts[1:], x_evts[1:]):
bound_evt = msat_make_or(menv, bound_evt, evt)
x_bound_evt = msat_make_or(menv, x_bound_evt, x_evt)
self.init = bound_evt
self.trans = x_bound_evt
# idle
self.init = msat_make_and(menv, self.init, self.idle)
# invar: req_time > 0
self.init = msat_make_and(menv, self.init,
msat_make_gt(menv, req_time, zero))
self.trans = msat_make_and(menv, self.trans,
msat_make_gt(menv, x_req_time, zero))
# (delta > 0 | stutter) -> l' = l & req_time' = req_time
lhs = msat_make_or(menv, self.stutter,
msat_make_gt(menv, delta, zero))
rhs = msat_make_and(
menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_req_time, req_time))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, msat_make_equal(menv, delta, zero),
msat_make_or(menv, self.acquire, self.release))
# (idle) -> (acquire & transm' & mgr.timeout' = req_time & req_time' = req_time)
lhs = msat_make_and(menv, disc_t, self.idle)
rhs = msat_make_and(
menv,
msat_make_and(menv, self.acquire, self.x_transm),
msat_make_and(menv,
msat_make_equal(menv, mgr.x_timeout, req_time),
msat_make_equal(menv, x_req_time, req_time)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
lhs = msat_make_and(menv, disc_t, self.transm)
rhs = msat_make_and(
menv, self.release,
msat_make_and(menv, msat_make_gt(menv, mgr.c, zero), self.x_idle))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
| true
| true
|
790bba6a4a4c6bee5551899edc7836f9eefab95a
| 1,526
|
py
|
Python
|
packages/jet_bridge_base/jet_bridge_base/views/register.py
|
bokal2/jet-bridge
|
dddc4f55c2d5a28c02ce9515dffc750e3887450f
|
[
"MIT"
] | 2
|
2020-04-18T14:34:44.000Z
|
2020-04-18T14:34:47.000Z
|
packages/jet_bridge_base/jet_bridge_base/views/register.py
|
bokal2/jet-bridge
|
dddc4f55c2d5a28c02ce9515dffc750e3887450f
|
[
"MIT"
] | null | null | null |
packages/jet_bridge_base/jet_bridge_base/views/register.py
|
bokal2/jet-bridge
|
dddc4f55c2d5a28c02ce9515dffc750e3887450f
|
[
"MIT"
] | null | null | null |
from six.moves.urllib_parse import quote
from jet_bridge_base import settings
from jet_bridge_base.responses.base import Response
from jet_bridge_base.responses.redirect import RedirectResponse
from jet_bridge_base.status import HTTP_400_BAD_REQUEST
from jet_bridge_base.views.base.api import APIView
class RegisterView(APIView):
def get(self, *args, **kwargs):
if not settings.PROJECT:
return Response('Project name is not set', status=HTTP_400_BAD_REQUEST)
if not settings.TOKEN:
return Response('Project token is not set', status=HTTP_400_BAD_REQUEST)
token = self.request.get_argument('token', '')
install_type = self.request.get_argument('install_type', '')
if settings.WEB_BASE_URL.startswith('https') and not self.request.full_url().startswith('https'):
web_base_url = 'http{}'.format(settings.WEB_BASE_URL[5:])
else:
web_base_url = settings.WEB_BASE_URL
if token:
url = '{}/projects/register/{}'.format(web_base_url, token)
else:
url = '{}/projects/register'.format(web_base_url)
parameters = [
['project', settings.PROJECT],
['referrer', self.request.full_url().encode('utf8')],
]
if install_type:
parameters.append(['install_type', install_type])
query_string = '&'.join(map(lambda x: '{}={}'.format(x[0], quote(x[1])), parameters))
return RedirectResponse('%s?%s' % (url, query_string))
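# --- illustrative check (assumption): a stand-alone rendering of the query
# string assembled in get() above, using the same quote() imported from six.
# quote() escapes ':' but leaves '/' intact by default (safe='/').
_params = [['project', 'demo'], ['referrer', 'http://host/register/']]
_qs = '&'.join('{}={}'.format(k, quote(v)) for k, v in _params)
assert _qs == 'project=demo&referrer=http%3A//host/register/'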
| 35.488372
| 105
| 0.659895
|
from six.moves.urllib_parse import quote
from jet_bridge_base import settings
from jet_bridge_base.responses.base import Response
from jet_bridge_base.responses.redirect import RedirectResponse
from jet_bridge_base.status import HTTP_400_BAD_REQUEST
from jet_bridge_base.views.base.api import APIView
class RegisterView(APIView):
def get(self, *args, **kwargs):
if not settings.PROJECT:
return Response('Project name is not set', status=HTTP_400_BAD_REQUEST)
if not settings.TOKEN:
return Response('Project token is not set', status=HTTP_400_BAD_REQUEST)
token = self.request.get_argument('token', '')
install_type = self.request.get_argument('install_type', '')
if settings.WEB_BASE_URL.startswith('https') and not self.request.full_url().startswith('https'):
web_base_url = 'http{}'.format(settings.WEB_BASE_URL[5:])
else:
web_base_url = settings.WEB_BASE_URL
if token:
url = '{}/projects/register/{}'.format(web_base_url, token)
else:
url = '{}/projects/register'.format(web_base_url)
parameters = [
['project', settings.PROJECT],
['referrer', self.request.full_url().encode('utf8')],
]
if install_type:
parameters.append(['install_type', install_type])
query_string = '&'.join(map(lambda x: '{}={}'.format(x[0], quote(x[1])), parameters))
return RedirectResponse('%s?%s' % (url, query_string))
| true
| true
|
790bbaf8e6b6e3e030475b1cf8154b1be53ed58b
| 21
|
py
|
Python
|
examples/permissionsexample/models.py
|
max-arnold/django-rest-framework
|
ce5eb85082dd775bb5079ae7af91840fba7f9a6e
|
[
"BSD-2-Clause"
] | 2
|
2017-12-05T15:32:58.000Z
|
2017-12-05T15:33:02.000Z
|
examples/permissionsexample/models.py
|
upgrade-drf/django-rest-framework-0.4
|
ce5eb85082dd775bb5079ae7af91840fba7f9a6e
|
[
"BSD-2-Clause"
] | null | null | null |
examples/permissionsexample/models.py
|
upgrade-drf/django-rest-framework-0.4
|
ce5eb85082dd775bb5079ae7af91840fba7f9a6e
|
[
"BSD-2-Clause"
] | 1
|
2020-12-18T11:24:55.000Z
|
2020-12-18T11:24:55.000Z
|
#for fixture loading
| 10.5
| 20
| 0.809524
| true
| true
|
|
790bbcaa506bdbe2a86a708c00fe73aefd26746c
| 2,286
|
py
|
Python
|
contrib/thrifty/tests/python/pants_test/pants/contrib/thrifty/test_thrifty_gen.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | null | null | null |
contrib/thrifty/tests/python/pants_test/pants/contrib/thrifty/test_thrifty_gen.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | null | null | null |
contrib/thrifty/tests/python/pants_test/pants/contrib/thrifty/test_thrifty_gen.py
|
anthonyjpratti/pants
|
d98e53af6ddd877861231bce8343f8204da0a9d1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.codegen.wire.java.register import build_file_aliases as register_codegen
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.build_graph.register import build_file_aliases as register_core
from pants.java.jar.jar_dependency import JarDependency
from pants.testutil.task_test_base import TaskTestBase
from pants.contrib.thrifty.java_thrifty_gen import JavaThriftyGen
from pants.contrib.thrifty.java_thrifty_library import JavaThriftyLibrary
class JavaThriftyGenTest(TaskTestBase):
TARGET_WORKDIR = ".pants.d/bogus/workdir"
@classmethod
def task_type(cls):
return JavaThriftyGen
@classmethod
def alias_groups(cls):
return register_core().merge(register_codegen())
def _create_fake_thrifty_tool(self):
self.make_target(':thrifty-compiler', JarLibrary, jars=[
JarDependency(org='com.microsoft.thrifty', name='thrifty-compiler', rev='0.4.3'),
])
def test_compiler_args(self):
self._create_fake_thrifty_tool()
target = self.make_target('src/thrifty:simple-thrifty-target', JavaThriftyLibrary,
sources=['foo.thrift'])
context = self.context(target_roots=[target])
task = self.create_task(context)
self.assertEqual([
'--out={}'.format(self.TARGET_WORKDIR),
'--path={}/src/thrifty'.format(self.build_root),
'src/thrifty/foo.thrift'],
task.format_args_for_target(target, self.TARGET_WORKDIR))
def test_compiler_args_deps(self):
self._create_fake_thrifty_tool()
upstream = self.make_target('src/thrifty:upstream', JavaThriftyLibrary,
sources=['upstream.thrift'])
downstream = self.make_target('src/thrifty:downstream', JavaThriftyLibrary,
sources=['downstream.thrift'], dependencies=[upstream])
context = self.context(target_roots=[upstream, downstream])
task = self.create_task(context)
self.assertEqual([
'--out={}'.format(self.TARGET_WORKDIR),
'--path={}/src/thrifty'.format(self.build_root),
'src/thrifty/downstream.thrift'],
task.format_args_for_target(downstream, self.TARGET_WORKDIR))
| 41.563636
| 91
| 0.728346
|
from pants.backend.codegen.wire.java.register import build_file_aliases as register_codegen
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.build_graph.register import build_file_aliases as register_core
from pants.java.jar.jar_dependency import JarDependency
from pants.testutil.task_test_base import TaskTestBase
from pants.contrib.thrifty.java_thrifty_gen import JavaThriftyGen
from pants.contrib.thrifty.java_thrifty_library import JavaThriftyLibrary
class JavaThriftyGenTest(TaskTestBase):
TARGET_WORKDIR = ".pants.d/bogus/workdir"
@classmethod
def task_type(cls):
return JavaThriftyGen
@classmethod
def alias_groups(cls):
return register_core().merge(register_codegen())
def _create_fake_thrifty_tool(self):
self.make_target(':thrifty-compiler', JarLibrary, jars=[
JarDependency(org='com.microsoft.thrifty', name='thrifty-compiler', rev='0.4.3'),
])
def test_compiler_args(self):
self._create_fake_thrifty_tool()
target = self.make_target('src/thrifty:simple-thrifty-target', JavaThriftyLibrary,
sources=['foo.thrift'])
context = self.context(target_roots=[target])
task = self.create_task(context)
self.assertEqual([
'--out={}'.format(self.TARGET_WORKDIR),
'--path={}/src/thrifty'.format(self.build_root),
'src/thrifty/foo.thrift'],
task.format_args_for_target(target, self.TARGET_WORKDIR))
def test_compiler_args_deps(self):
self._create_fake_thrifty_tool()
upstream = self.make_target('src/thrifty:upstream', JavaThriftyLibrary,
sources=['upstream.thrift'])
downstream = self.make_target('src/thrifty:downstream', JavaThriftyLibrary,
sources=['downstream.thrift'], dependencies=[upstream])
context = self.context(target_roots=[upstream, downstream])
task = self.create_task(context)
self.assertEqual([
'--out={}'.format(self.TARGET_WORKDIR),
'--path={}/src/thrifty'.format(self.build_root),
'src/thrifty/downstream.thrift'],
task.format_args_for_target(downstream, self.TARGET_WORKDIR))
| true
| true
|
790bbcd10e63835d6ffdac40f3ed37d1ddd0aa78
| 1,622
|
py
|
Python
|
app/core/tests/test_models.py
|
Rish1711/recipe-app-api
|
eb0c6b6009d8696ae1f7652f2546c1d0d8dde4d0
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
Rish1711/recipe-app-api
|
eb0c6b6009d8696ae1f7652f2546c1d0d8dde4d0
|
[
"MIT"
] | null | null | null |
app/core/tests/test_models.py
|
Rish1711/recipe-app-api
|
eb0c6b6009d8696ae1f7652f2546c1d0d8dde4d0
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='rg171195@gmail.com', password='testpass'):
'''Creating sample user'''
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = 'rg171195@gmail.com'
password = 'Password123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_email_normalize(self):
"""Testing weather email is in normalize form or not"""
email = "test@XYZ.com"
user = get_user_model().objects.create_user(email, "test123")
self.assertEqual(user.email, email.lower())
def test_email_validation(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_superuser(self):
"""Test for creating super user"""
email = 'rg171195@gmail.com'
password = 'Password123'
user = get_user_model().objects.create_superuser(
email=email,
password=password
)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
def test_tag_str(self):
tag = models.Tag.objects.create(user=sample_user(), name='vegan')
self.assertEqual(str(tag), tag.name)
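# --- illustrative sketch (assumption): Django's BaseUserManager.normalize_email
# lowercases only the domain part, so the assertEqual(email.lower()) above
# passes only because the local part 'test' is already lowercase.
def _normalize_email_sketch(email):
    local, _, domain = email.partition('@')
    return '@'.join([local, domain.lower()]) if domain else email

assert _normalize_email_sketch('test@XYZ.com') == 'test@xyz.com'
assert _normalize_email_sketch('Test@XYZ.com') == 'Test@xyz.com'  # local kept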
| 33.102041
| 73
| 0.658446
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='rg171195@gmail.com', password='testpass'):
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = 'rg171195@gmail.com'
password = 'Password123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_email_normalize(self):
email = "test@XYZ.com"
user = get_user_model().objects.create_user(email, "test123")
self.assertEqual(user.email, email.lower())
def test_email_validation(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_superuser(self):
email = 'rg171195@gmail.com'
password = 'Password123'
user = get_user_model().objects.create_superuser(
email=email,
password=password
)
self.assertTrue(user.is_staff)
self.assertTrue(user.is_superuser)
def test_tag_str(self):
tag = models.Tag.objects.create(user=sample_user(), name='vegan')
self.assertEqual(str(tag), tag.name)
| true
| true
|
790bbcf99152c50fb5c331ca5fc517729a6d1dc6
| 3,832
|
py
|
Python
|
tests/test_aiprolog.py
|
0zAND1z/zamia-ai
|
d9e9c6123fdadca3fae55e87ea2b2b32d82bc210
|
[
"Apache-2.0"
] | 129
|
2017-03-23T14:20:33.000Z
|
2022-01-03T01:52:22.000Z
|
tests/test_aiprolog.py
|
0zAND1z/zamia-ai
|
d9e9c6123fdadca3fae55e87ea2b2b32d82bc210
|
[
"Apache-2.0"
] | 6
|
2017-03-09T22:32:55.000Z
|
2021-05-13T19:07:48.000Z
|
tests/test_aiprolog.py
|
gooofy/voxforge
|
da21be38e976aae67214537a27a30541afd3b5aa
|
[
"Apache-2.0"
] | 22
|
2017-04-07T15:44:05.000Z
|
2022-03-13T02:41:08.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Guenter Bartsch, Heiko Schaefer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import logging
import codecs
from nltools import misc
from sqlalchemy.orm import sessionmaker
from zamiaai import model
from zamiaprolog.logicdb import LogicDB
from aiprolog.runtime import AIPrologRuntime
from aiprolog.parser import AIPrologParser
UNITTEST_MODULE = 'unittests'
UNITTEST_CONTEXT = 'unittests'
class TestAIProlog (unittest.TestCase):
def setUp(self):
config = misc.load_config('.airc')
#
# logic DB
#
self.db = LogicDB(model.url)
#
# aiprolog environment setup
#
self.prolog_rt = AIPrologRuntime(self.db)
self.parser = AIPrologParser(self.db)
self.prolog_rt.set_trace(True)
self.db.clear_module(UNITTEST_MODULE)
# @unittest.skip("temporarily disabled")
def test_tokenize(self):
clause = self.parser.parse_line_clause_body("tokenize (de, 'hallo, welt!', X)")
logging.debug('clause: %s' % clause)
solutions = self.prolog_rt.search(clause)
logging.debug('solutions: %s' % repr(solutions))
self.assertEqual (len(solutions), 1)
self.assertEqual (len(solutions[0]['X'].l), 2)
# @unittest.skip("temporarily disabled")
def test_edit_distance(self):
clause = self.parser.parse_line_clause_body("edit_distance (['hallo', 'welt'], ['hallo', 'springfield'], X)")
logging.debug('clause: %s' % clause)
solutions = self.prolog_rt.search(clause)
logging.debug('solutions: %s' % repr(solutions))
self.assertEqual (len(solutions), 1)
self.assertEqual (solutions[0]['X'].f, 1.0)
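# --- worked example (assumption): token-level Levenshtein distance, the
# quantity the edit_distance/3 test above expects to equal 1.0 ---
def _edit_distance_sketch(a, b):
    prev = list(range(len(b) + 1))
    for i, x in enumerate(a, 1):
        cur = [i]
        for j, y in enumerate(b, 1):
            cur.append(min(prev[j] + 1,               # deletion
                           cur[j - 1] + 1,            # insertion
                           prev[j - 1] + (x != y)))   # substitution
        prev = cur
    return prev[-1]

assert _edit_distance_sketch(['hallo', 'welt'], ['hallo', 'springfield']) == 1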
# class TestMacroEngine (unittest.TestCase):
#
# def setUp(self):
# Session = sessionmaker(bind=model.engine)
# self.session = Session()
#
# def testLocalMacros(self):
#
# me = NLPMacroEngine(self.session)
# discourses = me.macro_expand('de', u'(HAL,|Computer,|Du,|) (Ich bin|Ich fühle mich|Man bin ich|Da bin ich) (zufrieden|so zufrieden|glücklich|so glücklich|froh|so froh)', u'', None)
#
# self.assertEqual(len(discourses), 96)
#
# def testMacroTokens(self):
#
# me = NLPMacroEngine(self.session)
# discourses = me.macro_expand('de', u'hallo (HAL|Computer|Du|lieber computer|) wie geht es dir (heute|)',
# u'foo @MACRO_0:TSTART_W_0 bar @MACRO_0:TEND_W_0 @MACRO_0:W baz @MACRO_1:TEND_W_0?', None)
#
# self.assertEqual(len(discourses), 10)
# self.assertEqual(discourses[0][1], u'foo 1 bar 2 HAL baz 7?')
#
# discourses = me.macro_expand('de', u'foobar what is the full name of (foo|donald trump)',
# u'foo @MACRO_0:TSTART_W_0 bar @MACRO_0:TEND_W_0', None)
#
# self.assertEqual(len(discourses), 2)
# self.assertEqual(discourses[0][1], u'foo 7 bar 8')
# self.assertEqual(discourses[1][1], u'foo 7 bar 9')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
unittest.main()
| 32.752137
| 190
| 0.640919
|
import unittest
import logging
import codecs
from nltools import misc
from sqlalchemy.orm import sessionmaker
from zamiaai import model
from zamiaprolog.logicdb import LogicDB
from aiprolog.runtime import AIPrologRuntime
from aiprolog.parser import AIPrologParser
UNITTEST_MODULE = 'unittests'
UNITTEST_CONTEXT = 'unittests'
class TestAIProlog (unittest.TestCase):
def setUp(self):
config = misc.load_config('.airc')
self.db = LogicDB(model.url)
self.prolog_rt = AIPrologRuntime(self.db)
self.parser = AIPrologParser(self.db)
self.prolog_rt.set_trace(True)
self.db.clear_module(UNITTEST_MODULE)
def test_tokenize(self):
clause = self.parser.parse_line_clause_body("tokenize (de, 'hallo, welt!', X)")
logging.debug('clause: %s' % clause)
solutions = self.prolog_rt.search(clause)
logging.debug('solutions: %s' % repr(solutions))
self.assertEqual (len(solutions), 1)
self.assertEqual (len(solutions[0]['X'].l), 2)
def test_edit_distance(self):
clause = self.parser.parse_line_clause_body("edit_distance (['hallo', 'welt'], ['hallo', 'springfield'], X)")
logging.debug('clause: %s' % clause)
solutions = self.prolog_rt.search(clause)
logging.debug('solutions: %s' % repr(solutions))
self.assertEqual (len(solutions), 1)
self.assertEqual (solutions[0]['X'].f, 1.0)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING)
unittest.main()
| true
| true
|
790bbe8b5ec1f0ff832e78f115d25819d3f1882b
| 2,646
|
py
|
Python
|
IMLearn/learners/regressors/polynomial_fitting.py
|
shirlevy007/IML.HUJI
|
07e9db86f83925719242d20de52e65d2fe3786ce
|
[
"MIT"
] | null | null | null |
IMLearn/learners/regressors/polynomial_fitting.py
|
shirlevy007/IML.HUJI
|
07e9db86f83925719242d20de52e65d2fe3786ce
|
[
"MIT"
] | null | null | null |
IMLearn/learners/regressors/polynomial_fitting.py
|
shirlevy007/IML.HUJI
|
07e9db86f83925719242d20de52e65d2fe3786ce
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
from typing import NoReturn
from . import LinearRegression
from ...base import BaseEstimator
import numpy as np
# import linear_regression
class PolynomialFitting(BaseEstimator):
"""
Polynomial Fitting using Least Squares estimation
"""
def __init__(self, k: int) -> PolynomialFitting:
"""
Instantiate a polynomial fitting estimator
Parameters
----------
k : int
Degree of polynomial to fit
"""
super().__init__()
self.deg_ = k
self.vander_, self.vander_linear_ = None, LinearRegression(False)
# raise NotImplementedError()
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit Least Squares model to polynomial transformed samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
# self.vander_ = np.vander(X, self.deg_, increasing=True)
self.vander_linear_.fit(self.__transform(X), y)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
return self.vander_linear_.predict(self.__transform(X))
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under MSE loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under MSE loss function
"""
return self.vander_linear_.loss(self.__transform(X), y)
def __transform(self, X: np.ndarray) -> np.ndarray:
"""
Transform given input according to the univariate polynomial transformation
Parameters
----------
X: ndarray of shape (n_samples,)
Returns
-------
transformed: ndarray of shape (n_samples, k+1)
Vandermonde matrix of given samples up to degree k
"""
X_vander = np.vander(X, self.deg_ + 1, increasing=True)
return X_vander
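# --- worked example (assumption): the Vandermonde transform applied above ---
import numpy as np   # already imported at module top; repeated for clarity
_X = np.array([1.0, 2.0, 3.0])
_V = np.vander(_X, 2 + 1, increasing=True)   # degree k = 2 -> k + 1 columns
# each row is [1, x, x**2]:
assert (_V == np.array([[1., 1., 1.], [1., 2., 4.], [1., 3., 9.]])).all()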
| 28.451613
| 83
| 0.586546
|
from __future__ import annotations
from typing import NoReturn
from . import LinearRegression
from ...base import BaseEstimator
import numpy as np
class PolynomialFitting(BaseEstimator):
def __init__(self, k: int) -> PolynomialFitting:
super().__init__()
self.deg_ = k
self.vander_, self.vander_linear_ = None, LinearRegression(False)
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
self.vander_linear_.fit(self.__transform(X), y)
def _predict(self, X: np.ndarray) -> np.ndarray:
return self.vander_linear_.predict(self.__transform(X))
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
return self.vander_linear_.loss(self.__transform(X), y)
def __transform(self, X: np.ndarray) -> np.ndarray:
X_vander = np.vander(X, self.deg_ + 1, increasing=True)
return X_vander
| true
| true
|
790bbe8e7236044b2723c1ebc12825c204b727f0
| 49,477
|
py
|
Python
|
mindspore/nn/layer/conv.py
|
Rossil2012/mindspore
|
8a20b5d784b3fec6d32e058581ec56ec553a06a0
|
[
"Apache-2.0"
] | 1
|
2021-04-23T06:35:18.000Z
|
2021-04-23T06:35:18.000Z
|
mindspore/nn/layer/conv.py
|
Rossil2012/mindspore
|
8a20b5d784b3fec6d32e058581ec56ec553a06a0
|
[
"Apache-2.0"
] | null | null | null |
mindspore/nn/layer/conv.py
|
Rossil2012/mindspore
|
8a20b5d784b3fec6d32e058581ec56ec553a06a0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""conv"""
import numpy as np
from mindspore import log as logger
from mindspore.ops import operations as P
from mindspore.ops.primitive import constexpr
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor
from mindspore._checkparam import ParamValidator as validator, Rel
from mindspore._checkparam import Validator
from mindspore._checkparam import check_bool, twice, check_int_positive
from mindspore._extends import cell_attr_register
from ..cell import Cell
__all__ = ['Conv2d', 'Conv2dTranspose', 'DepthwiseConv2d', 'Conv1d', 'Conv1dTranspose']
class _Conv(Cell):
"""
Applies a N-D convolution over an input signal composed of several input planes.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=False):
super(_Conv, self).__init__()
self.in_channels = check_int_positive(in_channels)
self.out_channels = check_int_positive(out_channels)
self.kernel_size = kernel_size
self.stride = stride
self.pad_mode = pad_mode
self.weight_init = weight_init
self.bias_init = bias_init
if isinstance(padding, int):
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
self.padding = padding
elif isinstance(padding, tuple):
for pad in padding:
Validator.check_integer('padding item', pad, 0, Rel.GE, self.cls_name)
self.padding = padding
else:
raise TypeError("padding type must be int/tuple(int) cannot be {}!".format(type(padding)))
self.dilation = dilation
self.group = check_int_positive(group)
self.has_bias = has_bias
if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
isinstance(kernel_size[0], bool) or isinstance(kernel_size[1], bool) or \
kernel_size[0] < 1 or kernel_size[1] < 1:
raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed "
                             + str(self.kernel_size) + ", should be an int or a tuple with values equal to or greater than 1.")
if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or \
isinstance(stride[0], bool) or isinstance(stride[1], bool) or stride[0] < 1 or stride[1] < 1:
raise ValueError("Attr 'stride' of 'Conv2D' Op passed "
                             + str(self.stride) + ", should be an int or a tuple with values equal to or greater than 1.")
if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
isinstance(dilation[0], bool) or isinstance(dilation[1], bool) or dilation[0] < 1 or dilation[1] < 1:
raise ValueError("Attr 'dilation' of 'Conv2D' Op passed "
                             + str(self.dilation) + ", should be an int or a tuple with values equal to or greater than 1.")
if in_channels % group != 0:
raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by "
"attr 'group' of 'Conv2D' Op.")
if out_channels % group != 0:
raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by "
"attr 'group' of 'Conv2D' Op.")
if transposed:
shape = [in_channels, out_channels // group, *kernel_size]
else:
shape = [out_channels, in_channels // group, *kernel_size]
self.weight = Parameter(initializer(self.weight_init, shape), name='weight')
if check_bool(has_bias):
self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
else:
if self.bias_init != 'zeros':
logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
self.bias = None
def construct(self, *inputs):
"""Must be overridden by all subclasses."""
raise NotImplementedError
class Conv2d(_Conv):
r"""
2D convolution layer.
Applies a 2D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, H_{in}, W_{in})`,
    where :math:`N` is batch size, :math:`C_{in}` is channel number, and :math:`(H_{in}, W_{in})` are the height and width.
For each batch of shape :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross-correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
:math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
:math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
(\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
The first introduction can be found in paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest height and width of output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater than or equal to 1 and bounded by the height and width of the
input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> net = nn.Conv2d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
>>> net(input).shape
(1, 240, 1024, 640)
"""
@cell_attr_register
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
kernel_size = twice(kernel_size)
stride = twice(stride)
dilation = twice(dilation)
super(Conv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init)
self.conv2d = P.Conv2D(out_channel=self.out_channels,
kernel_size=self.kernel_size,
mode=1,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation,
group=self.group)
self.bias_add = P.BiasAdd()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv2d\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
def construct(self, x):
output = self.conv2d(x, self.weight)
if self.has_bias:
output = self.bias_add(output, self.bias)
return output
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={},' \
'stride={}, pad_mode={}, padding={}, dilation={}, ' \
'group={}, has_bias={},' \
'weight_init={}, bias_init={}'.format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
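# --- worked example (assumption): the "valid" output-size formula quoted in
# the Conv2d docstring, floor(1 + (H + 2*pad - k - (k-1)*(dilation-1))/stride).
def _conv_out_sketch(size, k, stride=1, pad=0, dilation=1):
    return (size + 2 * pad - k - (k - 1) * (dilation - 1)) // stride + 1

assert _conv_out_sketch(1024, 4) == 1021   # 'valid'; pad_mode='same' keeps 1024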
@constexpr
def _check_input_3d(input_shape):
if len(input_shape) != 3:
raise ValueError(f"Input should be 3d, but got shape {input_shape}")
class Conv1d(_Conv):
r"""
1D convolution layer.
Applies a 1D convolution over an input tensor which is typically of shape :math:`(N, C_{in}, W_{in})`,
where :math:`N` is batch size and :math:`C_{in}` is channel number. For each batch of shape
:math:`(C_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
    where :math:`ccor` is the cross-correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_w})`, where :math:`\text{ks_w}` is the width of the convolution kernel.
The full kernel has shape :math:`(C_{out}, C_{in} // \text{group}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output width will be
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
    (\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor`.
The first introduction of convolution layer can be found in paper `Gradient Based Learning Applied to Document
Recognition <http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (int): The data type is int. Specifies the
width of the 1D convolution window.
stride (int): The distance of kernel moving, an int number that represents
the width of movement. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The output width will be the same as the input.
The total number of padding will be calculated in the horizontal
direction and evenly distributed to left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest width of the output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (int): Implicit paddings on both sides of the input. Default: 0.
dilation (int): The data type is int. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater than or equal to 1 and bounded by the width of the
input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
divisible by the number of groups. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): An initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, W_{out})`.
Examples:
>>> net = nn.Conv1d(120, 240, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 120, 640]), mindspore.float32)
>>> net(input).shape
(1, 240, 640)
"""
@cell_attr_register
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
Validator.check_value_type("stride", stride, [int], self.cls_name)
Validator.check_value_type("padding", padding, [int], self.cls_name)
Validator.check_value_type("dilation", dilation, [int], self.cls_name)
Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
kernel_size = (1, kernel_size)
stride = (1, stride)
dilation = (1, dilation)
get_shape = P.Shape()
get_dtype = P.DType()
if isinstance(weight_init, Tensor):
weight_init_shape = get_shape(weight_init)
Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
weight_init_dtype = get_dtype(weight_init)
weight_init_value = weight_init.asnumpy()
weight_init_value = np.expand_dims(weight_init_value, 2)
weight_init = Tensor(weight_init_value, weight_init_dtype)
super(Conv1d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init)
self.padding = (0, 0, padding, padding)
self.conv2d = P.Conv2D(out_channel=self.out_channels,
kernel_size=self.kernel_size,
mode=1,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation,
group=self.group)
self.bias_add = P.BiasAdd()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv1d\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.expand_dims = P.ExpandDims()
self.squeeze = P.Squeeze(2)
self.shape = P.Shape()
def construct(self, x):
x_shape = self.shape(x)
_check_input_3d(x_shape)
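        # Lift (N, C, W) to (N, C, 1, W); the (1, kernel_size) 2D kernel then
        # slides along W only, and the squeeze below restores the 3D output.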
x = self.expand_dims(x, 2)
output = self.conv2d(x, self.weight)
if self.has_bias:
output = self.bias_add(output, self.bias)
output = self.squeeze(output)
return output
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={},' \
'stride={}, pad_mode={}, padding={}, dilation={}, ' \
'group={}, has_bias={},' \
'weight_init={}, bias_init={}'.format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class Conv2dTranspose(_Conv):
r"""
2D transposed convolution layer.
Compute a 2D transposed convolution, which is also known as a deconvolution
(although it is not an actual deconvolution).
Input is typically of shape :math:`(N, C, H, W)`, where :math:`N` is batch size and :math:`C` is channel number.
Args:
in_channels (int): The number of channels in the input space.
out_channels (int): The number of channels in the output space.
kernel_size (Union[int, tuple]): int or a tuple of 2 integers, which specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Its value should be equal to or greater than 1.
Default: 1.
        pad_mode (str): Specifies the padding mode. The optional values are
"pad", "same", "valid". Default: "same".
- pad: Implicit paddings on both sides of the input.
            - same: Adopts the way of completion.
            - valid: Adopts the way of discarding.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater than or equal to 1 and bounded by the height and width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. This is not supported on Davinci devices when group > 1. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
>>> net = nn.Conv2dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 3, 16, 50]), mindspore.float32)
>>> net(input)
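        A hedged shape check (ours, not from the original docs): with the default
        pad_mode='same' and stride=1, the output spatial size equals the input's.
        >>> net(input).shape
        (1, 64, 16, 50)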
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
kernel_size = twice(kernel_size)
stride = twice(stride)
dilation = twice(dilation)
Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
if isinstance(padding, tuple):
Validator.check_integer('padding size', len(padding), 4, Rel.EQ, self.cls_name)
# out_channels and in_channels swap.
        # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # Conv2dTranspose's out_channel actually refers to Conv2DBackpropInput's in_channel.
super(Conv2dTranspose, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=True)
self.in_channels = in_channels
self.out_channels = out_channels
self.shape = P.Shape()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.is_valid = self.pad_mode == 'valid'
self.is_same = self.pad_mode == 'same'
self.is_pad = self.pad_mode == 'pad'
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
kernel_size=kernel_size,
mode=1,
pad_mode=pad_mode,
pad=padding,
stride=stride,
dilation=dilation,
group=group)
self.bias_add = P.BiasAdd()
if isinstance(self.padding, int):
self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = (self.padding,) * 4
else:
self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = self.padding
def set_strategy(self, strategy):
self.conv2d_transpose.set_strategy(strategy)
return self
def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
"""Calculate the width and height of output."""
length = 0
filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
if self.is_valid:
if filter_size - stride_size > 0:
length = input_length * stride_size + filter_size - stride_size
else:
length = input_length * stride_size
elif self.is_same:
length = input_length * stride_size
elif self.is_pad:
length = input_length * stride_size - padding + filter_size - stride_size
return length
def construct(self, x):
n, _, h, w = self.shape(x)
h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
self.padding_top + self.padding_bottom)
w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
self.padding_left + self.padding_right)
if self.has_bias:
return self.bias_add(self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out)),
self.bias)
return self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={},' \
'stride={}, pad_mode={}, padding={}, dilation={}, ' \
'group={}, has_bias={},' \
'weight_init={}, bias_init={}'.format(self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class Conv1dTranspose(_Conv):
r"""
1D transposed convolution layer.
Compute a 1D transposed convolution, which is also known as a deconvolution
(although it is not an actual deconvolution).
Input is typically of shape :math:`(N, C, W)`, where :math:`N` is batch size and :math:`C` is channel number.
Args:
in_channels (int): The number of channels in the input space.
out_channels (int): The number of channels in the output space.
kernel_size (int): int, which specifies the width of the 1D convolution window.
stride (int): The distance of kernel moving, an int number that represents
the width of movement. Default: 1.
        pad_mode (str): Specifies the padding mode. The optional values are
"pad", "same", "valid". Default: "same".
- pad: Implicit paddings on both sides of the input.
            - same: Adopts the way of completion.
            - valid: Adopts the way of discarding.
padding (int): Implicit paddings on both sides of the input. Default: 0.
dilation (int): The data type is int. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
            be greater than or equal to 1 and bounded by the width of the
input. Default: 1.
group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. This is not supported on Davinci devices when group > 1. Default: 1.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a numbers.Number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, W_{out})`.
Examples:
>>> net = nn.Conv1dTranspose(3, 64, 4, has_bias=False, weight_init='normal')
>>> input = Tensor(np.ones([1, 3, 50]), mindspore.float32)
>>> net(input)
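        A hedged shape check (ours, not from the original docs): pad_mode defaults
        to 'same' and stride to 1, so the output width matches the input width.
        >>> net(input).shape
        (1, 64, 50)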
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
Validator.check_value_type("stride", stride, [int], self.cls_name)
Validator.check_value_type("padding", padding, [int], self.cls_name)
Validator.check_value_type("dilation", dilation, [int], self.cls_name)
Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
kernel_size = (1, kernel_size)
stride = (1, stride)
dilation = (1, dilation)
get_shape = P.Shape()
get_dtype = P.DType()
if isinstance(weight_init, Tensor):
weight_init_shape = get_shape(weight_init)
Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
weight_init_dtype = get_dtype(weight_init)
weight_init_value = weight_init.asnumpy()
weight_init_value = np.expand_dims(weight_init_value, 2)
weight_init = Tensor(weight_init_value, weight_init_dtype)
# out_channels and in_channels swap.
        # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel,
        # Conv1dTranspose's out_channel actually refers to Conv2DBackpropInput's in_channel.
super(Conv1dTranspose, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=True)
self.padding = (0, 0, padding, padding)
self.in_channels = in_channels
self.out_channels = out_channels
self.shape = P.Shape()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv1dTranspose\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.is_valid = self.pad_mode == 'valid'
self.is_same = self.pad_mode == 'same'
self.is_pad = self.pad_mode == 'pad'
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
        # because Conv2DBackpropInput's out_channel refers to Conv2D's out_channel.
self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
kernel_size=kernel_size,
mode=1,
pad_mode=pad_mode,
pad=self.padding,
stride=stride,
dilation=dilation,
group=group)
self.bias_add = P.BiasAdd()
self.expand_dims = P.ExpandDims()
self.squeeze = P.Squeeze(2)
def set_strategy(self, strategy):
self.conv2d_transpose.set_strategy(strategy)
return self
def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
"""Calculate the width and height of output."""
length = 0
filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
if self.is_valid:
if filter_size - stride_size > 0:
length = input_length * stride_size + filter_size - stride_size
else:
length = input_length * stride_size
elif self.is_same:
length = input_length * stride_size
elif self.is_pad:
length = input_length * stride_size - padding + filter_size - stride_size
return length
def construct(self, x):
x_shape = self.shape(x)
_check_input_3d(x_shape)
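        # Same 3D -> 4D trick as Conv1d: run the transposed conv as a 2D op
        # over a dummy height of 1, then squeeze that axis back out below.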
x = self.expand_dims(x, 2)
n, _, h, w = self.shape(x)
h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
self.padding[0] + self.padding[1])
w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
self.padding[2] + self.padding[3])
output = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
if self.has_bias:
output = self.bias_add(output, self.bias)
output = self.squeeze(output)
return output
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={},' \
'stride={}, pad_mode={}, padding={}, dilation={}, ' \
'group={}, has_bias={},' \
'weight_init={}, bias_init={}'.format(self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class DepthwiseConv2d(Cell):
r"""
2D depthwise convolution layer.
    Applies a 2D depthwise convolution over an input tensor which is typically of shape
    :math:`(N, C_{in}, H_{in}, W_{in})`, where :math:`N` is batch size and :math:`C_{in}` is channel number.
    For each batch of shape :math:`(C_{in}, H_{in}, W_{in})`, the formula is defined as:
.. math::
out_j = \sum_{i=0}^{C_{in} - 1} ccor(W_{ij}, X_i) + b_j,
where :math:`ccor` is the cross correlation operator, :math:`C_{in}` is the input channel number, :math:`j` ranges
from :math:`0` to :math:`C_{out} - 1`, :math:`W_{ij}` corresponds to the :math:`i`-th channel of the :math:`j`-th
filter and :math:`out_{j}` corresponds to the :math:`j`-th channel of the output. :math:`W_{ij}` is a slice
of kernel and it has shape :math:`(\text{ks_h}, \text{ks_w})`, where :math:`\text{ks_h}` and
:math:`\text{ks_w}` are the height and width of the convolution kernel. The full kernel has shape
:math:`(C_{out}, C_{in} // \text{group}, \text{ks_h}, \text{ks_w})`, where group is the group number
to split the input in the channel dimension.
If the 'pad_mode' is set to be "valid", the output height and width will be
:math:`\left \lfloor{1 + \frac{H_{in} + 2 \times \text{padding} - \text{ks_h} -
(\text{ks_h} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` and
:math:`\left \lfloor{1 + \frac{W_{in} + 2 \times \text{padding} - \text{ks_w} -
(\text{ks_w} - 1) \times (\text{dilation} - 1) }{\text{stride}}} \right \rfloor` respectively.
    The first introduction can be found in the paper `Gradient Based Learning Applied to Document Recognition
<http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf>`_.
Args:
in_channels (int): The number of input channel :math:`C_{in}`.
out_channels (int): The number of output channel :math:`C_{out}`.
kernel_size (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the height
and width of the 2D convolution window. Single int means the value is for both the height and the width of
the kernel. A tuple of 2 ints means the first value is for the height and the other is for the
width of the kernel.
stride (Union[int, tuple[int]]): The distance of kernel moving, an int number that represents
the height and width of movement are both strides, or a tuple of two int numbers that
represent height and width of movement respectively. Default: 1.
pad_mode (str): Specifies padding mode. The optional values are
"same", "valid", "pad". Default: "same".
- same: Adopts the way of completion. The height and width of the output will be the same as
the input. The total number of padding will be calculated in horizontal and vertical
directions and evenly distributed to top and bottom, left and right if possible. Otherwise, the
last extra padding will be done from the bottom and the right side. If this mode is set, `padding`
must be 0.
- valid: Adopts the way of discarding. The possible largest height and width of output will be returned
without padding. Extra pixels will be discarded. If this mode is set, `padding`
must be 0.
- pad: Implicit paddings on both sides of the input. The number of `padding` will be padded to the input
Tensor borders. `padding` should be greater than or equal to 0.
padding (Union[int, tuple[int]]): Implicit paddings on both sides of the input. If `padding` is one integer,
the paddings of top, bottom, left and right are the same, equal to padding. If `padding` is a tuple
with four integers, the paddings of top, bottom, left and right will be equal to padding[0],
padding[1], padding[2], and padding[3] accordingly. Default: 0.
dilation (Union[int, tuple[int]]): The data type is int or a tuple of 2 integers. Specifies the dilation rate
to use for dilated convolution. If set to be :math:`k > 1`, there will
be :math:`k - 1` pixels skipped for each sampling location. Its value should
be greater than or equal to 1 and bounded by the height and width of the
input. Default: 1.
        group (int): Split filter into groups, `in_channels` and `out_channels` should be
            divisible by the number of groups. If `group` is None, it will be set to the value of `in_channels`.
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
weight_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the convolution kernel.
It can be a Tensor, a string, an Initializer or a number. When a string is specified,
values from 'TruncatedNormal', 'Normal', 'Uniform', 'HeUniform' and 'XavierUniform' distributions as well
as constant 'One' and 'Zero' distributions are possible. Alias 'xavier_uniform', 'he_uniform', 'ones'
and 'zeros' are acceptable. Uppercase and lowercase are both acceptable. Refer to the values of
Initializer for more details. Default: 'normal'.
bias_init (Union[Tensor, str, Initializer, numbers.Number]): Initializer for the bias vector. Possible
Initializer and string are the same as 'weight_init'. Refer to the values of
Initializer for more details. Default: 'zeros'.
Inputs:
- **input** (Tensor) - Tensor of shape :math:`(N, C_{in}, H_{in}, W_{in})`.
Outputs:
Tensor of shape :math:`(N, C_{out}, H_{out}, W_{out})`.
Examples:
        >>> net = nn.DepthwiseConv2d(120, 120, 4, group=120, has_bias=False, weight_init='normal')
        >>> input = Tensor(np.ones([1, 120, 1024, 640]), mindspore.float32)
        >>> net(input).shape
        (1, 120, 1024, 640)
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
group,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
super(DepthwiseConv2d, self).__init__()
self.kernel_size = twice(kernel_size)
self.stride = twice(stride)
self.dilation = twice(dilation)
self.in_channels = check_int_positive(in_channels)
self.out_channels = check_int_positive(out_channels)
if group is None:
group = in_channels
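        # channel_multiplier is fixed to 1 below, so the layer only shape-checks
        # when group == in_channels == out_channels, which the validators enforce.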
validator.check_integer('group', group, in_channels, Rel.EQ)
validator.check_integer('group', group, out_channels, Rel.EQ)
validator.check_integer('group', group, 1, Rel.GE)
self.pad_mode = pad_mode
self.dilation = dilation
self.group = group
self.has_bias = has_bias
self.weight_init = weight_init
self.bias_init = bias_init
Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
if isinstance(padding, tuple):
Validator.check_integer('padding size', len(padding), 4, Rel.EQ, self.cls_name)
self.padding = padding
self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
kernel_size=self.kernel_size,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation)
self.bias_add = P.BiasAdd()
weight_shape = [1, in_channels, *self.kernel_size]
self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
else:
if bias_init != 'zeros':
logger.warning("value of `has_bias` is False, value of `bias_init` will be ignore.")
self.bias = None
def construct(self, x):
out = self.conv(x, self.weight)
if self.has_bias:
out = self.bias_add(out, self.bias)
return out
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={}, stride={}, ' \
'pad_mode={}, padding={}, dilation={}, group={}, ' \
'has_bias={}, weight_init={}, bias_init={}'.format(
self.in_channels, self.out_channels, self.kernel_size, self.stride,
self.pad_mode, self.padding, self.dilation, self.group,
self.has_bias, self.weight_init, self.bias_init)
if self.has_bias:
s += ', bias={}'.format(self.bias)
return s
| 51.592284
| 119
| 0.581462
|
import numpy as np
from mindspore import log as logger
from mindspore.ops import operations as P
from mindspore.ops.primitive import constexpr
from mindspore.common.parameter import Parameter
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor
from mindspore._checkparam import ParamValidator as validator, Rel
from mindspore._checkparam import Validator
from mindspore._checkparam import check_bool, twice, check_int_positive
from mindspore._extends import cell_attr_register
from ..cell import Cell
__all__ = ['Conv2d', 'Conv2dTranspose', 'DepthwiseConv2d', 'Conv1d', 'Conv1dTranspose']
class _Conv(Cell):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=False):
super(_Conv, self).__init__()
self.in_channels = check_int_positive(in_channels)
self.out_channels = check_int_positive(out_channels)
self.kernel_size = kernel_size
self.stride = stride
self.pad_mode = pad_mode
self.weight_init = weight_init
self.bias_init = bias_init
if isinstance(padding, int):
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
self.padding = padding
elif isinstance(padding, tuple):
for pad in padding:
Validator.check_integer('padding item', pad, 0, Rel.GE, self.cls_name)
self.padding = padding
else:
raise TypeError("padding type must be int/tuple(int) cannot be {}!".format(type(padding)))
self.dilation = dilation
self.group = check_int_positive(group)
self.has_bias = has_bias
if (not isinstance(kernel_size[0], int)) or (not isinstance(kernel_size[1], int)) or \
isinstance(kernel_size[0], bool) or isinstance(kernel_size[1], bool) or \
kernel_size[0] < 1 or kernel_size[1] < 1:
raise ValueError("Attr 'kernel_size' of 'Conv2D' Op passed "
+ str(self.kernel_size) + ", should be a int or tuple and equal to or greater than 1.")
if (not isinstance(stride[0], int)) or (not isinstance(stride[1], int)) or \
isinstance(stride[0], bool) or isinstance(stride[1], bool) or stride[0] < 1 or stride[1] < 1:
raise ValueError("Attr 'stride' of 'Conv2D' Op passed "
+ str(self.stride) + ", should be a int or tuple and equal to or greater than 1.")
if (not isinstance(dilation[0], int)) or (not isinstance(dilation[1], int)) or \
isinstance(dilation[0], bool) or isinstance(dilation[1], bool) or dilation[0] < 1 or dilation[1] < 1:
raise ValueError("Attr 'dilation' of 'Conv2D' Op passed "
+ str(self.dilation) + ", should be a int or tuple and equal to or greater than 1.")
if in_channels % group != 0:
raise ValueError("Attr 'in_channels' of 'Conv2D' Op must be divisible by "
"attr 'group' of 'Conv2D' Op.")
if out_channels % group != 0:
raise ValueError("Attr 'out_channels' of 'Conv2D' Op must be divisible by "
"attr 'group' of 'Conv2D' Op.")
if transposed:
shape = [in_channels, out_channels // group, *kernel_size]
else:
shape = [out_channels, in_channels // group, *kernel_size]
self.weight = Parameter(initializer(self.weight_init, shape), name='weight')
if check_bool(has_bias):
self.bias = Parameter(initializer(self.bias_init, [out_channels]), name='bias')
else:
if self.bias_init != 'zeros':
logger.warning("Value of 'has_bias' is False, value of 'bias_init' will be ignored.")
self.bias = None
def construct(self, *inputs):
raise NotImplementedError
class Conv2d(_Conv):
@cell_attr_register
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
kernel_size = twice(kernel_size)
stride = twice(stride)
dilation = twice(dilation)
super(Conv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init)
self.conv2d = P.Conv2D(out_channel=self.out_channels,
kernel_size=self.kernel_size,
mode=1,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation,
group=self.group)
self.bias_add = P.BiasAdd()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv2d\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
def construct(self, x):
output = self.conv2d(x, self.weight)
if self.has_bias:
output = self.bias_add(output, self.bias)
return output
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={},' \
'stride={}, pad_mode={}, padding={}, dilation={}, ' \
'group={}, has_bias={},' \
'weight_init={}, bias_init={}'.format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
@constexpr
def _check_input_3d(input_shape):
if len(input_shape) != 3:
raise ValueError(f"Input should be 3d, but got shape {input_shape}")
class Conv1d(_Conv):
@cell_attr_register
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
Validator.check_value_type("stride", stride, [int], self.cls_name)
Validator.check_value_type("padding", padding, [int], self.cls_name)
Validator.check_value_type("dilation", dilation, [int], self.cls_name)
Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
kernel_size = (1, kernel_size)
stride = (1, stride)
dilation = (1, dilation)
get_shape = P.Shape()
get_dtype = P.DType()
if isinstance(weight_init, Tensor):
weight_init_shape = get_shape(weight_init)
Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
weight_init_dtype = get_dtype(weight_init)
weight_init_value = weight_init.asnumpy()
weight_init_value = np.expand_dims(weight_init_value, 2)
weight_init = Tensor(weight_init_value, weight_init_dtype)
super(Conv1d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init)
self.padding = (0, 0, padding, padding)
self.conv2d = P.Conv2D(out_channel=self.out_channels,
kernel_size=self.kernel_size,
mode=1,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation,
group=self.group)
self.bias_add = P.BiasAdd()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv1d\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.expand_dims = P.ExpandDims()
self.squeeze = P.Squeeze(2)
self.shape = P.Shape()
def construct(self, x):
x_shape = self.shape(x)
_check_input_3d(x_shape)
x = self.expand_dims(x, 2)
output = self.conv2d(x, self.weight)
if self.has_bias:
output = self.bias_add(output, self.bias)
output = self.squeeze(output)
return output
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={},' \
'stride={}, pad_mode={}, padding={}, dilation={}, ' \
'group={}, has_bias={},' \
'weight_init={}, bias_init={}'.format(
self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class Conv2dTranspose(_Conv):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
kernel_size = twice(kernel_size)
stride = twice(stride)
dilation = twice(dilation)
Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
if isinstance(padding, tuple):
Validator.check_integer('padding size', len(padding), 4, Rel.EQ, self.cls_name)
super(Conv2dTranspose, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=True)
self.in_channels = in_channels
self.out_channels = out_channels
self.shape = P.Shape()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv2dTranspose\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.is_valid = self.pad_mode == 'valid'
self.is_same = self.pad_mode == 'same'
self.is_pad = self.pad_mode == 'pad'
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
kernel_size=kernel_size,
mode=1,
pad_mode=pad_mode,
pad=padding,
stride=stride,
dilation=dilation,
group=group)
self.bias_add = P.BiasAdd()
if isinstance(self.padding, int):
self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = (self.padding,) * 4
else:
self.padding_top, self.padding_bottom, self.padding_left, self.padding_right = self.padding
def set_strategy(self, strategy):
self.conv2d_transpose.set_strategy(strategy)
return self
def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
length = 0
filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
if self.is_valid:
if filter_size - stride_size > 0:
length = input_length * stride_size + filter_size - stride_size
else:
length = input_length * stride_size
elif self.is_same:
length = input_length * stride_size
elif self.is_pad:
length = input_length * stride_size - padding + filter_size - stride_size
return length
def construct(self, x):
n, _, h, w = self.shape(x)
h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
self.padding_top + self.padding_bottom)
w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
self.padding_left + self.padding_right)
if self.has_bias:
return self.bias_add(self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out)),
self.bias)
return self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={},' \
'stride={}, pad_mode={}, padding={}, dilation={}, ' \
'group={}, has_bias={},' \
'weight_init={}, bias_init={}'.format(self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class Conv1dTranspose(_Conv):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
group=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
Validator.check_value_type("kernel_size", kernel_size, [int], self.cls_name)
Validator.check_value_type("stride", stride, [int], self.cls_name)
Validator.check_value_type("padding", padding, [int], self.cls_name)
Validator.check_value_type("dilation", dilation, [int], self.cls_name)
Validator.check_integer('kernel_size', kernel_size, 1, Rel.GE, self.cls_name)
Validator.check_integer('stride', stride, 1, Rel.GE, self.cls_name)
Validator.check_integer('padding', padding, 0, Rel.GE, self.cls_name)
Validator.check_integer('dilation', dilation, 1, Rel.GE, self.cls_name)
kernel_size = (1, kernel_size)
stride = (1, stride)
dilation = (1, dilation)
get_shape = P.Shape()
get_dtype = P.DType()
if isinstance(weight_init, Tensor):
weight_init_shape = get_shape(weight_init)
Validator.check_integer('weight_init_shape', len(weight_init_shape), 3, Rel.EQ, self.cls_name)
weight_init_dtype = get_dtype(weight_init)
weight_init_value = weight_init.asnumpy()
weight_init_value = np.expand_dims(weight_init_value, 2)
weight_init = Tensor(weight_init_value, weight_init_dtype)
super(Conv1dTranspose, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
pad_mode,
padding,
dilation,
group,
has_bias,
weight_init,
bias_init,
transposed=True)
self.padding = (0, 0, padding, padding)
self.in_channels = in_channels
self.out_channels = out_channels
self.shape = P.Shape()
if pad_mode not in ('valid', 'same', 'pad'):
raise ValueError('Attr \'pad_mode\' of \'Conv1dTranspose\' Op passed '
+ str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
self.is_valid = self.pad_mode == 'valid'
self.is_same = self.pad_mode == 'same'
self.is_pad = self.pad_mode == 'pad'
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
self.conv2d_transpose = P.Conv2DBackpropInput(out_channel=in_channels,
kernel_size=kernel_size,
mode=1,
pad_mode=pad_mode,
pad=self.padding,
stride=stride,
dilation=dilation,
group=group)
self.bias_add = P.BiasAdd()
self.expand_dims = P.ExpandDims()
self.squeeze = P.Squeeze(2)
def set_strategy(self, strategy):
self.conv2d_transpose.set_strategy(strategy)
return self
def _deconv_output_length(self, input_length, filter_size, stride_size, dilation_size, padding):
length = 0
filter_size = filter_size + (filter_size - 1) * (dilation_size - 1)
if self.is_valid:
if filter_size - stride_size > 0:
length = input_length * stride_size + filter_size - stride_size
else:
length = input_length * stride_size
elif self.is_same:
length = input_length * stride_size
elif self.is_pad:
length = input_length * stride_size - padding + filter_size - stride_size
return length
def construct(self, x):
x_shape = self.shape(x)
_check_input_3d(x_shape)
x = self.expand_dims(x, 2)
n, _, h, w = self.shape(x)
h_out = self._deconv_output_length(h, self.kernel_size[0], self.stride[0], self.dilation[0],
self.padding[0] + self.padding[1])
w_out = self._deconv_output_length(w, self.kernel_size[1], self.stride[1], self.dilation[1],
self.padding[2] + self.padding[3])
output = self.conv2d_transpose(x, self.weight, (n, self.out_channels, h_out, w_out))
if self.has_bias:
output = self.bias_add(output, self.bias)
output = self.squeeze(output)
return output
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={},' \
'stride={}, pad_mode={}, padding={}, dilation={}, ' \
'group={}, has_bias={},' \
'weight_init={}, bias_init={}'.format(self.in_channels,
self.out_channels,
self.kernel_size,
self.stride,
self.pad_mode,
self.padding,
self.dilation,
self.group,
self.has_bias,
self.weight_init,
self.bias_init)
return s
class DepthwiseConv2d(Cell):
def __init__(self,
in_channels,
out_channels,
kernel_size,
group,
stride=1,
pad_mode='same',
padding=0,
dilation=1,
has_bias=False,
weight_init='normal',
bias_init='zeros'):
super(DepthwiseConv2d, self).__init__()
self.kernel_size = twice(kernel_size)
self.stride = twice(stride)
self.dilation = twice(dilation)
self.in_channels = check_int_positive(in_channels)
self.out_channels = check_int_positive(out_channels)
if group is None:
group = in_channels
validator.check_integer('group', group, in_channels, Rel.EQ)
validator.check_integer('group', group, out_channels, Rel.EQ)
validator.check_integer('group', group, 1, Rel.GE)
self.pad_mode = pad_mode
self.dilation = dilation
self.group = group
self.has_bias = has_bias
self.weight_init = weight_init
self.bias_init = bias_init
Validator.check_value_type('padding', padding, (int, tuple), self.cls_name)
if isinstance(padding, tuple):
Validator.check_integer('padding size', len(padding), 4, Rel.EQ, self.cls_name)
self.padding = padding
self.conv = P.DepthwiseConv2dNative(channel_multiplier=1,
kernel_size=self.kernel_size,
pad_mode=self.pad_mode,
pad=self.padding,
stride=self.stride,
dilation=self.dilation)
self.bias_add = P.BiasAdd()
weight_shape = [1, in_channels, *self.kernel_size]
self.weight = Parameter(initializer(weight_init, weight_shape), name='weight')
if check_bool(has_bias):
self.bias = Parameter(initializer(bias_init, [out_channels]), name='bias')
else:
if bias_init != 'zeros':
logger.warning("value of `has_bias` is False, value of `bias_init` will be ignore.")
self.bias = None
def construct(self, x):
out = self.conv(x, self.weight)
if self.has_bias:
out = self.bias_add(out, self.bias)
return out
def extend_repr(self):
s = 'input_channels={}, output_channels={}, kernel_size={}, stride={}, ' \
'pad_mode={}, padding={}, dilation={}, group={}, ' \
'has_bias={}, weight_init={}, bias_init={}'.format(
self.in_channels, self.out_channels, self.kernel_size, self.stride,
self.pad_mode, self.padding, self.dilation, self.group,
self.has_bias, self.weight_init, self.bias_init)
if self.has_bias:
s += ', bias={}'.format(self.bias)
return s
| true
| true
|
790bbed35afb5241238f5607dd49024352cacced
| 4,624
|
py
|
Python
|
chroma_core/services/job_scheduler/lock_cache.py
|
beevans/integrated-manager-for-lustre
|
6b7e49b8a58058e6139ad815a4388f21a581dfa0
|
[
"MIT"
] | 52
|
2018-09-13T03:26:23.000Z
|
2022-03-25T16:51:37.000Z
|
chroma_core/services/job_scheduler/lock_cache.py
|
beevans/integrated-manager-for-lustre
|
6b7e49b8a58058e6139ad815a4388f21a581dfa0
|
[
"MIT"
] | 1,264
|
2018-06-15T19:50:57.000Z
|
2022-03-28T08:19:04.000Z
|
chroma_core/services/job_scheduler/lock_cache.py
|
beevans/integrated-manager-for-lustre
|
6b7e49b8a58058e6139ad815a4388f21a581dfa0
|
[
"MIT"
] | 27
|
2018-06-18T08:51:59.000Z
|
2022-03-16T15:35:34.000Z
|
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from collections import defaultdict
import json
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
class LockCache(object):
    # Lock change receivers are called whenever a lock is added or removed, allowing
    # other components to react to the change. An example would be long polling.
    # Each receiver is called with the affected lock and LOCK_ADD or LOCK_REMOVE as the parameter.
lock_change_receivers = []
LOCK_ADD = 1
LOCK_REMOVE = 2
def __init__(self):
from chroma_core.models import Job, StateLock
self.write_locks = []
self.write_by_item = defaultdict(list)
self.read_locks = []
self.read_by_item = defaultdict(list)
self.all_by_job = defaultdict(list)
self.all_by_item = defaultdict(list)
for job in Job.objects.filter(~Q(state="complete")):
if job.locks_json:
locks = json.loads(job.locks_json)
for lock in locks:
self._add(StateLock.from_dict(job, lock))
def call_receivers(self, lock, add_remove):
for lock_change_receiver in self.lock_change_receivers:
lock_change_receiver(lock, add_remove)
def remove_job(self, job):
locks = list(self.all_by_job[job.id])
n = len(locks)
for lock in locks:
if lock.write:
self.write_locks.remove(lock)
self.write_by_item[lock.locked_item].remove(lock)
else:
self.read_locks.remove(lock)
self.read_by_item[lock.locked_item].remove(lock)
self.all_by_job[job.id].remove(lock)
self.all_by_item[lock.locked_item].remove(lock)
self.call_receivers(lock, self.LOCK_REMOVE)
return n
def add(self, lock):
self._add(lock)
def _add(self, lock):
assert lock.job.id is not None
if lock.write:
self.write_locks.append(lock)
self.write_by_item[lock.locked_item].append(lock)
else:
self.read_locks.append(lock)
self.read_by_item[lock.locked_item].append(lock)
self.all_by_job[lock.job.id].append(lock)
self.all_by_item[lock.locked_item].append(lock)
self.call_receivers(lock, self.LOCK_ADD)
def get_by_job(self, job):
return self.all_by_job[job.id]
def get_all(self, locked_item):
return self.all_by_item[locked_item]
def get_latest_write(self, locked_item, not_job=None):
try:
if not_job is not None:
return sorted(
[l for l in self.write_by_item[locked_item] if l.job != not_job],
                    key=lambda lock: lock.job.id,
                )[-1]
            return sorted(self.write_by_item[locked_item], key=lambda lock: lock.job.id)[-1]
except IndexError:
return None
def get_read_locks(self, locked_item, after, not_job):
return [x for x in self.read_by_item[locked_item] if after <= x.job.id and x.job != not_job]
def get_write(self, locked_item):
return self.write_by_item[locked_item]
def get_by_locked_item(self, item):
return self.all_by_item[item]
def get_write_by_locked_item(self):
result = {}
for locked_item, locks in self.write_by_item.items():
if locks:
                result[locked_item] = sorted(locks, key=lambda lock: lock.job.id)[-1]
return result
def lock_change_receiver():
"""
    A decorator for connecting receivers to notifications that a lock has changed.
    @lock_change_receiver()
    def signal_receiver(lock, add_remove):
...
"""
def _decorator(func):
LockCache.lock_change_receivers.append(func)
return func
return _decorator
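# A minimal usage sketch (ours; 'job' and 'lock_dict' are hypothetical, with
# lock_dict shaped like an entry in job.locks_json):
#     cache = LockCache()
#     cache.add(StateLock.from_dict(job, lock_dict))
#     cache.get_latest_write(locked_item)  # newest write lock on the item, or None
#     cache.remove_job(job)                # drops every lock the job held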
def to_lock_json(lock, add_remove=LockCache.LOCK_ADD):
if getattr(lock.locked_item, "downcast", None) and callable(lock.locked_item.downcast):
item = lock.locked_item.downcast()
else:
item = lock.locked_item
return {
"job_id": lock.job.id,
"content_type_id": ContentType.objects.get_for_model(item).id,
"item_id": lock.locked_item.id,
"uuid": lock.uuid,
"description": lock.job.description(),
"lock_type": "write" if lock.write else "read",
"action": "add" if add_remove == LockCache.LOCK_ADD else "remove",
}
| 33.028571
| 103
| 0.632353
|
from collections import defaultdict
import json
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
class LockCache(object):
lock_change_receivers = []
LOCK_ADD = 1
LOCK_REMOVE = 2
def __init__(self):
from chroma_core.models import Job, StateLock
self.write_locks = []
self.write_by_item = defaultdict(list)
self.read_locks = []
self.read_by_item = defaultdict(list)
self.all_by_job = defaultdict(list)
self.all_by_item = defaultdict(list)
for job in Job.objects.filter(~Q(state="complete")):
if job.locks_json:
locks = json.loads(job.locks_json)
for lock in locks:
self._add(StateLock.from_dict(job, lock))
def call_receivers(self, lock, add_remove):
for lock_change_receiver in self.lock_change_receivers:
lock_change_receiver(lock, add_remove)
def remove_job(self, job):
locks = list(self.all_by_job[job.id])
n = len(locks)
for lock in locks:
if lock.write:
self.write_locks.remove(lock)
self.write_by_item[lock.locked_item].remove(lock)
else:
self.read_locks.remove(lock)
self.read_by_item[lock.locked_item].remove(lock)
self.all_by_job[job.id].remove(lock)
self.all_by_item[lock.locked_item].remove(lock)
self.call_receivers(lock, self.LOCK_REMOVE)
return n
def add(self, lock):
self._add(lock)
def _add(self, lock):
assert lock.job.id is not None
if lock.write:
self.write_locks.append(lock)
self.write_by_item[lock.locked_item].append(lock)
else:
self.read_locks.append(lock)
self.read_by_item[lock.locked_item].append(lock)
self.all_by_job[lock.job.id].append(lock)
self.all_by_item[lock.locked_item].append(lock)
self.call_receivers(lock, self.LOCK_ADD)
def get_by_job(self, job):
return self.all_by_job[job.id]
def get_all(self, locked_item):
return self.all_by_item[locked_item]
def get_latest_write(self, locked_item, not_job=None):
try:
if not_job is not None:
return sorted(
[l for l in self.write_by_item[locked_item] if l.job != not_job],
                    key=lambda lock: lock.job.id,
                )[-1]
            return sorted(self.write_by_item[locked_item], key=lambda lock: lock.job.id)[-1]
except IndexError:
return None
def get_read_locks(self, locked_item, after, not_job):
return [x for x in self.read_by_item[locked_item] if after <= x.job.id and x.job != not_job]
def get_write(self, locked_item):
return self.write_by_item[locked_item]
def get_by_locked_item(self, item):
return self.all_by_item[item]
def get_write_by_locked_item(self):
result = {}
for locked_item, locks in self.write_by_item.items():
if locks:
                result[locked_item] = sorted(locks, key=lambda lock: lock.job.id)[-1]
return result
def lock_change_receiver():
def _decorator(func):
LockCache.lock_change_receivers.append(func)
return func
return _decorator
def to_lock_json(lock, add_remove=LockCache.LOCK_ADD):
if getattr(lock.locked_item, "downcast", None) and callable(lock.locked_item.downcast):
item = lock.locked_item.downcast()
else:
item = lock.locked_item
return {
"job_id": lock.job.id,
"content_type_id": ContentType.objects.get_for_model(item).id,
"item_id": lock.locked_item.id,
"uuid": lock.uuid,
"description": lock.job.description(),
"lock_type": "write" if lock.write else "read",
"action": "add" if add_remove == LockCache.LOCK_ADD else "remove",
}
| true
| true
|
790bbee323f40bb34a051962085378910292dc4b
| 16,503
|
py
|
Python
|
dist/awscli/customizations/datapipeline/__init__.py
|
claytonbrown/SublimeLinter-contrib-AWS-Cloudformation-JSON
|
bb778ee4ff56e95fc8ee76b8a20deac8a9894bf2
|
[
"MIT"
] | null | null | null |
dist/awscli/customizations/datapipeline/__init__.py
|
claytonbrown/SublimeLinter-contrib-AWS-Cloudformation-JSON
|
bb778ee4ff56e95fc8ee76b8a20deac8a9894bf2
|
[
"MIT"
] | null | null | null |
dist/awscli/customizations/datapipeline/__init__.py
|
claytonbrown/SublimeLinter-contrib-AWS-Cloudformation-JSON
|
bb778ee4ff56e95fc8ee76b8a20deac8a9894bf2
|
[
"MIT"
] | null | null | null |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
from datetime import datetime, timedelta
from awscli.formatter import get_formatter
from awscli.arguments import CustomArgument
from awscli.customizations.commands import BasicCommand
from awscli.customizations.datapipeline import translator
from awscli.customizations.datapipeline.createdefaultroles \
import CreateDefaultRoles
from awscli.customizations.datapipeline.listrunsformatter \
import ListRunsFormatter
DEFINITION_HELP_TEXT = """\
The JSON pipeline definition. If the pipeline definition
is in a file you can use the file://<filename> syntax to
specify a filename.
"""
PARAMETER_OBJECTS_HELP_TEXT = """\
The JSON parameter objects. If the parameter objects are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter objects provided
on the command line replace the ones in the definition.
"""
PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. If the parameter values are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter values provided
on the command line replace the ones in the definition.
"""
INLINE_PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. You can specify these as
key-value pairs in the key=value format. Multiple parameters
are separated by a space. For list type parameter values
you can use the same key name and specify each value as
a key value pair. e.g. arrayValue=value1 arrayValue=value2
"""
class DocSectionNotFoundError(Exception):
pass
class ParameterDefinitionError(Exception):
def __init__(self, msg):
full_msg = ("Error in parameter: %s\n" % msg)
super(ParameterDefinitionError, self).__init__(full_msg)
self.msg = msg
def register_customizations(cli):
cli.register(
'building-argument-table.datapipeline.put-pipeline-definition',
add_pipeline_definition)
cli.register(
'building-argument-table.datapipeline.activate-pipeline',
activate_pipeline_definition)
cli.register(
'after-call.datapipeline.GetPipelineDefinition',
translate_definition)
cli.register(
'building-command-table.datapipeline',
register_commands)
cli.register_last(
'doc-output.datapipeline.get-pipeline-definition',
document_translation)
def register_commands(command_table, session, **kwargs):
command_table['list-runs'] = ListRunsCommand(session)
command_table['create-default-roles'] = CreateDefaultRoles(session)
def document_translation(help_command, **kwargs):
# Remove all the writes until we get to the output.
# I don't think this is the ideal way to do this, we should
# improve our plugin/doc system to make this easier.
doc = help_command.doc
current = ''
while current != '======\nOutput\n======':
try:
current = doc.pop_write()
except IndexError:
# This should never happen, but in the rare case that it does
# we should be raising something with a helpful error message.
raise DocSectionNotFoundError(
'Could not find the "output" section for the command: %s'
% help_command)
doc.write('======\nOutput\n======')
doc.write(
'\nThe output of this command is the pipeline definition, which'
' is documented in the '
'`Pipeline Definition File Syntax '
'<http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/'
'dp-writing-pipeline-definition.html>`__')
def add_pipeline_definition(argument_table, **kwargs):
argument_table['pipeline-definition'] = PipelineDefinitionArgument(
'pipeline-definition', required=True,
help_text=DEFINITION_HELP_TEXT)
argument_table['parameter-objects'] = ParameterObjectsArgument(
'parameter-objects', required=False,
help_text=PARAMETER_OBJECTS_HELP_TEXT)
argument_table['parameter-values-uri'] = ParameterValuesArgument(
'parameter-values-uri',
required=False,
help_text=PARAMETER_VALUES_HELP_TEXT)
# Need to use an argument model for inline parameters to accept a list
argument_table['parameter-values'] = ParameterValuesInlineArgument(
'parameter-values',
required=False,
nargs='+',
help_text=INLINE_PARAMETER_VALUES_HELP_TEXT)
    # The pipeline-objects argument is no longer required because
# a user can provide a pipeline-definition instead.
# get-pipeline-definition also displays the output in the
# translated format.
del argument_table['pipeline-objects']
def activate_pipeline_definition(argument_table, **kwargs):
argument_table['parameter-values-uri'] = ParameterValuesArgument(
'parameter-values-uri', required=False,
help_text=PARAMETER_VALUES_HELP_TEXT)
# Need to use an argument model for inline parameters to accept a list
argument_table['parameter-values'] = ParameterValuesInlineArgument(
'parameter-values',
required=False,
nargs='+',
help_text=INLINE_PARAMETER_VALUES_HELP_TEXT,
)
def translate_definition(parsed, **kwargs):
translator.api_to_definition(parsed)
def convert_described_objects(api_describe_objects, sort_key_func=None):
# We need to take a field list that looks like this:
# {u'key': u'@sphere', u'stringValue': u'INSTANCE'},
# into {"@sphere": "INSTANCE}.
# We convert the fields list into a field dict.
converted = []
for obj in api_describe_objects:
new_fields = {
'@id': obj['id'],
'name': obj['name'],
}
for field in obj['fields']:
new_fields[field['key']] = field.get('stringValue',
field.get('refValue'))
converted.append(new_fields)
if sort_key_func is not None:
converted.sort(key=sort_key_func)
return converted
class QueryArgBuilder(object):
"""
Convert CLI arguments to Query arguments used by QueryObject.
"""
def __init__(self, current_time=None):
if current_time is None:
current_time = datetime.utcnow()
self.current_time = current_time
def build_query(self, parsed_args):
selectors = []
if parsed_args.start_interval is None and \
parsed_args.schedule_interval is None:
# If no intervals are specified, default
# to a start time of 4 days ago and an end time
# of right now.
end_datetime = self.current_time
start_datetime = end_datetime - timedelta(days=4)
start_time_str = start_datetime.strftime('%Y-%m-%dT%H:%M:%S')
end_time_str = end_datetime.strftime('%Y-%m-%dT%H:%M:%S')
selectors.append({
'fieldName': '@actualStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
else:
self._build_schedule_times(selectors, parsed_args)
if parsed_args.status is not None:
self._build_status(selectors, parsed_args)
query = {'selectors': selectors}
return query
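    # Illustration (ours, hypothetical timestamps): with start_interval,
    # schedule_interval and status all None, and current_time=datetime(2015, 1, 5),
    # build_query returns
    #   {'selectors': [{'fieldName': '@actualStartTime',
    #                   'operator': {'type': 'BETWEEN',
    #                                'values': ['2015-01-01T00:00:00',
    #                                           '2015-01-05T00:00:00']}}]}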
def _build_schedule_times(self, selectors, parsed_args):
if parsed_args.start_interval is not None:
start_time_str = parsed_args.start_interval[0]
end_time_str = parsed_args.start_interval[1]
selectors.append({
'fieldName': '@actualStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
if parsed_args.schedule_interval is not None:
start_time_str = parsed_args.schedule_interval[0]
end_time_str = parsed_args.schedule_interval[1]
selectors.append({
'fieldName': '@scheduledStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
def _build_status(self, selectors, parsed_args):
selectors.append({
'fieldName': '@status',
'operator': {
'type': 'EQ',
'values': [status.upper() for status in parsed_args.status]
}
})
class PipelineDefinitionArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
parsed = json.loads(value)
api_objects = translator.definition_to_api_objects(parsed)
parameter_objects = translator.definition_to_api_parameters(parsed)
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['pipelineObjects'] = api_objects
# Use Parameter objects and values from def if not already provided
if 'parameterObjects' not in parameters \
and parameter_objects is not None:
parameters['parameterObjects'] = parameter_objects
if 'parameterValues' not in parameters \
and parameter_values is not None:
parameters['parameterValues'] = parameter_values
class ParameterObjectsArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
parsed = json.loads(value)
parameter_objects = translator.definition_to_api_parameters(parsed)
parameters['parameterObjects'] = parameter_objects
class ParameterValuesArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
if parameters.get('parameterValues', None) is not None:
raise Exception(
"Only parameter-values or parameter-values-uri is allowed"
)
parsed = json.loads(value)
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['parameterValues'] = parameter_values
class ParameterValuesInlineArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
if parameters.get('parameterValues', None) is not None:
raise Exception(
"Only parameter-values or parameter-values-uri is allowed"
)
parameter_object = {}
        # Split each "key=value" argument at the first '='
for argument in value:
try:
argument_components = argument.split('=', 1)
key = argument_components[0]
value = argument_components[1]
if key in parameter_object:
parameter_object[key] = [parameter_object[key], value]
else:
parameter_object[key] = value
except IndexError:
raise ParameterDefinitionError(
"Invalid inline parameter format: %s" % argument
)
parsed = {'values': parameter_object}
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['parameterValues'] = parameter_values
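# Illustrative sketch (key names made up): given
#   --parameter-values myKey=v1 arrayValue=a arrayValue=b
# the loop above builds
#   {'values': {'myKey': 'v1', 'arrayValue': ['a', 'b']}}
# before translation, i.e. repeating a key turns its value into a list.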
class ListRunsCommand(BasicCommand):
NAME = 'list-runs'
DESCRIPTION = (
'Lists the times the specified pipeline has run. '
'You can optionally filter the complete list of '
'results to include only the runs you are interested in.')
ARG_TABLE = [
{'name': 'pipeline-id', 'help_text': 'The identifier of the pipeline.',
'action': 'store', 'required': True, 'cli_type_name': 'string', },
{'name': 'status',
'help_text': (
'Filters the list to include only runs in the '
'specified statuses. '
'The valid statuses are as follows: waiting, pending, cancelled, '
'running, finished, failed, waiting_for_runner, '
'and waiting_on_dependencies. You can combine statuses as a '
'comma-separated list. For example: '
'<code>--status pending,waiting_on_dependencies</code>'),
'action': 'store'},
{'name': 'start-interval',
'help_text': (
'Filters the list to include only runs that started '
'within the specified interval.'),
'action': 'store', 'required': False, 'cli_type_name': 'string', },
{'name': 'schedule-interval',
'help_text': (
'Filters the list to include only runs that are scheduled to '
'start within the specified interval.'),
'action': 'store', 'required': False, 'cli_type_name': 'string', },
]
VALID_STATUS = ['waiting', 'pending', 'cancelled', 'running',
'finished', 'failed', 'waiting_for_runner',
'waiting_on_dependencies', 'shutting_down']
def _run_main(self, parsed_args, parsed_globals, **kwargs):
self._set_client(parsed_globals)
self._parse_type_args(parsed_args)
self._list_runs(parsed_args, parsed_globals)
def _set_client(self, parsed_globals):
# This is called from _run_main and is used to ensure that we have
# a service/endpoint object to work with.
self.client = self._session.create_client(
'datapipeline',
region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
verify=parsed_globals.verify_ssl)
def _parse_type_args(self, parsed_args):
# TODO: give good error messages!
# Parse the start/schedule times.
# Parse the status csv.
if parsed_args.start_interval is not None:
parsed_args.start_interval = [
arg.strip() for arg in
parsed_args.start_interval.split(',')]
if parsed_args.schedule_interval is not None:
parsed_args.schedule_interval = [
arg.strip() for arg in
parsed_args.schedule_interval.split(',')]
if parsed_args.status is not None:
parsed_args.status = [
arg.strip() for arg in
parsed_args.status.split(',')]
self._validate_status_choices(parsed_args.status)
def _validate_status_choices(self, statuses):
for status in statuses:
if status not in self.VALID_STATUS:
raise ValueError("Invalid status: %s, must be one of: %s" %
(status, ', '.join(self.VALID_STATUS)))
def _list_runs(self, parsed_args, parsed_globals):
query = QueryArgBuilder().build_query(parsed_args)
object_ids = self._query_objects(parsed_args.pipeline_id, query)
objects = self._describe_objects(parsed_args.pipeline_id, object_ids)[
'pipelineObjects']
converted = convert_described_objects(
objects,
sort_key_func=lambda x: (x.get('@scheduledStartTime'),
x.get('name')))
formatter = self._get_formatter(parsed_globals)
formatter(self.NAME, converted)
def _describe_objects(self, pipeline_id, object_ids):
parsed = self.client.describe_objects(
pipelineId=pipeline_id, objectIds=object_ids)
return parsed
def _query_objects(self, pipeline_id, query):
paginator = self.client.get_paginator('query_objects').paginate(
pipelineId=pipeline_id,
sphere='INSTANCE', query=query)
parsed = paginator.build_full_result()
return parsed['ids']
def _get_formatter(self, parsed_globals):
output = parsed_globals.output
if output is None:
return ListRunsFormatter(parsed_globals)
else:
return get_formatter(output, parsed_globals)
| 39.106635
| 79
| 0.64558
|
import json
from datetime import datetime, timedelta
from awscli.formatter import get_formatter
from awscli.arguments import CustomArgument
from awscli.customizations.commands import BasicCommand
from awscli.customizations.datapipeline import translator
from awscli.customizations.datapipeline.createdefaultroles \
import CreateDefaultRoles
from awscli.customizations.datapipeline.listrunsformatter \
import ListRunsFormatter
DEFINITION_HELP_TEXT = """\
The JSON pipeline definition. If the pipeline definition
is in a file you can use the file://<filename> syntax to
specify a filename.
"""
PARAMETER_OBJECTS_HELP_TEXT = """\
The JSON parameter objects. If the parameter objects are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter objects provided
on the command line replace the ones in the definition.
"""
PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. If the parameter values are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter values provided
on the command line replace the ones in the definition.
"""
INLINE_PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. You can specify these as
key-value pairs in the key=value format. Multiple parameters
are separated by a space. For list type parameter values
you can use the same key name and specify each value as
a key value pair. e.g. arrayValue=value1 arrayValue=value2
"""
class DocSectionNotFoundError(Exception):
pass
class ParameterDefinitionError(Exception):
def __init__(self, msg):
full_msg = ("Error in parameter: %s\n" % msg)
super(ParameterDefinitionError, self).__init__(full_msg)
self.msg = msg
def register_customizations(cli):
cli.register(
'building-argument-table.datapipeline.put-pipeline-definition',
add_pipeline_definition)
cli.register(
'building-argument-table.datapipeline.activate-pipeline',
activate_pipeline_definition)
cli.register(
'after-call.datapipeline.GetPipelineDefinition',
translate_definition)
cli.register(
'building-command-table.datapipeline',
register_commands)
cli.register_last(
'doc-output.datapipeline.get-pipeline-definition',
document_translation)
def register_commands(command_table, session, **kwargs):
command_table['list-runs'] = ListRunsCommand(session)
command_table['create-default-roles'] = CreateDefaultRoles(session)
def document_translation(help_command, **kwargs):
    # TODO: improve our plugin/doc system to make this easier.
doc = help_command.doc
current = ''
while current != '======\nOutput\n======':
try:
current = doc.pop_write()
except IndexError:
# This should never happen, but in the rare case that it does
# we should be raising something with a helpful error message.
raise DocSectionNotFoundError(
'Could not find the "output" section for the command: %s'
% help_command)
doc.write('======\nOutput\n======')
doc.write(
'\nThe output of this command is the pipeline definition, which'
' is documented in the '
'`Pipeline Definition File Syntax '
'<http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/'
'dp-writing-pipeline-definition.html>`__')
def add_pipeline_definition(argument_table, **kwargs):
argument_table['pipeline-definition'] = PipelineDefinitionArgument(
'pipeline-definition', required=True,
help_text=DEFINITION_HELP_TEXT)
argument_table['parameter-objects'] = ParameterObjectsArgument(
'parameter-objects', required=False,
help_text=PARAMETER_OBJECTS_HELP_TEXT)
argument_table['parameter-values-uri'] = ParameterValuesArgument(
'parameter-values-uri',
required=False,
help_text=PARAMETER_VALUES_HELP_TEXT)
# Need to use an argument model for inline parameters to accept a list
argument_table['parameter-values'] = ParameterValuesInlineArgument(
'parameter-values',
required=False,
nargs='+',
help_text=INLINE_PARAMETER_VALUES_HELP_TEXT)
    # The pipeline-objects argument is no longer required because
# a user can provide a pipeline-definition instead.
# get-pipeline-definition also displays the output in the
# translated format.
del argument_table['pipeline-objects']
def activate_pipeline_definition(argument_table, **kwargs):
argument_table['parameter-values-uri'] = ParameterValuesArgument(
'parameter-values-uri', required=False,
help_text=PARAMETER_VALUES_HELP_TEXT)
# Need to use an argument model for inline parameters to accept a list
argument_table['parameter-values'] = ParameterValuesInlineArgument(
'parameter-values',
required=False,
nargs='+',
help_text=INLINE_PARAMETER_VALUES_HELP_TEXT,
)
def translate_definition(parsed, **kwargs):
translator.api_to_definition(parsed)
def convert_described_objects(api_describe_objects, sort_key_func=None):
# We need to take a field list that looks like this:
# {u'key': u'@sphere', u'stringValue': u'INSTANCE'},
# into {"@sphere": "INSTANCE}.
# We convert the fields list into a field dict.
converted = []
for obj in api_describe_objects:
new_fields = {
'@id': obj['id'],
'name': obj['name'],
}
for field in obj['fields']:
new_fields[field['key']] = field.get('stringValue',
field.get('refValue'))
converted.append(new_fields)
if sort_key_func is not None:
converted.sort(key=sort_key_func)
return converted
class QueryArgBuilder(object):
def __init__(self, current_time=None):
if current_time is None:
current_time = datetime.utcnow()
self.current_time = current_time
def build_query(self, parsed_args):
selectors = []
if parsed_args.start_interval is None and \
parsed_args.schedule_interval is None:
# If no intervals are specified, default
# to a start time of 4 days ago and an end time
# of right now.
end_datetime = self.current_time
start_datetime = end_datetime - timedelta(days=4)
start_time_str = start_datetime.strftime('%Y-%m-%dT%H:%M:%S')
end_time_str = end_datetime.strftime('%Y-%m-%dT%H:%M:%S')
selectors.append({
'fieldName': '@actualStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
else:
self._build_schedule_times(selectors, parsed_args)
if parsed_args.status is not None:
self._build_status(selectors, parsed_args)
query = {'selectors': selectors}
return query
def _build_schedule_times(self, selectors, parsed_args):
if parsed_args.start_interval is not None:
start_time_str = parsed_args.start_interval[0]
end_time_str = parsed_args.start_interval[1]
selectors.append({
'fieldName': '@actualStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
if parsed_args.schedule_interval is not None:
start_time_str = parsed_args.schedule_interval[0]
end_time_str = parsed_args.schedule_interval[1]
selectors.append({
'fieldName': '@scheduledStartTime',
'operator': {
'type': 'BETWEEN',
'values': [start_time_str, end_time_str]
}
})
def _build_status(self, selectors, parsed_args):
selectors.append({
'fieldName': '@status',
'operator': {
'type': 'EQ',
'values': [status.upper() for status in parsed_args.status]
}
})
class PipelineDefinitionArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
parsed = json.loads(value)
api_objects = translator.definition_to_api_objects(parsed)
parameter_objects = translator.definition_to_api_parameters(parsed)
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['pipelineObjects'] = api_objects
        # Use parameter objects and values from the definition if not already provided
if 'parameterObjects' not in parameters \
and parameter_objects is not None:
parameters['parameterObjects'] = parameter_objects
if 'parameterValues' not in parameters \
and parameter_values is not None:
parameters['parameterValues'] = parameter_values
class ParameterObjectsArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
parsed = json.loads(value)
parameter_objects = translator.definition_to_api_parameters(parsed)
parameters['parameterObjects'] = parameter_objects
class ParameterValuesArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
if parameters.get('parameterValues', None) is not None:
raise Exception(
"Only parameter-values or parameter-values-uri is allowed"
)
parsed = json.loads(value)
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['parameterValues'] = parameter_values
class ParameterValuesInlineArgument(CustomArgument):
def add_to_params(self, parameters, value):
if value is None:
return
if parameters.get('parameterValues', None) is not None:
raise Exception(
"Only parameter-values or parameter-values-uri is allowed"
)
parameter_object = {}
        # Split each "key=value" argument at the first '='
for argument in value:
try:
argument_components = argument.split('=', 1)
key = argument_components[0]
value = argument_components[1]
if key in parameter_object:
parameter_object[key] = [parameter_object[key], value]
else:
parameter_object[key] = value
except IndexError:
raise ParameterDefinitionError(
"Invalid inline parameter format: %s" % argument
)
parsed = {'values': parameter_object}
parameter_values = translator.definition_to_parameter_values(parsed)
parameters['parameterValues'] = parameter_values
class ListRunsCommand(BasicCommand):
NAME = 'list-runs'
DESCRIPTION = (
'Lists the times the specified pipeline has run. '
'You can optionally filter the complete list of '
'results to include only the runs you are interested in.')
ARG_TABLE = [
{'name': 'pipeline-id', 'help_text': 'The identifier of the pipeline.',
'action': 'store', 'required': True, 'cli_type_name': 'string', },
{'name': 'status',
'help_text': (
'Filters the list to include only runs in the '
'specified statuses. '
'The valid statuses are as follows: waiting, pending, cancelled, '
'running, finished, failed, waiting_for_runner, '
'and waiting_on_dependencies. You can combine statuses as a '
'comma-separated list. For example: '
'<code>--status pending,waiting_on_dependencies</code>'),
'action': 'store'},
{'name': 'start-interval',
'help_text': (
'Filters the list to include only runs that started '
'within the specified interval.'),
'action': 'store', 'required': False, 'cli_type_name': 'string', },
{'name': 'schedule-interval',
'help_text': (
'Filters the list to include only runs that are scheduled to '
'start within the specified interval.'),
'action': 'store', 'required': False, 'cli_type_name': 'string', },
]
VALID_STATUS = ['waiting', 'pending', 'cancelled', 'running',
'finished', 'failed', 'waiting_for_runner',
'waiting_on_dependencies', 'shutting_down']
def _run_main(self, parsed_args, parsed_globals, **kwargs):
self._set_client(parsed_globals)
self._parse_type_args(parsed_args)
self._list_runs(parsed_args, parsed_globals)
def _set_client(self, parsed_globals):
# This is called from _run_main and is used to ensure that we have
# a service/endpoint object to work with.
self.client = self._session.create_client(
'datapipeline',
region_name=parsed_globals.region,
endpoint_url=parsed_globals.endpoint_url,
verify=parsed_globals.verify_ssl)
def _parse_type_args(self, parsed_args):
# TODO: give good error messages!
# Parse the start/schedule times.
# Parse the status csv.
if parsed_args.start_interval is not None:
parsed_args.start_interval = [
arg.strip() for arg in
parsed_args.start_interval.split(',')]
if parsed_args.schedule_interval is not None:
parsed_args.schedule_interval = [
arg.strip() for arg in
parsed_args.schedule_interval.split(',')]
if parsed_args.status is not None:
parsed_args.status = [
arg.strip() for arg in
parsed_args.status.split(',')]
self._validate_status_choices(parsed_args.status)
def _validate_status_choices(self, statuses):
for status in statuses:
if status not in self.VALID_STATUS:
raise ValueError("Invalid status: %s, must be one of: %s" %
(status, ', '.join(self.VALID_STATUS)))
def _list_runs(self, parsed_args, parsed_globals):
query = QueryArgBuilder().build_query(parsed_args)
object_ids = self._query_objects(parsed_args.pipeline_id, query)
objects = self._describe_objects(parsed_args.pipeline_id, object_ids)[
'pipelineObjects']
converted = convert_described_objects(
objects,
sort_key_func=lambda x: (x.get('@scheduledStartTime'),
x.get('name')))
formatter = self._get_formatter(parsed_globals)
formatter(self.NAME, converted)
def _describe_objects(self, pipeline_id, object_ids):
parsed = self.client.describe_objects(
pipelineId=pipeline_id, objectIds=object_ids)
return parsed
def _query_objects(self, pipeline_id, query):
paginator = self.client.get_paginator('query_objects').paginate(
pipelineId=pipeline_id,
sphere='INSTANCE', query=query)
parsed = paginator.build_full_result()
return parsed['ids']
def _get_formatter(self, parsed_globals):
output = parsed_globals.output
if output is None:
return ListRunsFormatter(parsed_globals)
else:
return get_formatter(output, parsed_globals)
| true
| true
|
790bbf4ae0b9bb04e22928415e35dcd30354d70d
| 501
|
py
|
Python
|
services/gen_der_dict.py
|
ishine/self_supervised_AHC
|
59c3a05dfe2f0fc24f54a316d87bc28b07bcdd9a
|
[
"Apache-2.0"
] | 10
|
2020-08-11T02:58:31.000Z
|
2022-03-18T06:39:38.000Z
|
services/gen_der_dict.py
|
ishine/self_supervised_AHC
|
59c3a05dfe2f0fc24f54a316d87bc28b07bcdd9a
|
[
"Apache-2.0"
] | 2
|
2021-12-07T10:33:58.000Z
|
2021-12-16T05:15:32.000Z
|
services/gen_der_dict.py
|
ishine/self_supervised_AHC
|
59c3a05dfe2f0fc24f54a316d87bc28b07bcdd9a
|
[
"Apache-2.0"
] | 4
|
2020-08-04T00:33:18.000Z
|
2021-12-08T03:33:07.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 19:14:52 2020
@author: prachi
"""
import pickle
import numpy as np
der='swbd_diar/exp_new/callhome/plda_oracle/der.scp'
der_pickle = 'swbd_diar/exp_new/callhome/plda_oracle/derdict'
der=open(der,'r').readlines()
DER={}
for line in der[2:-1]:
fname = line.split()[0]
val = float(line.split()[1])
DER[fname] = val
pickleobj=open(der_pickle,'wb')
pickle.dump(DER,pickleobj)
pickleobj.close()
| 20.875
| 62
| 0.658683
|
import pickle
import numpy as np
der='swbd_diar/exp_new/callhome/plda_oracle/der.scp'
der_pickle = 'swbd_diar/exp_new/callhome/plda_oracle/derdict'
der=open(der,'r').readlines()
DER={}
for line in der[2:-1]:
fname = line.split()[0]
val = float(line.split()[1])
DER[fname] = val
pickleobj=open(der_pickle,'wb')
pickle.dump(DER,pickleobj)
pickleobj.close()
| true
| true
|
790bc0b9a51c9b8febf6e51afe0293d9f49d74ec
| 2,997
|
py
|
Python
|
local_configs/10.18/cg10+w=2+es130k.py
|
wzpscott/SegformerDistillation
|
6558757f5071251410e90270e197755860a6f41c
|
[
"DOC"
] | null | null | null |
local_configs/10.18/cg10+w=2+es130k.py
|
wzpscott/SegformerDistillation
|
6558757f5071251410e90270e197755860a6f41c
|
[
"DOC"
] | null | null | null |
local_configs/10.18/cg10+w=2+es130k.py
|
wzpscott/SegformerDistillation
|
6558757f5071251410e90270e197755860a6f41c
|
[
"DOC"
] | null | null | null |
_base_ = [
'../_base_/datasets/ade20k_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='SDModule',
cfg_s=dict(
type='EncoderDecoder',
pretrained='pretrained/mit_b0.pth',
backbone=dict(
type='mit_b0',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[32, 64, 160, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=256),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
),
cfg_t=dict(
type='EncoderDecoder',
backbone=dict(
type='mit_b4',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[64, 128, 320, 512],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=768),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
),
distillation = [
{'student_layer':'decode_head.linear_pred',
'teacher_layer':'decode_head.linear_pred',
'loss_name':'KLDLoss',
'loss_config':{
'weight':2,
'tau':1,
'reshape_config':'logits',
'resize_config':{'mode':'bilinear','align_corners':False},
'mask_config':False,
'transform_config':{'loss_type':'channel','group_size':10},
'ff_config':False,
'earlystop_config':130000,
},
},
],
    s_pretrain = './pretrained/mit_b0.pth',  # student's pretrained model
    t_pretrain = './pretrained/segformer.b4.512x512.ade.160k.pth',  # teacher's pretrained model
train_cfg=dict(),
test_cfg=dict(mode='whole'),
)
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9,0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=10.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
work_dir = '/apdcephfs/private_inchzhang/shared_info/10.18/cg10+w=2+es130k'
data = dict(samples_per_gpu=2)
evaluation = dict(interval=16000, metric='mIoU')
# resume_from = ''
| 34.848837
| 95
| 0.538872
|
_base_ = [
'../_base_/datasets/ade20k_repeat.py',
'../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k_adamw.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='SDModule',
cfg_s=dict(
type='EncoderDecoder',
pretrained='pretrained/mit_b0.pth',
backbone=dict(
type='mit_b0',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[32, 64, 160, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=256),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
),
cfg_t=dict(
type='EncoderDecoder',
backbone=dict(
type='mit_b4',
style='pytorch'),
decode_head=dict(
type='SegFormerHead',
in_channels=[64, 128, 320, 512],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=0.1,
num_classes=150,
norm_cfg=norm_cfg,
align_corners=False,
decoder_params=dict(embed_dim=768),
loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
),
distillation = [
{'student_layer':'decode_head.linear_pred',
'teacher_layer':'decode_head.linear_pred',
'loss_name':'KLDLoss',
'loss_config':{
'weight':2,
'tau':1,
'reshape_config':'logits',
'resize_config':{'mode':'bilinear','align_corners':False},
'mask_config':False,
'transform_config':{'loss_type':'channel','group_size':10},
'ff_config':False,
'earlystop_config':130000,
},
},
],
s_pretrain = './pretrained/mit_b0.pth',
t_pretrain = './pretrained/segformer.b4.512x512.ade.160k.pth',
train_cfg=dict(),
test_cfg=dict(mode='whole'),
)
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9,0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'pos_block': dict(decay_mult=0.),
'norm': dict(decay_mult=0.),
'head': dict(lr_mult=10.)
}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
work_dir = '/apdcephfs/private_inchzhang/shared_info/10.18/cg10+w=2+es130k'
data = dict(samples_per_gpu=2)
evaluation = dict(interval=16000, metric='mIoU')
| true
| true
|
790bc136cffade49b8cb81b0413b948af3fe17e2
| 166
|
py
|
Python
|
emailautomate/views.py
|
pradyneel/xtreme-weather
|
0e19a1ead2d88ec474d210709e6398c5d2b6cc5b
|
[
"MIT"
] | null | null | null |
emailautomate/views.py
|
pradyneel/xtreme-weather
|
0e19a1ead2d88ec474d210709e6398c5d2b6cc5b
|
[
"MIT"
] | null | null | null |
emailautomate/views.py
|
pradyneel/xtreme-weather
|
0e19a1ead2d88ec474d210709e6398c5d2b6cc5b
|
[
"MIT"
] | null | null | null |
from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
def index(request):
return HttpResponse("Check URL => /admin")
| 27.666667
| 46
| 0.771084
|
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return HttpResponse("Check URL => /admin")
| true
| true
|
790bc1b095a73cd6ad6e3aa12ae444551dff78f4
| 4,338
|
py
|
Python
|
util_common/nlp/Sumy/summarizers/lsa.py
|
cscyuge/pointer-generator
|
74b3b974e72209dc7a4045cabb758465998c920a
|
[
"MIT"
] | 56
|
2019-03-16T09:49:57.000Z
|
2021-09-20T08:24:29.000Z
|
util_common/nlp/Sumy/summarizers/lsa.py
|
cscyuge/pointer-generator
|
74b3b974e72209dc7a4045cabb758465998c920a
|
[
"MIT"
] | 10
|
2019-03-30T01:57:22.000Z
|
2020-12-01T02:25:54.000Z
|
util_common/nlp/Sumy/summarizers/lsa.py
|
cscyuge/pointer-generator
|
74b3b974e72209dc7a4045cabb758465998c920a
|
[
"MIT"
] | 12
|
2019-04-02T12:25:40.000Z
|
2020-10-09T16:06:49.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
from warnings import warn
try:
import numpy
except ImportError:
numpy = None
try:
from numpy.linalg import svd as singular_value_decomposition
except ImportError:
singular_value_decomposition = None
from ._summarizer import AbstractSummarizer
class LsaSummarizer(AbstractSummarizer):
MIN_DIMENSIONS = 3
REDUCTION_RATIO = 1/1
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
self._ensure_dependecies_installed()
dictionary = self._create_dictionary(document)
# empty document
if not dictionary:
return ()
matrix = self._create_matrix(document, dictionary)
matrix = self._compute_term_frequency(matrix)
u, sigma, v = singular_value_decomposition(matrix, full_matrices=False)
ranks = iter(self._compute_ranks(sigma, v))
return self._get_best_sentences(document.sentences, sentences_count,
lambda s: next(ranks))
def _ensure_dependecies_installed(self):
if numpy is None:
raise ValueError("LSA summarizer requires NumPy. Please, install it by command 'pip install numpy'.")
def _create_dictionary(self, document):
"""Creates mapping key = word, value = row index"""
# print(document.words)
words = map(self.normalize_word, document.words)
unique_words = frozenset(self.stem_word(w) for w in words if w not in self._stop_words)
return dict((w, i) for i, w in enumerate(unique_words))
def _create_matrix(self, document, dictionary):
"""
        Creates a matrix of shape |unique words|×|sentences| where cells
        contain the number of occurrences of words (rows) in sentences (cols).
"""
sentences = document.sentences
words_count = len(dictionary)
sentences_count = len(sentences)
if words_count < sentences_count:
message = (
"Number of words (%d) is lower than number of sentences (%d). "
"LSA algorithm may not work properly."
)
warn(message % (words_count, sentences_count))
# create matrix |unique words|×|sentences| filled with zeroes
matrix = numpy.zeros((words_count, sentences_count))
for col, sentence in enumerate(sentences):
for word in map(self.stem_word, sentence.words):
# only valid words is counted (not stop-words, ...)
if word in dictionary:
row = dictionary[word]
matrix[row, col] += 1
return matrix
def _compute_term_frequency(self, matrix, smooth=0.4):
"""
Computes TF metrics for each sentence (column) in the given matrix.
        You can read more about the smoothing parameter at the URL below:
http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html
"""
assert 0.0 <= smooth < 1.0
max_word_frequencies = numpy.max(matrix, axis=0)
rows, cols = matrix.shape
for row in range(rows):
for col in range(cols):
max_word_frequency = max_word_frequencies[col]
if max_word_frequency != 0:
frequency = matrix[row, col]/max_word_frequency
matrix[row, col] = smooth + (1.0 - smooth)*frequency
return matrix
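    # Worked example (illustrative numbers): with smooth=0.4, a word that
    # occurs twice in a sentence whose most frequent word occurs four times
    # gets 0.4 + 0.6*(2/4) = 0.7, while the most frequent word itself gets
    # 0.4 + 0.6*1.0 = 1.0.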
def _compute_ranks(self, sigma, v_matrix):
assert len(sigma) == v_matrix.shape[0], "Matrices should be multiplicable"
dimensions = max(LsaSummarizer.MIN_DIMENSIONS,
int(len(sigma)*LsaSummarizer.REDUCTION_RATIO))
powered_sigma = tuple(s**2 if i < dimensions else 0.0
for i, s in enumerate(sigma))
ranks = []
# iterate over columns of matrix (rows of transposed matrix)
for column_vector in v_matrix.T:
rank = sum(s*v**2 for s, v in zip(powered_sigma, column_vector))
ranks.append(math.sqrt(rank))
return ranks
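        # In other words, sentence j is ranked by the sigma-weighted norm of
        # its column of V: rank_j = sqrt(sum_i sigma_i**2 * v_ij**2), where
        # only the first `dimensions` singular values contribute.  E.g. for
        # sigma = (3, 1) and column (0.5, 0.5):
        # sqrt(9*0.25 + 1*0.25) = sqrt(2.5) (illustrative numbers).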
| 34.983871
| 113
| 0.639696
|
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
import math
from warnings import warn
try:
import numpy
except ImportError:
numpy = None
try:
from numpy.linalg import svd as singular_value_decomposition
except ImportError:
singular_value_decomposition = None
from ._summarizer import AbstractSummarizer
class LsaSummarizer(AbstractSummarizer):
MIN_DIMENSIONS = 3
REDUCTION_RATIO = 1/1
_stop_words = frozenset()
@property
def stop_words(self):
return self._stop_words
@stop_words.setter
def stop_words(self, words):
self._stop_words = frozenset(map(self.normalize_word, words))
def __call__(self, document, sentences_count):
self._ensure_dependecies_installed()
dictionary = self._create_dictionary(document)
if not dictionary:
return ()
matrix = self._create_matrix(document, dictionary)
matrix = self._compute_term_frequency(matrix)
u, sigma, v = singular_value_decomposition(matrix, full_matrices=False)
ranks = iter(self._compute_ranks(sigma, v))
return self._get_best_sentences(document.sentences, sentences_count,
lambda s: next(ranks))
def _ensure_dependecies_installed(self):
if numpy is None:
raise ValueError("LSA summarizer requires NumPy. Please, install it by command 'pip install numpy'.")
def _create_dictionary(self, document):
words = map(self.normalize_word, document.words)
unique_words = frozenset(self.stem_word(w) for w in words if w not in self._stop_words)
return dict((w, i) for i, w in enumerate(unique_words))
def _create_matrix(self, document, dictionary):
sentences = document.sentences
words_count = len(dictionary)
sentences_count = len(sentences)
if words_count < sentences_count:
message = (
"Number of words (%d) is lower than number of sentences (%d). "
"LSA algorithm may not work properly."
)
warn(message % (words_count, sentences_count))
matrix = numpy.zeros((words_count, sentences_count))
for col, sentence in enumerate(sentences):
for word in map(self.stem_word, sentence.words):
if word in dictionary:
row = dictionary[word]
matrix[row, col] += 1
return matrix
def _compute_term_frequency(self, matrix, smooth=0.4):
assert 0.0 <= smooth < 1.0
max_word_frequencies = numpy.max(matrix, axis=0)
rows, cols = matrix.shape
for row in range(rows):
for col in range(cols):
max_word_frequency = max_word_frequencies[col]
if max_word_frequency != 0:
frequency = matrix[row, col]/max_word_frequency
matrix[row, col] = smooth + (1.0 - smooth)*frequency
return matrix
def _compute_ranks(self, sigma, v_matrix):
assert len(sigma) == v_matrix.shape[0], "Matrices should be multiplicable"
dimensions = max(LsaSummarizer.MIN_DIMENSIONS,
int(len(sigma)*LsaSummarizer.REDUCTION_RATIO))
powered_sigma = tuple(s**2 if i < dimensions else 0.0
for i, s in enumerate(sigma))
ranks = []
for column_vector in v_matrix.T:
rank = sum(s*v**2 for s, v in zip(powered_sigma, column_vector))
ranks.append(math.sqrt(rank))
return ranks
| true
| true
|
790bc205119844fcb221f48e4fbed1b6f813e3cf
| 16,006
|
py
|
Python
|
EFIT2D_Classes.py
|
guillaumedavidphd/efit2d-pyopencl
|
bf571f8de86aec710e92896e901322edc4ba31c1
|
[
"MIT"
] | 9
|
2016-04-28T17:05:29.000Z
|
2020-07-24T09:22:28.000Z
|
EFIT2D_Classes.py
|
guillaumedavidphd/efit2d-pyopencl
|
bf571f8de86aec710e92896e901322edc4ba31c1
|
[
"MIT"
] | 2
|
2019-11-01T22:12:49.000Z
|
2019-11-05T18:52:13.000Z
|
EFIT2D_Classes.py
|
guillaumedavidphd/efit2d-pyopencl
|
bf571f8de86aec710e92896e901322edc4ba31c1
|
[
"MIT"
] | 1
|
2019-11-05T17:04:30.000Z
|
2019-11-05T17:04:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@package EFIT2D_Classes
Support Library: efit2d-pyopencl
Manuscript Title: Optimized OpenCL implementation of the Elastodynamic Finite Integration Technique for viscoelastic media
Authors: M Molero, U Iturraran-Viveros, S Aparicio, M.G. Hernández
Program title: EFIT2D-PyOpenCL
Journal reference: Comput. Phys. Commun.
Programming language: Python.
External routines: numpy, scipy, matplotlib, glumpy, pyopencl
Computer: computers having GPU or Multicore CPU with OpenCL drivers.
The classes defined here are used to define:
- The scenario,
- Material objects,
- Input sources,
- Inspection setup,
- Simulation parameters
"""
import numpy as np
from math import sin, cos, sqrt, pi, exp
import random
import time
from scipy import signal
from scipy.fftpack import fftshift
from skimage.transform import rotate
try:
from Image import Image
except:
from PIL import Image
from matplotlib import cm
import matplotlib.pyplot as plt
def imresize(arr, size, **kwargs):
from PIL import Image
    # PIL's Image.resize expects (width, height), i.e. (cols, rows)
    new_size = (int(arr.shape[1] * size), int(arr.shape[0] * size))
    return np.array(Image.fromarray(arr).resize(new_size))
def imrotate(arr, angle, **kwargs):
return rotate(arr, angle=angle)
def RaisedCosinePulse(t, Freq, Amplitude):
"""
Raised-Cosine Pulse
@param t time vector
@param Freq Frequency in Hz
@param Amplitude Real Value of Amplitude
@return Output signal vector
@retval P vector of length equals to the time vector t
"""
N = np.size(t,0)
P = np.zeros((N,),dtype=np.float32)
for m in range(0,N):
if t[m] <= 2.0/Freq:
P[m] = Amplitude *(1-cos(pi*Freq*t[m]))*cos(2*pi*Freq*t[m])
return P
def ricker(t,ts,fsavg):
"""
Ricker Pulse
@param t time vector
@param ts temporal delay
@param fsavg pulse width parameter
@return Output signal vector
"""
a = fsavg*pi*(t-ts)
a2 = a*a
return ((1.0-2.0*a2)*np.exp(-a2))
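# Illustrative usage sketch for the two pulses (the time step, duration and
# frequency below are assumptions for the example, not values from this file):
#
#   t = np.arange(0, 10e-6, 1e-8)              # 10 microseconds, 10 ns step
#   p_rc = RaisedCosinePulse(t, 1e6, 1.0)      # 1 MHz raised-cosine pulse
#   p_rk = ricker(t, 3e-6, 1e6)                # Ricker pulse delayed by 3 us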
##
class NewImage:
"""
Class NewImage: Definition of the Main Geometric Scenario.
"""
def __init__(self, Width=40, Height=40,Pixel_mm=10,label=0,SPML=False):
"""
Constructor of the Class NewImage
@param Width Width of the Scenario
@param Height Height of the Scenario
@param Pixel_mm Ratio Pixel per mm
@param label Label
@param SPML Flag used to indicate the boundary conditions
"""
## Width of the Scenario
self.Width = Width
## Height of the Scenario
self.Height = Height
## Ratio Pixel per mm
self.Pixel_mm = Pixel_mm
## Label
self.Label = label
## Flag used to indicate the boundary conditions
self.SPML = SPML
## Dimension 1 of the Scenario Matrix
self.M = int(self.Height * self.Pixel_mm)
        ## Dimension 2 of the Scenario Matrix
self.N = int(self.Width * self.Pixel_mm)
        ## Scenario Matrix (MxN)
self.I = np.ones((self.M,self.N),dtype=np.uint8)*label
self.Itemp = 0
## Size of the Boundary Layer
self.Tap = 0
## Configure if boundary layers will be treated as absorbing layers or air layers.
#
# False: Absorbing layers
#
# True : Air boundaries
self.AirBoundary = False
def createLayer(self, centerW, centerH, Width, Height, label, Theta=0):
"""
Create a Layer
@param centerW center in width-axis of the Layer
@param centerH center in height-axis of the Layer
@param Width Width of the Layer
@param Height Height of the Layer
@param label Label of the layer
@param Theta Rotation Angle
"""
a = int(Height*self.Pixel_mm/2.0)
b = int(Width*self.Pixel_mm/2.0)
for x in range(-a,a):
for y in range(-b,b):
tempX = round (x + centerH*self.Pixel_mm)
tempY = round (y + centerW*self.Pixel_mm)
self.I[tempX,tempY] = label
if Theta != 0:
self.I = imrotate(self.I,Theta,interp='nearest')
def createABS(self,Tap):
"""
Create the boundary layers depending on the boundary conditions required
@param Tap Layer Size
"""
self.Tap = Tap
self.SPML = True
self.AirBoundary = False
self.M, self.N = np.shape(self.I)
TP = round(Tap* self.Pixel_mm )
M_pml = int( self.M + 2*TP )
N_pml = int( self.N + 2*TP )
        self.Itemp = 255*np.ones((M_pml,N_pml),dtype=np.uint8)  # keep uint8 so PIL can resize it
self.Itemp[TP : M_pml-TP, TP : N_pml-TP] = np.copy(self.I)
class Material:
"""
Class Material: Definition of a material
@param name Material Name
@param rho Density (kg/m3)
@param c11 C11 (Pa)
@param c12 C12 (Pa)
@param c22 C22 (Pa)
@param c44 C44 (Pa)
@param eta_v Bulk Viscosity Constant (Pa s)
@param eta_s Shear Viscosity Constant (Pa s)
@param label Material Label
"""
def __init__(self, name="Water",rho=1000,c11=2.19e9,c12=0.0,c22=0.0,c44=0.0,eta_v=0, eta_s=0,label=0):
"""
Constructor of the Material object
"""
## Material Name
self.name = name
##Density (kg/m3)
self.rho = rho
## C11 (Pa)
self.c11 = c11
## C12 (Pa)
self.c12 = c12
## C22 (Pa)
self.c22 = c22
## C44 (Pa)
self.c44 = c44
## Longitudinal Velocity (m/s)
self.VL = sqrt( c11/rho )
## Shear Velocity (m/s)
self.VT = sqrt( c44/rho )
## Bulk Viscosity Constant (Pa s)
self.eta_v = eta_v
## Shear Viscosity Constant (Pa s)
self.eta_s = eta_s
## Material Label
self.Label = label
def __str__(self):
return "Material:"
def __repr__(self):
return "Material:"
class Source:
"""
Class Source: Define the Inspection Type
@param TypeLaunch Type of Inspection: Transmission or PulseEcho
"""
def __init__(self,TypeLaunch = 'Transmission'):
## Type of Inspection: Transmission or PulseEcho
self.TypeLaunch = TypeLaunch
## Define the location of the transducers in function of the type of the Inspection
self.Theta = 0
if self.TypeLaunch == 'PulseEcho':
self.pulseEcho()
elif self.TypeLaunch == 'Transmission':
self.transmission()
def __str__(self):
return "Source: "
def __repr__(self):
return "Source: "
def pulseEcho(self):
"""
Define Theta for PulseEcho Inspection. PulseEcho Inspection uses the same transducer acting as emitter and as receiver
"""
self.Theta = [270*pi/180, 270*pi/180]
def transmission(self):
"""
        Define Theta for Transmission Inspection. Transmission uses two transducers, one used as emitter and another as receiver
"""
self.Theta = [270*pi/180, 90*pi/180]
class Transducer:
"""
Class Transducer: Definition of the Transducer Object
@param Size Transducer Size
@param Offset Offset position of the Transducer. By default is set to zero
@param BorderOffset Border offset position of the Transducer. By default is set to zero
@param Location Location is set to zero that indicates Up location
@param name Transducer Name
"""
def __init__(self, Size = 10, Offset=0, BorderOffset=0, Location=0, name = 'emisor'):
"""
Constructor of the Class Transducer
"""
# Location = 0 => Top
## Transducer Size
self.Size = Size
## Offset position of the Transducer. By default is set to zero
#
# This offset is measured taking into account the center of the Scenario in the width-axis
#
# Positive Values indicate offsets toward the right
#
# Negative values indicate offsets toward the left
self.Offset = Offset
## Border offset position of the Transducer. By default is set to zero
#
        # This border offset takes into account the center of the Scenario in the width-axis,
        # but this offset is measured in the direction of the height-axis
#
# Only Positive values must be defined.
self.BorderOffset = BorderOffset
        ## Size of the transducer in pixels
self.SizePixel = 0
## Location-> 0: Top. This version only works when the location=0
self.Location = Location
## Name of the transducer
self.name = name
def __str__(self):
return "Transducer: "
def __repr__(self):
return "Transducer: "
####################################################################################
class Signal:
"""
Class Signal: Signal Definition (Source Input for the Simulation)
@param Amplitude Signal Amplitude
    @param Frequency Signal Frequency
@param Name Name of the Signal: RaisedCosinePulse or RickerPulse
@param ts Time Delay: used only for RickerPulse
"""
def __init__(self, Amplitude=1, Frequency=1e6, name ="RaisedCosinePulse", ts=1):
## Signal Amplitude
self.Amplitude = Amplitude
        ## Signal Frequency
self.Frequency = Frequency
## Name of the Signal: RaisedCosinePulse or RickerPulse
self.name = name
## Time Delay: used only for RickerPulse
        if ts == 1:
            self.ts = 3.0/Frequency
        else:
            self.ts = ts
def __str__(self):
return "Signal: "
def __repr__(self):
return "Signal: "
def generate(self,t):
"""
Generate the signal waveform
@param t vector time
@return signal vector with the same length as the vector time
"""
if self.name == "RaisedCosinePulse":
return RaisedCosinePulse(t, self.Frequency, self.Amplitude)
elif self.name == "RickerPulse":
return ricker(t, self.ts, self.Frequency)
def saveSignal(self,t):
"""
Save the signal waveform into the object
@param t vector time
"""
self.time_signal = self.generate(t)
######################################
class Inspection:
"""
Class Inspection: used for the configuration of the inspections to be emulated
"""
def __init__(self):
"""
Constructor of the Class Inspection
"""
## Position of the Transducer (Angle)
self.Theta = 0
## Vector x-axis Position of the Transducer
self.XL = 0
## Vector y-axis Position of the Transducer
self.YL = 0
##
self.IR = 0
def __str__(self):
return "Inspection: "
def __repr__(self):
return "Inspection: "
def setTransmisor(self, source, transducer, x2, y2, X0, Y0):
self.Theta = source.Theta
Ntheta = np.size(self.Theta,0)
NXL = int(2*transducer.SizePixel)
xL = np.zeros((NXL,),dtype=np.float32)
yL = np.zeros((NXL,),dtype=np.float32)
for m in range(0,Ntheta):
if np.abs(np.cos(self.Theta[m])) < 1e-5:
yL = np.linspace(y2[m]-transducer.SizePixel,y2[m]+transducer.SizePixel,num=NXL, endpoint=True)
xL[:] = x2[m]*np.ones((NXL,),dtype=np.float32)
elif np.abs(np.cos(self.Theta[m])) == 1:
xL[:] = np.linspace(x2[m]-transducer.SizePixel, x2[m]+transducer.SizePixel,num=NXL, endpoint=True)
yL[:] = y2[m] - ( (x2[m]-X0 )/( y2[m]-Y0 ) )*( xL[:]-x2[m] )
else:
xL[:] = np.linspace(x2[m]-(transducer.SizePixel*np.abs(np.cos(self.Theta[m]))),x2[m]+(transducer.SizePixel*np.abs(np.cos(self.Theta[m]))), num=NXL, endpoint=True )
yL[:] = y2[m] - ( (x2[m]-X0 )/( y2[m]-Y0 ) )*( xL[:]-x2[m] )
if m==0:
self.XL = np.zeros((np.size(xL,0),Ntheta),dtype=np.float32)
self.YL = np.zeros((np.size(xL,0),Ntheta),dtype=np.float32)
self.XL[:,m] = (np.around(xL[:]))
self.YL[:,m] = (np.around(yL[:]))
def addOffset(self, image, transducer, NRI):
"""
Handle Offset
"""
NXL = np.size(self.XL,0)
Ntheta = np.size(self.Theta,0)
M_pml, N_pml = np.shape(image.Itemp)
self.YL += (np.around(transducer.Offset * image.Pixel_mm * NRI / float(N_pml)))
self.IR = np.zeros((Ntheta,Ntheta),dtype=np.float32)
B = list(range(0,Ntheta))
self.IR[:,0] = np.int32(B[:])
for i in range(1,Ntheta):
B = np.roll(B,-1)
self.IR[:,i] = np.int32(B)
def addBorderOffset(self, image, transducer, MRI):
"""
Handle Border Offset
"""
M_pml, N_pml = np.shape(image.Itemp)
ratio = float(MRI) / float(M_pml)
self.XL[:,0] += (np.around(transducer.BorderOffset * image.Pixel_mm * ratio) )
self.XL[:,1] -= (np.around(transducer.BorderOffset * image.Pixel_mm * ratio) )
def flip(self):
self.XL = np.fliplr(self.XL)
def SetReception(self,T):
ReceptorX = (self.XL)
ReceptorY = (self.YL)
M,N = np.shape(ReceptorX)
temp = np.zeros((M,N-1),dtype=np.float32)
for mm in range(0,M):
for ir in range(0,N-1):
temp[mm,ir] = T[ int(ReceptorX[ mm,int(self.IR[0,ir+1]) ] ) , int(ReceptorY[ mm,int(self.IR[0,ir+1]) ]) ]
if self.Field:
return temp.transpose()
else:
return np.mean(temp,0)
def SetReceptionVector(self, T, x, y):
M = np.size(x)
temp = np.zeros((M,),dtype=np.float32)
for mm in range(0,M):
temp[mm] = T[(int(x[mm])),(int(y[mm]))]
return temp
class SimulationModel:
"""
Class Simulation: setup the parameters for the numerical simulation
Usage:
    - First define an instance of the SimulationModel object
    - Execute the class method jobParameters using the materials list as input
    - Execute the class method createNumericalModel using the scenario as input
    - Execute the class method initReceivers to initialize the receivers
    - Execute the Signal method saveSignal using the attribute simModel.t as input
    - Save the device into the simModel.Device attribute
@param TimeScale Scale Time Factor
@param MaxFreq Maximum Frequency
@param PointCycle Points per Cycle
    @param SimTime Simulation Time
@param SpatialScale Spatial Scale: 1 -> meters, 1e-3 -> millimeters
"""
def __init__(self,TimeScale=1, MaxFreq=2e6, PointCycle=10, SimTime=50e6, SpatialScale=1e-3):
## Scale Time Factor
self.TimeScale = TimeScale
## Maximum Frequency
self.MaxFreq = MaxFreq # MHz
## Points per Cycle
self.PointCycle = PointCycle
        ## Simulation Time
self.SimTime = SimTime # microseconds
## Spatial Scale: 1 -> meters, 1e-3 -> millimeters
self.SpatialScale = SpatialScale
## Spatial Discretization
self.dx = 0
## Temporal Discretization
self.dt = 0
self.Rgrid = 0
self.TapG = 0
self.t = 0
self.Ntiempo = 0
self.MRI,self.NRI = (0,0)
self.receiver_signals = 0
self.Device = 'CPU'
self.XL = 0
self.YL = 0
def __str__(self):
return "Simulation Model: "
def __repr__(self):
return "Simulation Model: "
def jobParameters(self,materiales):
"""
Define Main Simulation Parameters
        @param materiales Materials List
"""
indVL = [mat.VL for mat in materiales if mat.VL > 400]
indVT = [mat.VT for mat in materiales if mat.VT > 400]
VL = np.array(indVL)
VT = np.array(indVT)
V = np.hstack( (VL, VT) )
self.dx = np.float32( np.min([V]) / (self.PointCycle*self.MaxFreq) )
self.dt = self.TimeScale * np.float32( 0.7071 * self.dx / ( np.max([V]) ) )
self.Ntiempo = int(round(self.SimTime/self.dt))
self.t = self.dt*np.arange(0,self.Ntiempo)
def createNumericalModel(self, image):
"""
Create the Numerical Model
@param image The Scenario Object
"""
#Spatial Scale
Mp = np.shape(image.Itemp)[0]*self.SpatialScale/image.Pixel_mm/self.dx
self.Rgrid = Mp/np.shape(image.Itemp)[0]
self.TapG = np.around(image.Tap * self.Rgrid * image.Pixel_mm)
self.Im = imresize(image.Itemp, self.Rgrid, interp='nearest')
self.MRI,self.NRI = np.shape(self.Im)
print("dt: " + str(self.dt) + " dx: " + str(self.dx) + " Grid: " + str(self.MRI) + " x " + str(self.NRI))
def initReceivers(self):
"""
Initialize the receivers
"""
self.receiver_signals = 0
def setDevice(self,Device):
"""
Set the Computation Device
@param Device Device to be used
Define the device used to compute the simulations:
- "CPU" : uses the global memory in th CPU
- "GPU_Global" : uses the global memory in the GPU
- "GPU_Local" : uses the local memory in the GPU
"""
if Device == 0:
self.Device = 'CPU'
elif Device ==1:
self.Device = 'GPU_Global'
elif Device ==2:
self.Device = 'GPU_Local'
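# ---------------------------------------------------------------------------
# Minimal usage sketch following the steps listed in the SimulationModel
# docstring.  The scenario size, material constants and signal frequency are
# illustrative assumptions, not values taken from the original code.
if __name__ == '__main__':
    scenario = NewImage(Width=40, Height=40, Pixel_mm=10, label=0)
    scenario.createABS(Tap=2)                       # absorbing boundary layers
    water = Material(name="Water", rho=1000, c11=2.19e9, label=0)
    simModel = SimulationModel(MaxFreq=2e6, PointCycle=10, SimTime=50e-6)
    simModel.jobParameters([water])                 # sets dx, dt and the time vector t
    simModel.createNumericalModel(scenario)         # resamples the scenario onto the grid
    simModel.initReceivers()
    source = Signal(Amplitude=1, Frequency=1e6, name="RaisedCosinePulse")
    source.saveSignal(simModel.t)                   # store the input waveform
    simModel.setDevice(0)                           # 0 -> 'CPU'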
| 22.671388
| 167
| 0.642197
|
import numpy as np
from math import sin, cos, sqrt, pi, exp
import random
import time
from scipy import signal
from scipy.fftpack import fftshift
from skimage.transform import rotate
try:
from Image import Image
except:
from PIL import Image
from matplotlib import cm
import matplotlib.pyplot as plt
def imresize(arr, size, **kwargs):
from PIL import Image
    new_size = (int(arr.shape[1] * size), int(arr.shape[0] * size))
    return np.array(Image.fromarray(arr).resize(new_size))
def imrotate(arr, angle, **kwargs):
return rotate(arr, angle=angle)
def RaisedCosinePulse(t, Freq, Amplitude):
N = np.size(t,0)
P = np.zeros((N,),dtype=np.float32)
for m in range(0,N):
if t[m] <= 2.0/Freq:
P[m] = Amplitude *(1-cos(pi*Freq*t[m]))*cos(2*pi*Freq*t[m])
return P
def ricker(t,ts,fsavg):
a = fsavg*pi*(t-ts)
a2 = a*a
return ((1.0-2.0*a2)*np.exp(-a2))
class NewImage:
def __init__(self, Width=40, Height=40,Pixel_mm=10,label=0,SPML=False):
        self.Width = Width
        self.Height = Height
        self.Pixel_mm = Pixel_mm
        self.Label = label
        self.SPML = SPML
        self.M = int(self.Height * self.Pixel_mm)
        self.N = int(self.Width * self.Pixel_mm)
        self.I = np.ones((self.M,self.N),dtype=np.uint8)*label
        self.Itemp = 0
        self.Tap = 0
        self.AirBoundary = False
    def createLayer(self, centerW, centerH, Width, Height, label, Theta=0):
a = int(Height*self.Pixel_mm/2.0)
b = int(Width*self.Pixel_mm/2.0)
for x in range(-a,a):
for y in range(-b,b):
tempX = round (x + centerH*self.Pixel_mm)
tempY = round (y + centerW*self.Pixel_mm)
self.I[tempX,tempY] = label
if Theta != 0:
self.I = imrotate(self.I,Theta,interp='nearest')
def createABS(self,Tap):
self.Tap = Tap
self.SPML = True
self.AirBoundary = False
self.M, self.N = np.shape(self.I)
TP = round(Tap* self.Pixel_mm )
M_pml = int( self.M + 2*TP )
N_pml = int( self.N + 2*TP )
        self.Itemp = 255*np.ones((M_pml,N_pml),dtype=np.uint8)
self.Itemp[TP : M_pml-TP, TP : N_pml-TP] = np.copy(self.I)
class Material:
def __init__(self, name="Water",rho=1000,c11=2.19e9,c12=0.0,c22=0.0,c44=0.0,eta_v=0, eta_s=0,label=0):
        self.name = name
        self.rho = rho
        self.c11 = c11
        self.c12 = c12
        self.c22 = c22
        self.c44 = c44
        self.VL = sqrt( c11/rho )
        self.VT = sqrt( c44/rho )
        self.eta_v = eta_v
        self.eta_s = eta_s
        self.Label = label
def __str__(self):
return "Material:"
def __repr__(self):
return "Material:"
class Source:
def __init__(self,TypeLaunch = 'Transmission'):
        self.TypeLaunch = TypeLaunch
        self.Theta = 0
        if self.TypeLaunch == 'PulseEcho':
            self.pulseEcho()
        elif self.TypeLaunch == 'Transmission':
            self.transmission()
def __str__(self):
return "Source: "
def __repr__(self):
return "Source: "
def pulseEcho(self):
self.Theta = [270*pi/180, 270*pi/180]
def transmission(self):
self.Theta = [270*pi/180, 90*pi/180]
class Transducer:
def __init__(self, Size = 10, Offset=0, BorderOffset=0, Location=0, name = 'emisor'):
        self.Size = Size
        self.Offset = Offset
        self.BorderOffset = BorderOffset
        self.SizePixel = 0
        self.Location = Location
        self.name = name
    def __str__(self):
        return "Transducer: "
def __repr__(self):
return "Transducer: "
class SimulationModel:
    def __init__(self,TimeScale=1, MaxFreq=2e6, PointCycle=10, SimTime=50e6, SpatialScale=1e-3):
        self.TimeScale = TimeScale
        self.MaxFreq = MaxFreq
        self.PointCycle = PointCycle
        self.SimTime = SimTime
        self.SpatialScale = SpatialScale
        self.dx = 0
        self.dt = 0
        self.Rgrid = 0
        self.TapG = 0
        self.t = 0
        self.Ntiempo = 0
        self.MRI,self.NRI = (0,0)
        self.receiver_signals = 0
        self.Device = 'CPU'
        self.XL = 0
        self.YL = 0
    def __str__(self):
        return "Simulation Model: "
    def __repr__(self):
        return "Simulation Model: "
    def jobParameters(self,materiales):
        indVL = [mat.VL for mat in materiales if mat.VL > 400]
        indVT = [mat.VT for mat in materiales if mat.VT > 400]
        VL = np.array(indVL)
        VT = np.array(indVT)
        V = np.hstack( (VL, VT) )
        self.dx = np.float32( np.min([V]) / (self.PointCycle*self.MaxFreq) )
        self.dt = self.TimeScale * np.float32( 0.7071 * self.dx / ( np.max([V]) ) )
        self.Ntiempo = int(round(self.SimTime/self.dt))
        self.t = self.dt*np.arange(0,self.Ntiempo)
def createNumericalModel(self, image):
Mp = np.shape(image.Itemp)[0]*self.SpatialScale/image.Pixel_mm/self.dx
self.Rgrid = Mp/np.shape(image.Itemp)[0]
self.TapG = np.around(image.Tap * self.Rgrid * image.Pixel_mm)
self.Im = imresize(image.Itemp, self.Rgrid, interp='nearest')
self.MRI,self.NRI = np.shape(self.Im)
print("dt: " + str(self.dt) + " dx: " + str(self.dx) + " Grid: " + str(self.MRI) + " x " + str(self.NRI))
def initReceivers(self):
self.receiver_signals = 0
def setDevice(self,Device):
if Device == 0:
self.Device = 'CPU'
elif Device ==1:
self.Device = 'GPU_Global'
elif Device ==2:
self.Device = 'GPU_Local'
| true
| true
|
790bc268fe1805820861572235668815eee79955
| 785
|
py
|
Python
|
workflow_lib/wwq_dbase.py
|
VUB-HYDR/2018_Chawanda_etal
|
46af26916806e2f61fd48d777f88b04da7fffbbe
|
[
"MIT"
] | 14
|
2018-09-27T16:03:10.000Z
|
2021-04-15T06:09:21.000Z
|
workflow_lib/wwq_dbase.py
|
VUB-HYDR/2018_Chawanda_etal
|
46af26916806e2f61fd48d777f88b04da7fffbbe
|
[
"MIT"
] | 2
|
2019-10-24T14:03:41.000Z
|
2019-10-31T22:10:19.000Z
|
workflow_lib/wwq_dbase.py
|
VUB-HYDR/2018_Chawanda_etal
|
46af26916806e2f61fd48d777f88b04da7fffbbe
|
[
"MIT"
] | 7
|
2018-11-14T19:42:59.000Z
|
2021-08-16T07:09:50.000Z
|
import sys
import cj_function_lib as cj
import init_file as variables
import mdbtools as mdt
#print variables.ProjMDB
#print variables.QSWAT_MDB
wwqrng = cj.extract_table_from_mdb(variables.QSWAT_MDB, 'wwqrng', variables.path + "\\wwqrng.tmp~")
wwq_defaults={}
for record in wwqrng: # Getting a list of parameter names for wwq and their defaults
if record.split(",")[0].strip(" ") != "":
wwq_defaults[record.split(",")[0].strip("\[").strip("\]")] = record.split(",")[3]
"""
# here we commit to table the parameters for the wwq to the row in the table wwq
"""
wwq = mdt.mdb_with_ops(variables.ProjMDB)
wwq.clear_table("wwq")
wwq_defaults["OID"] = 1
wwq_defaults = cj.format_data_type(wwq_defaults, wwqrng)
wwq.insert_row("wwq", wwq_defaults, True)
wwq.disconnect()
| 27.068966
| 99
| 0.719745
|
import sys
import cj_function_lib as cj
import init_file as variables
import mdbtools as mdt
wwqrng = cj.extract_table_from_mdb(variables.QSWAT_MDB, 'wwqrng', variables.path + "\\wwqrng.tmp~")
wwq_defaults={}
for record in wwqrng:
if record.split(",")[0].strip(" ") != "":
wwq_defaults[record.split(",")[0].strip("\[").strip("\]")] = record.split(",")[3]
wwq = mdt.mdb_with_ops(variables.ProjMDB)
wwq.clear_table("wwq")
wwq_defaults["OID"] = 1
wwq_defaults = cj.format_data_type(wwq_defaults, wwqrng)
wwq.insert_row("wwq", wwq_defaults, True)
wwq.disconnect()
| true
| true
|
790bc387a653b8e5be169c257067dd014f119978
| 5,889
|
py
|
Python
|
graphnas/evolution_trainer.py
|
mhnnunes/nas_gnn
|
91092acfee9fdbbef3e22252040b80aa96143311
|
[
"Apache-2.0"
] | 13
|
2020-07-29T12:45:22.000Z
|
2022-03-07T06:26:02.000Z
|
graphnas/evolution_trainer.py
|
mhnnunes/nas_gnn
|
91092acfee9fdbbef3e22252040b80aa96143311
|
[
"Apache-2.0"
] | null | null | null |
graphnas/evolution_trainer.py
|
mhnnunes/nas_gnn
|
91092acfee9fdbbef3e22252040b80aa96143311
|
[
"Apache-2.0"
] | 3
|
2020-09-27T06:43:17.000Z
|
2020-11-26T08:43:35.000Z
|
import time
import torch
import numpy as np
from collections import deque
from graphnas.trainer import Trainer
class Evolution_Trainer(Trainer):
"""
    This class implements the Asynchronous Aging Evolution
    proposed by Real et al. in:
    Regularized Evolution for Image Classifier Architecture Search,
    available at: https://arxiv.org/abs/1802.01548
"""
def __init__(self, args):
super(Evolution_Trainer, self).__init__(args)
self.args = args
self.random_seed = args.random_seed
self.population = deque()
self.accuracies = deque()
self.population_size = args.population_size
self.sample_size = args.sample_size
self.cycles = args.cycles
self.init_time = 0
print('initializing population on evolution_trainer init, maybe not the best strategy')
self.__initialize_population()
def derive_from_population(self):
population = self._construct_action(self.population)
best_score_index, _ = \
self._get_best_individual_accuracy(self.accuracies)
best_structure = self.form_gnn_info(population[best_score_index])
print("[DERIVE] Best Structure:", str(best_structure))
# train from scratch to get the final score
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.cuda.manual_seed_all(self.random_seed)
test_scores_list = []
for i in range(10): # run 10 times to get Mean and Stddev
val_acc, test_acc = self.submodel_manager.evaluate(best_structure)
test_scores_list.append(test_acc)
print("[DERIVE] Best Results: ", best_structure, ": ",
np.mean(test_scores_list),
"+/-", np.std(test_scores_list))
def _mutate_individual(self, indiv):
# Choose a random position on the individual to mutate
position_to_mutate = np.random.randint(len(indiv))
# This position will receive a randomly chosen index
# of the search_spaces's list
# for the action corresponding to that position in the individual
sp_list = self.search_space[self.action_list[position_to_mutate]]
indiv[position_to_mutate] = \
np.random.randint(0, len(sp_list))
return indiv
def _get_best_individual_accuracy(self, accs):
max_acc_index = 0
max_acc = -1
for index, acc in enumerate(accs):
if acc > max_acc:
max_acc = acc
max_acc_index = index
return max_acc_index, max_acc
def __initialize_population(self):
print("\n\n===== Evaluating initial random population =====")
start_initial_population_time = time.time()
while len(self.population) < self.population_size:
# print('adding individual #:', len(population))
individual = self._generate_random_individual()
ind_actions = self._construct_action([individual])
gnn = self.form_gnn_info(ind_actions[0])
_, ind_acc = \
self.submodel_manager.train(gnn, format=self.args.format)
print("individual:", individual, " val_score:", ind_acc)
self.accuracies.append(ind_acc)
self.population.append(individual)
end_initial_pop_time = time.time()
self.init_time = end_initial_pop_time - start_initial_population_time
print("Time elapsed initializing population: " +
str(self.init_time))
print("===== Evaluating initial random population DONE ====")
def train(self):
print("\n\n===== Evolution ====")
start_evolution_time = time.time()
while self.cycles > 0:
sample = [] # list with indexes to population individuals
sample_accs = [] # accuracies of the sampled individuals
while len(sample) < self.sample_size:
candidate = np.random.randint(0, len(self.population))
sample.append(self.population[candidate])
sample_accs.append(self.accuracies[candidate])
# Get best individual on sample to serve as parent
max_sample_acc_index, max_sample_acc = \
self._get_best_individual_accuracy(sample_accs)
parent = sample[max_sample_acc_index]
# print('parent: ', parent)
child = parent.copy()
child = self._mutate_individual(child)
# print('child: ', child)
child_actions = self._construct_action([child])
gnn = self.form_gnn_info(child_actions[0])
_, child_acc = \
self.submodel_manager.train(gnn, format=self.args.format)
# print('child acc: ', child_acc)
print("parent: ", str(parent), " val_score: ", str(max_sample_acc),
"| child: ", str(child), ", val_score: ", str(child_acc))
self.accuracies.append(child_acc)
self.population.append(child)
if self.cycles % self.args.eval_cycle == 0:
self.derive_from_population()
# Remove oldest individual (Aging/Regularized evolution)
self.population.popleft()
self.accuracies.popleft()
print("[POPULATION STATS] Mean/Median/Best: ",
np.mean(self.accuracies),
np.median(self.accuracies),
np.max(self.accuracies))
self.cycles -= 1
end_evolution_time = time.time()
total_evolution_time = end_evolution_time - start_evolution_time
print('Time spent on evolution: ' +
str(total_evolution_time))
print('Total elapsed time: ' +
str(total_evolution_time + self.init_time))
print("===== Evolution DONE ====")
def derive(self, sample_num=None):
self.derive_from_population()
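# ---------------------------------------------------------------------------
# Minimal self-contained sketch of the aging-evolution loop implemented by
# train() above.  A toy fitness function stands in for GNN training; all
# numbers below (population size, sample size, gene range) are illustrative
# assumptions.
if __name__ == '__main__':
    rng = np.random.default_rng(0)

    def toy_fitness(ind):
        # stand-in for submodel_manager.train(): reward small gene values
        return -float(sum(ind))

    population = deque([list(rng.integers(0, 4, size=6)) for _ in range(20)])
    accs = deque([toy_fitness(ind) for ind in population])
    for _ in range(100):                                   # cycles
        sample_idx = rng.integers(0, len(population), size=5)
        parent = population[int(max(sample_idx, key=lambda i: accs[i]))]
        child = list(parent)
        child[int(rng.integers(len(child)))] = int(rng.integers(0, 4))
        population.append(child)
        accs.append(toy_fitness(child))
        population.popleft()                               # age out the oldest
        accs.popleft()
    print('best toy fitness in final population:', max(accs))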
| 43.622222
| 95
| 0.626083
|
import time
import torch
import numpy as np
from collections import deque
from graphnas.trainer import Trainer
class Evolution_Trainer(Trainer):
def __init__(self, args):
super(Evolution_Trainer, self).__init__(args)
self.args = args
self.random_seed = args.random_seed
self.population = deque()
self.accuracies = deque()
self.population_size = args.population_size
self.sample_size = args.sample_size
self.cycles = args.cycles
self.init_time = 0
print('initializing population on evolution_trainer init, maybe not the best strategy')
self.__initialize_population()
def derive_from_population(self):
population = self._construct_action(self.population)
best_score_index, _ = \
self._get_best_individual_accuracy(self.accuracies)
best_structure = self.form_gnn_info(population[best_score_index])
print("[DERIVE] Best Structure:", str(best_structure))
np.random.seed(self.random_seed)
torch.manual_seed(self.random_seed)
torch.cuda.manual_seed_all(self.random_seed)
test_scores_list = []
for i in range(10):
val_acc, test_acc = self.submodel_manager.evaluate(best_structure)
test_scores_list.append(test_acc)
print("[DERIVE] Best Results: ", best_structure, ": ",
np.mean(test_scores_list),
"+/-", np.std(test_scores_list))
def _mutate_individual(self, indiv):
position_to_mutate = np.random.randint(len(indiv))
# for the action corresponding to that position in the individual
sp_list = self.search_space[self.action_list[position_to_mutate]]
indiv[position_to_mutate] = \
np.random.randint(0, len(sp_list))
return indiv
def _get_best_individual_accuracy(self, accs):
max_acc_index = 0
max_acc = -1
for index, acc in enumerate(accs):
if acc > max_acc:
max_acc = acc
max_acc_index = index
return max_acc_index, max_acc
def __initialize_population(self):
print("\n\n===== Evaluating initial random population =====")
start_initial_population_time = time.time()
while len(self.population) < self.population_size:
# print('adding individual
individual = self._generate_random_individual()
ind_actions = self._construct_action([individual])
gnn = self.form_gnn_info(ind_actions[0])
_, ind_acc = \
self.submodel_manager.train(gnn, format=self.args.format)
print("individual:", individual, " val_score:", ind_acc)
self.accuracies.append(ind_acc)
self.population.append(individual)
end_initial_pop_time = time.time()
self.init_time = end_initial_pop_time - start_initial_population_time
print("Time elapsed initializing population: " +
str(self.init_time))
print("===== Evaluating initial random population DONE ====")
def train(self):
print("\n\n===== Evolution ====")
start_evolution_time = time.time()
while self.cycles > 0:
sample = [] # list with indexes to population individuals
sample_accs = [] # accuracies of the sampled individuals
while len(sample) < self.sample_size:
candidate = np.random.randint(0, len(self.population))
sample.append(self.population[candidate])
sample_accs.append(self.accuracies[candidate])
# Get best individual on sample to serve as parent
max_sample_acc_index, max_sample_acc = \
self._get_best_individual_accuracy(sample_accs)
parent = sample[max_sample_acc_index]
# print('parent: ', parent)
child = parent.copy()
child = self._mutate_individual(child)
# print('child: ', child)
child_actions = self._construct_action([child])
gnn = self.form_gnn_info(child_actions[0])
_, child_acc = \
self.submodel_manager.train(gnn, format=self.args.format)
# print('child acc: ', child_acc)
print("parent: ", str(parent), " val_score: ", str(max_sample_acc),
"| child: ", str(child), ", val_score: ", str(child_acc))
self.accuracies.append(child_acc)
self.population.append(child)
if self.cycles % self.args.eval_cycle == 0:
self.derive_from_population()
# Remove oldest individual (Aging/Regularized evolution)
self.population.popleft()
self.accuracies.popleft()
print("[POPULATION STATS] Mean/Median/Best: ",
np.mean(self.accuracies),
np.median(self.accuracies),
np.max(self.accuracies))
self.cycles -= 1
end_evolution_time = time.time()
total_evolution_time = end_evolution_time - start_evolution_time
print('Time spent on evolution: ' +
str(total_evolution_time))
print('Total elapsed time: ' +
str(total_evolution_time + self.init_time))
print("===== Evolution DONE ====")
def derive(self, sample_num=None):
self.derive_from_population()
| true
| true
|
790bc3d560d420f92c41fd48572a93e3b284d13b
| 1,645
|
py
|
Python
|
src/game_of_life/python_coderetreat_socramob/cr_socramob08/coord_test.py
|
hemmerling/codingdojo
|
3e8860b78e96ac15cde6a12db3b2431e8b63714f
|
[
"Apache-2.0"
] | null | null | null |
src/game_of_life/python_coderetreat_socramob/cr_socramob08/coord_test.py
|
hemmerling/codingdojo
|
3e8860b78e96ac15cde6a12db3b2431e8b63714f
|
[
"Apache-2.0"
] | null | null | null |
src/game_of_life/python_coderetreat_socramob/cr_socramob08/coord_test.py
|
hemmerling/codingdojo
|
3e8860b78e96ac15cde6a12db3b2431e8b63714f
|
[
"Apache-2.0"
] | null | null | null |
# This file was originally generated by PyScripter's unit test wizard
import unittest
from coord import Coord
from cell import Cell
from field import Field
def dummy():
""" Dummy function for comparison of the return values """
return
class CoordTest(unittest.TestCase):
def setUp(self):
self.field = Field()
pass
def tearDown(self):
pass
def testMain(self):
self.coord = Coord()
        assert self.coord.main() == dummy(), 'Coord.main() does not provide the right return value'
def testCoordSavesItsCoordinates(self):
coord = Coord(4,5)
assert 4 == coord.x
assert 5 == coord.y
def testCreatedCellIsAlive(self):
coord1 = Coord(4,5)
cell = Cell(coord1)
assert cell.isAlive() == True, 'cell.status() does not provide the right return value'
def testCellKnowsIfItLivesInTheNextStep(self):
cell = Cell(Coord(4,5))
cell.nextStep(5)
assert False == cell.isAlive()
def addCell(self,x,y):
self.field.add(Cell(Coord(x, y)))
def fillExampleField(self):
self.addCell(1,1)
self.addCell(1,2)
self.addCell(2,1)
def testFieldIsWellCreated(self):
self.fillExampleField()
        assert self.field.getNumberOfLivingCells() == 3, 'field.getNumberOfLivingCells() does not provide the right return value'
# run all tests
if __name__ == "__main__":
try:
unittest.main()
except SystemExit as inst:
if inst.args[0] is True: # raised by sys.exit(True) when tests failed
raise
| 27.416667
| 124
| 0.616413
|
import unittest
from coord import Coord
from cell import Cell
from field import Field
def dummy():
return
class CoordTest(unittest.TestCase):
def setUp(self):
self.field = Field()
pass
def tearDown(self):
pass
def testMain(self):
self.coord = Coord()
        assert self.coord.main() == dummy(), 'Coord.main() does not provide the right return value'
def testCoordSavesItsCoordinates(self):
coord = Coord(4,5)
assert 4 == coord.x
assert 5 == coord.y
def testCreatedCellIsAlive(self):
coord1 = Coord(4,5)
cell = Cell(coord1)
assert cell.isAlive() == True, 'cell.status() does not provide the right return value'
def testCellKnowsIfItLivesInTheNextStep(self):
cell = Cell(Coord(4,5))
cell.nextStep(5)
assert False == cell.isAlive()
def addCell(self,x,y):
self.field.add(Cell(Coord(x, y)))
def fillExampleField(self):
self.addCell(1,1)
self.addCell(1,2)
self.addCell(2,1)
def testFieldIsWellCreated(self):
self.fillExampleField()
        assert self.field.getNumberOfLivingCells() == 3, 'field.getNumberOfLivingCells() does not provide the right return value'
# run all tests
if __name__ == "__main__":
try:
unittest.main()
except SystemExit as inst:
if inst.args[0] is True: # raised by sys.exit(True) when tests failed
raise
| true
| true
|
790bc3fe57b4e903bf93fa21bde113be2508747e
| 9,883
|
py
|
Python
|
apps/qa/models.py
|
PremierLangage/premierlangage
|
7134a2aadffee2bf264abee6c4b23ea33f1b390b
|
[
"CECILL-B"
] | 8
|
2019-01-30T13:51:59.000Z
|
2022-01-08T03:26:53.000Z
|
apps/qa/models.py
|
PremierLangage/premierlangage
|
7134a2aadffee2bf264abee6c4b23ea33f1b390b
|
[
"CECILL-B"
] | 286
|
2019-01-18T21:35:51.000Z
|
2022-03-24T18:53:59.000Z
|
apps/qa/models.py
|
PremierLangage/premierlangage
|
7134a2aadffee2bf264abee6c4b23ea33f1b390b
|
[
"CECILL-B"
] | 4
|
2019-02-11T13:38:30.000Z
|
2021-03-02T20:59:00.000Z
|
import math
from django.conf import settings
from django.db import models
from django.db.models import F
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.utils.text import slugify
from django_markdown.models import MarkdownField
from hitcount.models import HitCountMixin
from taggit.managers import TaggableManager
from qa.mixins import DateMixin
from qa.utils import epoch_seconds
REPUTATION = settings.QA_SETTINGS['reputation']
class QAQuestion(models.Model, HitCountMixin, DateMixin):
"""Model class to contain every question in the forum"""
slug = models.SlugField(max_length=200)
title = models.CharField(max_length=200, blank=False)
description = MarkdownField()
pub_date = models.DateTimeField('date published', auto_now_add=True)
update_date = models.DateTimeField('date updated', null=True)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_question")
tags = TaggableManager()
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
closed = models.BooleanField(default=False)
points = models.IntegerField(default=0)
popularity = models.FloatField(default=0)
def mod_points(self, points):
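        # Bump the score with an atomic F() expression and recompute popularity
        # using a Reddit-style "hot" ranking (log-scaled score plus time decay).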
p = self.points + points
self.points = F('points') + points
order = math.log(max(abs(p), 1), 10)
sign = 1 if p > 0 else -1 if p < 0 else 0
seconds = epoch_seconds(self.pub_date) - 1134028003
self.popularity = round(sign * order + seconds / 45000, 7)
self.save()
self.refresh_from_db()
def has_accepted_answer(self):
return bool(self.qaanswer_set.filter(answer=True))
def save(self, *args, **kwargs):
if not self.pk:
self.slug = slugify(self.title)
self.user.profile.mod_rep(REPUTATION['CREATE_QUESTION'])
super(QAQuestion, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.user.profile.mod_rep(-REPUTATION['CREATE_QUESTION'])
super(QAQuestion, self).delete(*args, **kwargs)
def __str__(self):
return self.title
class QAAnswer(models.Model, DateMixin):
"""Model class to contain every answer in the forum and to link it
to the proper question."""
question = models.ForeignKey(QAQuestion, on_delete=models.CASCADE)
answer_text = MarkdownField()
pub_date = models.DateTimeField('date published', auto_now_add=True)
update_date = models.DateTimeField('date updated', null=True)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_answer")
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
answer = models.BooleanField(default=False)
points = models.IntegerField(default=0)
def save(self, *args, **kwargs):
if self.pk is None:
self.user.profile.mod_rep(REPUTATION['CREATE_ANSWER'])
super(QAAnswer, self).save(*args, **kwargs)
def mod_points(self, points):
self.points = F('points') + points
self.save()
self.refresh_from_db()
def delete(self, *args, **kwargs):
self.user.profile.mod_rep(-REPUTATION['CREATE_ANSWER'])
if self.answer:
self.question.user.profile.mod_rep(-REPUTATION['ANSWER_ACCEPTED'] // 2)
self.user.profile.mod_rep(-REPUTATION['ANSWER_ACCEPTED'])
super(QAAnswer, self).delete(*args, **kwargs)
def __str__(self): # pragma: no cover
return self.answer_text
class Meta:
ordering = ['-answer', '-pub_date']
class VoteParent(models.Model):
"""Abstract model to define the basic elements to every single vote."""
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
value = models.BooleanField(default=True)
class Meta:
abstract = True
class QAAnswerVote(VoteParent):
"""Model class to contain the votes for the answers."""
answer = models.ForeignKey(QAAnswer, on_delete=models.CASCADE)
class Meta:
unique_together = (('user', 'answer'),)
def save(self, *args, **kwargs):
if self.pk is None: # New vote
if self.value:
self.answer.user.profile.mod_rep(REPUTATION['UPVOTE_ANSWER'])
self.answer.mod_points(1)
else:
self.answer.user.profile.mod_rep(REPUTATION['DOWNVOTE_ANSWER'])
self.answer.mod_points(-1)
else: # Changed vote
if self.value:
self.answer.user.profile.mod_rep(REPUTATION['UPVOTE_ANSWER']
- REPUTATION['DOWNVOTE_ANSWER'])
self.answer.mod_points(2)
else:
self.answer.user.profile.mod_rep(REPUTATION['DOWNVOTE_ANSWER']
- REPUTATION['UPVOTE_ANSWER'])
self.answer.mod_points(-2)
super(QAAnswerVote, self).save(*args, **kwargs)
@receiver(pre_delete, sender='qa.QAAnswerVote')
def on_delete(sender, instance, using, **kwargs):
if instance.value:
instance.answer.user.profile.mod_rep(-REPUTATION['UPVOTE_ANSWER'])
instance.answer.points -= 1
else:
instance.answer.user.profile.mod_rep(-REPUTATION['DOWNVOTE_ANSWER'])
instance.answer.points += 1
instance.answer.save()
class QAQuestionVote(VoteParent):
"""Model class to contain the votes for the questions."""
question = models.ForeignKey(QAQuestion, on_delete=models.CASCADE)
class Meta:
unique_together = (('user', 'question'),)
def save(self, *args, **kwargs):
if self.pk is None: # New vote
if self.value:
self.question.user.profile.mod_rep(REPUTATION['UPVOTE_QUESTION'])
self.question.mod_points(1)
else:
self.question.user.profile.mod_rep(REPUTATION['DOWNVOTE_QUESTION'])
self.question.mod_points(-1)
else: # Changed vote
if self.value:
self.question.user.profile.mod_rep(REPUTATION['UPVOTE_QUESTION']
- REPUTATION['DOWNVOTE_QUESTION'])
self.question.mod_points(2)
else:
self.question.user.profile.mod_rep(REPUTATION['DOWNVOTE_QUESTION']
- REPUTATION['UPVOTE_QUESTION'])
self.question.mod_points(-2)
self.question.save()
super(QAQuestionVote, self).save(*args, **kwargs)
@receiver(pre_delete, sender='qa.QAQuestionVote')
def on_delete(sender, instance, using, **kwargs):
if instance.value:
        instance.question.user.profile.mod_rep(-REPUTATION['UPVOTE_QUESTION'])
instance.question.points -= 1
else:
        instance.question.user.profile.mod_rep(-REPUTATION['DOWNVOTE_QUESTION'])
instance.question.points += 1
instance.question.save()
class BaseComment(models.Model, DateMixin):
"""Abstract model to define the basic elements to every single comment."""
pub_date = models.DateTimeField('date published', auto_now_add=True)
update_date = models.DateTimeField('date updated', null=True)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_comment")
comment_text = models.CharField(max_length=400)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
class Meta:
abstract = True
def __str__(self): # pragma: no cover
return self.comment_text
class QAAnswerComment(BaseComment):
"""Model class to contain the comments for the answers."""
answer = models.ForeignKey(QAAnswer, on_delete=models.CASCADE)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_answer_comment")
def save(self, *args, **kwargs):
if self.pk is None:
self.user.profile.mod_rep(REPUTATION['CREATE_ANSWER_COMMENT'])
self.answer.user.profile.mod_rep(REPUTATION['RECEIVE_ANSWER_COMMENT'])
super(QAAnswerComment, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.user.profile.mod_rep(-REPUTATION['CREATE_ANSWER_COMMENT'])
self.answer.user.profile.mod_rep(-REPUTATION['RECEIVE_ANSWER_COMMENT'])
super(QAAnswerComment, self).delete(*args, **kwargs)
class QAQuestionComment(BaseComment):
"""Model class to contain the comments for the questions."""
question = models.ForeignKey(QAQuestion, on_delete=models.CASCADE)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_question_comment")
def save(self, *args, **kwargs):
if self.pk is None:
self.user.profile.mod_rep(REPUTATION['CREATE_QUESTION_COMMENT'])
self.question.user.profile.mod_rep(REPUTATION['RECEIVE_QUESTION_COMMENT'])
super(QAQuestionComment, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.user.profile.mod_rep(-REPUTATION['CREATE_QUESTION_COMMENT'])
self.question.user.profile.mod_rep(-REPUTATION['RECEIVE_QUESTION_COMMENT'])
super(QAQuestionComment, self).delete(*args, **kwargs)
| 38.455253
| 99
| 0.641404
|
import math
from django.conf import settings
from django.db import models
from django.db.models import F
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.utils.text import slugify
from django_markdown.models import MarkdownField
from hitcount.models import HitCountMixin
from taggit.managers import TaggableManager
from qa.mixins import DateMixin
from qa.utils import epoch_seconds
REPUTATION = settings.QA_SETTINGS['reputation']
class QAQuestion(models.Model, HitCountMixin, DateMixin):
slug = models.SlugField(max_length=200)
title = models.CharField(max_length=200, blank=False)
description = MarkdownField()
pub_date = models.DateTimeField('date published', auto_now_add=True)
update_date = models.DateTimeField('date updated', null=True)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_question")
tags = TaggableManager()
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
closed = models.BooleanField(default=False)
points = models.IntegerField(default=0)
popularity = models.FloatField(default=0)
def mod_points(self, points):
p = self.points + points
self.points = F('points') + points
order = math.log(max(abs(p), 1), 10)
sign = 1 if p > 0 else -1 if p < 0 else 0
seconds = epoch_seconds(self.pub_date) - 1134028003
self.popularity = round(sign * order + seconds / 45000, 7)
self.save()
self.refresh_from_db()
def has_accepted_answer(self):
return bool(self.qaanswer_set.filter(answer=True))
def save(self, *args, **kwargs):
if not self.pk:
self.slug = slugify(self.title)
self.user.profile.mod_rep(REPUTATION['CREATE_QUESTION'])
super(QAQuestion, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.user.profile.mod_rep(-REPUTATION['CREATE_QUESTION'])
super(QAQuestion, self).delete(*args, **kwargs)
def __str__(self):
return self.title
class QAAnswer(models.Model, DateMixin):
question = models.ForeignKey(QAQuestion, on_delete=models.CASCADE)
answer_text = MarkdownField()
pub_date = models.DateTimeField('date published', auto_now_add=True)
update_date = models.DateTimeField('date updated', null=True)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_answer")
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
answer = models.BooleanField(default=False)
points = models.IntegerField(default=0)
def save(self, *args, **kwargs):
if self.pk is None:
self.user.profile.mod_rep(REPUTATION['CREATE_ANSWER'])
super(QAAnswer, self).save(*args, **kwargs)
def mod_points(self, points):
self.points = F('points') + points
self.save()
self.refresh_from_db()
def delete(self, *args, **kwargs):
self.user.profile.mod_rep(-REPUTATION['CREATE_ANSWER'])
if self.answer:
self.question.user.profile.mod_rep(-REPUTATION['ANSWER_ACCEPTED'] // 2)
self.user.profile.mod_rep(-REPUTATION['ANSWER_ACCEPTED'])
super(QAAnswer, self).delete(*args, **kwargs)
def __str__(self):
return self.answer_text
class Meta:
ordering = ['-answer', '-pub_date']
class VoteParent(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
value = models.BooleanField(default=True)
class Meta:
abstract = True
class QAAnswerVote(VoteParent):
answer = models.ForeignKey(QAAnswer, on_delete=models.CASCADE)
class Meta:
unique_together = (('user', 'answer'),)
def save(self, *args, **kwargs):
if self.pk is None:
if self.value:
self.answer.user.profile.mod_rep(REPUTATION['UPVOTE_ANSWER'])
self.answer.mod_points(1)
else:
self.answer.user.profile.mod_rep(REPUTATION['DOWNVOTE_ANSWER'])
self.answer.mod_points(-1)
else:
if self.value:
self.answer.user.profile.mod_rep(REPUTATION['UPVOTE_ANSWER']
- REPUTATION['DOWNVOTE_ANSWER'])
self.answer.mod_points(2)
else:
self.answer.user.profile.mod_rep(REPUTATION['DOWNVOTE_ANSWER']
- REPUTATION['UPVOTE_ANSWER'])
self.answer.mod_points(-2)
super(QAAnswerVote, self).save(*args, **kwargs)
@receiver(pre_delete, sender='qa.QAAnswerVote')
def on_delete(sender, instance, using, **kwargs):
if instance.value:
instance.answer.user.profile.mod_rep(-REPUTATION['UPVOTE_ANSWER'])
instance.answer.points -= 1
else:
instance.answer.user.profile.mod_rep(-REPUTATION['DOWNVOTE_ANSWER'])
instance.answer.points += 1
instance.answer.save()
class QAQuestionVote(VoteParent):
question = models.ForeignKey(QAQuestion, on_delete=models.CASCADE)
class Meta:
unique_together = (('user', 'question'),)
def save(self, *args, **kwargs):
if self.pk is None:
if self.value:
self.question.user.profile.mod_rep(REPUTATION['UPVOTE_QUESTION'])
self.question.mod_points(1)
else:
self.question.user.profile.mod_rep(REPUTATION['DOWNVOTE_QUESTION'])
self.question.mod_points(-1)
else:
if self.value:
self.question.user.profile.mod_rep(REPUTATION['UPVOTE_QUESTION']
- REPUTATION['DOWNVOTE_QUESTION'])
self.question.mod_points(2)
else:
self.question.user.profile.mod_rep(REPUTATION['DOWNVOTE_QUESTION']
- REPUTATION['UPVOTE_QUESTION'])
self.question.mod_points(-2)
self.question.save()
super(QAQuestionVote, self).save(*args, **kwargs)
@receiver(pre_delete, sender='qa.QAQuestionVote')
def on_delete(sender, instance, using, **kwargs):
if instance.value:
        instance.question.user.profile.mod_rep(-REPUTATION['UPVOTE_QUESTION'])
instance.question.points -= 1
else:
        instance.question.user.profile.mod_rep(-REPUTATION['DOWNVOTE_QUESTION'])
instance.question.points += 1
instance.question.save()
class BaseComment(models.Model, DateMixin):
pub_date = models.DateTimeField('date published', auto_now_add=True)
update_date = models.DateTimeField('date updated', null=True)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_comment")
comment_text = models.CharField(max_length=400)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True)
class Meta:
abstract = True
def __str__(self):
return self.comment_text
class QAAnswerComment(BaseComment):
answer = models.ForeignKey(QAAnswer, on_delete=models.CASCADE)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_answer_comment")
def save(self, *args, **kwargs):
if self.pk is None:
self.user.profile.mod_rep(REPUTATION['CREATE_ANSWER_COMMENT'])
self.answer.user.profile.mod_rep(REPUTATION['RECEIVE_ANSWER_COMMENT'])
super(QAAnswerComment, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.user.profile.mod_rep(-REPUTATION['CREATE_ANSWER_COMMENT'])
self.answer.user.profile.mod_rep(-REPUTATION['RECEIVE_ANSWER_COMMENT'])
super(QAAnswerComment, self).delete(*args, **kwargs)
class QAQuestionComment(BaseComment):
question = models.ForeignKey(QAQuestion, on_delete=models.CASCADE)
update_user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL,
related_name="updated_question_comment")
def save(self, *args, **kwargs):
if self.pk is None:
self.user.profile.mod_rep(REPUTATION['CREATE_QUESTION_COMMENT'])
self.question.user.profile.mod_rep(REPUTATION['RECEIVE_QUESTION_COMMENT'])
super(QAQuestionComment, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
self.user.profile.mod_rep(-REPUTATION['CREATE_QUESTION_COMMENT'])
self.question.user.profile.mod_rep(-REPUTATION['RECEIVE_QUESTION_COMMENT'])
super(QAQuestionComment, self).delete(*args, **kwargs)
| true
| true
|
790bc421be922ae442f5ee89d52a6c4f31a4e50a
| 1,771
|
py
|
Python
|
generate_exampleA.py
|
xianruizhong/SpHAM
|
c85a5fe023bd0d760eb42c896cd57ecc07014087
|
[
"Apache-2.0"
] | 2
|
2022-03-27T06:05:09.000Z
|
2022-03-29T08:37:36.000Z
|
generate_exampleA.py
|
FengxiangHe/SpHAM
|
c85a5fe023bd0d760eb42c896cd57ecc07014087
|
[
"Apache-2.0"
] | null | null | null |
generate_exampleA.py
|
FengxiangHe/SpHAM
|
c85a5fe023bd0d760eb42c896cd57ecc07014087
|
[
"Apache-2.0"
] | 1
|
2022-03-29T08:37:40.000Z
|
2022-03-29T08:37:40.000Z
|
import numpy as np
def generate_A(filename1, filename2, noise = 'gau'):
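    # Simulate 100 nonlinear order-2 autoregressive series of length exp_T,
    # once with Gaussian noise and once with Student-t(2) noise,
    # then save the pair selected by `noise` as .npy arrays.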
exp_T = 4000
big_y_true_gau = []
big_y_noise_gau = []
big_y_true_t2 = []
big_y_noise_t2 = []
for times in range(100):
y_true_gau = np.zeros((exp_T, 1, 1))
y_true_gau[0] = np.random.rand()
y_true_gau[1] = np.random.rand()
y_true_t2 = np.zeros((exp_T, 1, 1))
y_true_t2[0] = np.random.rand()
y_true_t2[1] = np.random.rand()
y_noise_gau = y_true_gau.copy()
y_noise_t2 = y_true_t2.copy()
e_gau = np.random.normal(0, 0.3, (exp_T, 1))
e_t2 = np.random.standard_t(2, (exp_T,1))
y_noise_gau[0] = y_true_gau[0] + e_gau[0]
y_noise_gau[1] = y_true_gau[1] + e_gau[1]
y_noise_t2[0] = y_true_t2[0] + e_t2[0]
y_noise_t2[1] = y_true_t2[1] + e_t2[1]
for t in range(2, exp_T):
y_true_gau[t] = (3./2.)*np.sin(np.pi / 2. * y_noise_gau[t - 1]) - np.sin(np.pi / 2. * y_noise_gau[t - 2])
y_noise_gau[t] = y_true_gau[t] + 2* e_gau[t]
y_true_t2[t] = np.sin(np.pi / 2. * y_noise_t2[t - 1]) -np.sin(np.pi / 2. * y_noise_t2[t - 2])
y_noise_t2[t] = y_true_t2[t] + 2* e_t2[t]
big_y_true_gau.append(y_true_gau)
big_y_noise_gau.append(y_noise_gau)
big_y_true_t2.append(y_true_t2)
big_y_noise_t2.append(y_noise_t2)
if noise == 'gau':
with open(filename1, 'wb') as f:
np.save(f, np.array(big_y_true_gau))
with open(filename2, 'wb') as f:
np.save(f, np.array(big_y_noise_gau))
else:
with open(filename1, 'wb') as f:
np.save(f, np.array(big_y_true_t2))
with open(filename2, 'wb') as f:
np.save(f, np.array(big_y_noise_t2))
| 41.186047
| 117
| 0.570299
|
import numpy as np
def generate_A(filename1, filename2, noise = 'gau'):
exp_T = 4000
big_y_true_gau = []
big_y_noise_gau = []
big_y_true_t2 = []
big_y_noise_t2 = []
for times in range(100):
y_true_gau = np.zeros((exp_T, 1, 1))
y_true_gau[0] = np.random.rand()
y_true_gau[1] = np.random.rand()
y_true_t2 = np.zeros((exp_T, 1, 1))
y_true_t2[0] = np.random.rand()
y_true_t2[1] = np.random.rand()
y_noise_gau = y_true_gau.copy()
y_noise_t2 = y_true_t2.copy()
e_gau = np.random.normal(0, 0.3, (exp_T, 1))
e_t2 = np.random.standard_t(2, (exp_T,1))
y_noise_gau[0] = y_true_gau[0] + e_gau[0]
y_noise_gau[1] = y_true_gau[1] + e_gau[1]
y_noise_t2[0] = y_true_t2[0] + e_t2[0]
y_noise_t2[1] = y_true_t2[1] + e_t2[1]
for t in range(2, exp_T):
y_true_gau[t] = (3./2.)*np.sin(np.pi / 2. * y_noise_gau[t - 1]) - np.sin(np.pi / 2. * y_noise_gau[t - 2])
y_noise_gau[t] = y_true_gau[t] + 2* e_gau[t]
y_true_t2[t] = np.sin(np.pi / 2. * y_noise_t2[t - 1]) -np.sin(np.pi / 2. * y_noise_t2[t - 2])
y_noise_t2[t] = y_true_t2[t] + 2* e_t2[t]
big_y_true_gau.append(y_true_gau)
big_y_noise_gau.append(y_noise_gau)
big_y_true_t2.append(y_true_t2)
big_y_noise_t2.append(y_noise_t2)
if noise == 'gau':
with open(filename1, 'wb') as f:
np.save(f, np.array(big_y_true_gau))
with open(filename2, 'wb') as f:
np.save(f, np.array(big_y_noise_gau))
else:
with open(filename1, 'wb') as f:
np.save(f, np.array(big_y_true_t2))
with open(filename2, 'wb') as f:
np.save(f, np.array(big_y_noise_t2))
| true
| true
|
790bc42e7e4be27668c3a9feff0e06386ec66ac7
| 1,332
|
py
|
Python
|
crawler/tweet.py
|
EliasSchramm/TwitterDB
|
237ed8424547a1e9283aec83d4f1dffabd8cb13d
|
[
"MIT"
] | 1
|
2021-12-13T17:33:04.000Z
|
2021-12-13T17:33:04.000Z
|
crawler/tweet.py
|
EliasSchramm/TwitterDB
|
237ed8424547a1e9283aec83d4f1dffabd8cb13d
|
[
"MIT"
] | null | null | null |
crawler/tweet.py
|
EliasSchramm/TwitterDB
|
237ed8424547a1e9283aec83d4f1dffabd8cb13d
|
[
"MIT"
] | null | null | null |
import emoji
import string
class Tweet():
def __init__(self, text: str):
self.text = text.lower()
self.hashtags = self.find("#", forbidden="@")
self.cleanTag()
self.tags = self.find("@", forbidden="#")
def find(self, prefix, forbidden):
ret = []
_text = self.text
_text = _text.replace(forbidden, " ")
_text = _text.replace(" ", "")
_text = _text.replace("!", "")
if not _text.startswith("RT"):
for word in _text.split(" "):
word = self.remove_emojis(word)
if len(word) >= 2 and word.count(prefix) == 1:
word = word.split(prefix)
word = prefix + word[len(word) - 1]
word = word.strip()
if word not in ret and len(word) >= 2 and word.startswith(prefix):
ret.append(word.lower())
return ret
def remove_emojis(self, s):
return ''.join(c for c in s if c not in emoji.UNICODE_EMOJI['en'])
def cleanTag(self):
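        # Keep only ASCII letters, digits, '_', '@' and spaces in the tweet text.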
allowed = list(string.ascii_lowercase + string.ascii_uppercase + string.digits) + ["_", "@", " "]
newtext = ""
for letter in self.text:
if letter in allowed:
newtext += letter
self.text = newtext
| 28.956522
| 105
| 0.512763
|
import emoji
import string
class Tweet():
def __init__(self, text: str):
self.text = text.lower()
self.hashtags = self.find("#", forbidden="@")
self.cleanTag()
self.tags = self.find("@", forbidden="#")
def find(self, prefix, forbidden):
ret = []
_text = self.text
_text = _text.replace(forbidden, " ")
_text = _text.replace(" ", "")
_text = _text.replace("!", "")
if not _text.startswith("RT"):
for word in _text.split(" "):
word = self.remove_emojis(word)
if len(word) >= 2 and word.count(prefix) == 1:
word = word.split(prefix)
word = prefix + word[len(word) - 1]
word = word.strip()
if word not in ret and len(word) >= 2 and word.startswith(prefix):
ret.append(word.lower())
return ret
def remove_emojis(self, s):
return ''.join(c for c in s if c not in emoji.UNICODE_EMOJI['en'])
def cleanTag(self):
allowed = list(string.ascii_lowercase + string.ascii_uppercase + string.digits) + ["_", "@", " "]
newtext = ""
for letter in self.text:
if letter in allowed:
newtext += letter
self.text = newtext
| true
| true
|
790bc4345c5c6326334d97910c66fdd23fb02367
| 835
|
py
|
Python
|
bindings/python/src/test/test_rates_api.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 9
|
2018-07-02T15:21:40.000Z
|
2021-11-24T03:44:39.000Z
|
bindings/python/src/test/test_rates_api.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 8
|
2019-01-08T22:06:12.000Z
|
2022-03-16T15:02:37.000Z
|
bindings/python/src/test/test_rates_api.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 1
|
2021-12-06T19:08:05.000Z
|
2021-12-06T19:08:05.000Z
|
# coding: utf-8
"""
Cloudsmith API
The API to the Cloudsmith Service
OpenAPI spec version: v1
Contact: support@cloudsmith.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import cloudsmith_api
from cloudsmith_api.rest import ApiException
from cloudsmith_api.apis.rates_api import RatesApi
class TestRatesApi(unittest.TestCase):
""" RatesApi unit test stubs """
def setUp(self):
self.api = cloudsmith_api.apis.rates_api.RatesApi()
def tearDown(self):
pass
def test_rates_limits_list(self):
"""
Test case for rates_limits_list
Endpoint to check rate limits for current user.
"""
pass
if __name__ == '__main__':
unittest.main()
| 18.555556
| 68
| 0.691018
|
from __future__ import absolute_import
import os
import sys
import unittest
import cloudsmith_api
from cloudsmith_api.rest import ApiException
from cloudsmith_api.apis.rates_api import RatesApi
class TestRatesApi(unittest.TestCase):
def setUp(self):
self.api = cloudsmith_api.apis.rates_api.RatesApi()
def tearDown(self):
pass
def test_rates_limits_list(self):
pass
if __name__ == '__main__':
unittest.main()
| true
| true
|